diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..7b01579 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,6 @@ +[submodule "Pretrain/UniFormerV2"] + path = Pretrain/UniFormerV2 + url = https://github.com/OpenGVLab/UniFormerV2.git +[submodule "Downstream/Ego-Tasks"] + path = Downstream/Ego-Tasks + url = https://github.com/OpenGVLab/ego4d-eccv2022-solutions.git diff --git a/Data/InternVid/README.md b/Data/InternVid/README.md new file mode 100644 index 0000000..f924062 --- /dev/null +++ b/Data/InternVid/README.md @@ -0,0 +1,60 @@ +# InternVid \[[Paper](https://arxiv.org/pdf/2307.06942.pdf)\] + +[![Dataset meta](https://img.shields.io/badge/%F0%9F%A4%97%20InternVid-Dataset-blue)](https://huggingface.co/datasets/OpenGVLab/InternVid) | [![Model Checkpoint](https://img.shields.io/badge/%F0%9F%A4%97%20ViCLIP-Model-purple)](https://huggingface.co/OpenGVLab/ViCLIP) + +# :fire: News +We are excited to announce the partial release of a large-scale video-text dataset aimed at facilitating multimodal understanding and generation. As part of this release, we are making available a [subset](https://huggingface.co/datasets/OpenGVLab/InternVid) of the dataset, which comprises 10 million video clips. Additionally, we have provided a [ViCLIP](https://huggingface.co/OpenGVLab/ViCLIP) model trained on this subset, using the ViT-L architecture. It achieves SOTA zero-shot action recognition performance on Kinetics. + +We give a step-by-step instructions and clarify the process of accessing and utilizing ViClip in [demo.ipynb](https://github.com/OpenGVLab/InternVideo/blob/main/Data/InternVid/demo.ipynb). + +Stay tuned for updates! + +# Introduction + +**Data** + +We collected videos from 16 popular categories with varying percentages. We ensured diversity by selecting videos from countries with different languages instead of relying on a dominant language environment. The countries we sampled from include the UK, USA, Australia, Japan, Korea, China, Russia, and France, among others. In terms of duration, every video lasts 351.9s on average. Almost half (49%) of the videos are five minutes or less, while a quarter (26%) fall between five and ten minutes. Only 8% of the videos are over 20 minutes long. Among the curated videos, 85% were high-resolution (720P), while the remaining 15% had lower resolutions ranging from 360P to 720P. Although the lower-resolution videos may not perform as well as the high-resolution ones in content generation tasks, they can still be useful in video-language representation learning, provided that they have appropriate captions. + +![b469e00b43d46a6b3f89899483abcf6](https://github.com/OpenGVLab/InternVideo/assets/43169235/7d6aca7d-362a-425d-9ef2-ec0189491b52) + +InternVid exhibits diverse clip durations and caption lengths in the segmented clip level. The aesthetic scores and clip-caption similarities are distributed uniformly. The majority of clips are 0-10 seconds in length, accounting for 85% of all clips. Approximately half of the clips have captions with 10-20 words, while one-third of the clip captions have fewer than 10 words. About 11% of clips have long captions with more than 20 words. + +![429af4993adb77478c000c865ae5a1b](https://github.com/OpenGVLab/InternVideo/assets/43169235/f64588c3-81e8-43de-b771-46500474d2ff) + +**ViCLIP: a simple video CLIP for transferrable video-text representation** + +Built upon CLIP, we make a simple video-text pretraining baseline ViCLIP. It consists of a video encoder (ViT) and a text encoder, as given below. 
Both modules are initialized from the corresponding CLIP components. We update the native attention in the video encoder to spatiotemporal attention while maintaining other design elements. For efficient learning, we apply masking to videos in pre-training.
+
+
+# Data & Model Zoo
+
+### Pretrained Data & Model
+ +| Model | Training Data | Descriptions | +| :-----------------: | :----------------------: | :---------------------------------------------------------------------------------------------------: | +| ViCLIP-L-14 \[[HuggingFace](https://huggingface.co/OpenGVLab/ViCLIP) \| [Aliyun](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/viclip/ViClip-InternVid-10M-FLT.pth )\] | InternVid-10M-FLT \[[HuggingFace](https://huggingface.co/datasets/OpenGVLab/InternVid) \| [OpenDataLab](https://opendatalab.com/shepshep/InternVid)\] | | +
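+
+As a quick sanity check of the released weights, the snippet below mirrors the steps in [demo.ipynb](https://github.com/OpenGVLab/InternVideo/blob/main/Data/InternVid/demo.ipynb): it decodes a clip with OpenCV and ranks candidate captions with the `viclip` package shipped in this folder. It assumes the `ViClip-InternVid-10M-FLT.pth` checkpoint has been placed in the `viclip` folder and that a CUDA device is available (the helpers default to `cuda`).
+
+```python
+import cv2
+
+from viclip import retrieve_text, _frame_from_video
+
+# Decode all frames of the bundled example clip; the helpers sample 8 frames internally.
+video = cv2.VideoCapture('example1.mp4')
+frames = [f for f in _frame_from_video(video)]
+
+captions = [
+    "A man plays fetch with his dog in a snowy yard.",
+    "A person shovels snow off the pavement.",
+]
+texts, probs = retrieve_text(frames, captions, name='viclip', topk=2)
+for t, p in zip(texts, probs):
+    print(f'{p:.4f}  {t}')
+```
+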
+ + +## Citation + +If you find this work useful for your research, please consider citing InternVid. Your acknowledgement would greatly help us in continuing to contribute resources to the research community. + +``` +@article{wang2023internvid, + title={InternVid: A Large-scale Video-Text Dataset for Multimodal Understanding and Generation}, + author={Wang, Yi and He, Yinan and Li, Yizhuo and Li, Kunchang and Yu, Jiashuo and Ma, Xin and Chen, Xinyuan and Wang, Yaohui and Luo, Ping and Liu, Ziwei and Wang, Yali and Wang, Limin and Qiao, Yu}, + journal={arXiv preprint arXiv:2307.06942}, + year={2023} +} + +@article{wang2022internvideo, + title={InternVideo: General Video Foundation Models via Generative and Discriminative Learning}, + author={Wang, Yi and Li, Kunchang and Li, Yizhuo and He, Yinan and Huang, Bingkun and Zhao, Zhiyu and Zhang, Hongjie and Xu, Jilan and Liu, Yi and Wang, Zun and Xing, Sen and Chen, Guo and Pan, Junting and Yu, Jiashuo and Wang, Yali and Wang, Limin and Qiao, Yu}, + journal={arXiv preprint arXiv:2212.03191}, + year={2022} +} +``` diff --git a/Data/InternVid/demo.ipynb b/Data/InternVid/demo.ipynb new file mode 100644 index 0000000..586abfc --- /dev/null +++ b/Data/InternVid/demo.ipynb @@ -0,0 +1,102 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "f86bc499", + "metadata": {}, + "source": [ + "## download ViCILP weights and put its pth file in viclip folder. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e7a90379-d9ee-45d9-9073-7ed5132fa6b1", + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import os\n", + "import cv2\n", + "\n", + "from viclip import retrieve_text, _frame_from_video" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "a425a5da-ceaf-4b89-9845-c8ba576902d8", + "metadata": {}, + "outputs": [], + "source": [ + "video = cv2.VideoCapture('example1.mp4')\n", + "frames = [x for x in _frame_from_video(video)]" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "3fb7397a-02ef-41b5-9ffe-f2363b277778", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "text: A man in a gray sweater plays fetch with his dog in the snowy yard, throwing a toy and watching it run. ~ prob: 0.8264\n", + "text: A playful dog and its owner wrestle in the snowy yard, chasing each other with joyous abandon. ~ prob: 0.1587\n", + "text: A pet dog excitedly runs through the snowy yard, chasing a toy thrown by its owner. ~ prob: 0.0141\n", + "text: A person dressed in a blue jacket shovels the snow-covered pavement outside their house. ~ prob: 0.0006\n", + "text: A playful dog slides down a snowy hill, wagging its tail with delight. 
~ prob: 0.0002\n" + ] + } + ], + "source": [ + "text_candidates = [\"A playful dog and its owner wrestle in the snowy yard, chasing each other with joyous abandon.\",\n", + " \"A man in a gray coat walks through the snowy landscape, pulling a sleigh loaded with toys.\",\n", + " \"A person dressed in a blue jacket shovels the snow-covered pavement outside their house.\",\n", + " \"A pet dog excitedly runs through the snowy yard, chasing a toy thrown by its owner.\",\n", + " \"A person stands on the snowy floor, pushing a sled loaded with blankets, preparing for a fun-filled ride.\",\n", + " \"A man in a gray hat and coat walks through the snowy yard, carefully navigating around the trees.\",\n", + " \"A playful dog slides down a snowy hill, wagging its tail with delight.\",\n", + " \"A person in a blue jacket walks their pet on a leash, enjoying a peaceful winter walk among the trees.\",\n", + " \"A man in a gray sweater plays fetch with his dog in the snowy yard, throwing a toy and watching it run.\",\n", + " \"A person bundled up in a blanket walks through the snowy landscape, enjoying the serene winter scenery.\"]\n", + "\n", + "texts, probs = retrieve_text(frames, text_candidates, name='viclip', topk=5)\n", + "\n", + "for t, p in zip(texts, probs):\n", + " print(f'text: {t} ~ prob: {p:.4f}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a2969ba6-19d0-4893-b071-b82fa046c312", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/Data/InternVid/example1.mp4 b/Data/InternVid/example1.mp4 new file mode 100644 index 0000000..815f841 Binary files /dev/null and b/Data/InternVid/example1.mp4 differ diff --git a/Data/InternVid/viclip/README.md b/Data/InternVid/viclip/README.md new file mode 100644 index 0000000..32897cd --- /dev/null +++ b/Data/InternVid/viclip/README.md @@ -0,0 +1,3 @@ +--- +license: mit +--- diff --git a/Data/InternVid/viclip/__init__.py b/Data/InternVid/viclip/__init__.py new file mode 100644 index 0000000..389eb2c --- /dev/null +++ b/Data/InternVid/viclip/__init__.py @@ -0,0 +1,71 @@ +from .simple_tokenizer import SimpleTokenizer as _Tokenizer +from .viclip import ViCLIP +import torch +import numpy as np +import cv2 + +clip_candidates = {'viclip':None, 'clip':None} + +def get_clip(name='viclip'): + global clip_candidates + m = clip_candidates[name] + if m is None: + if name == 'viclip': + tokenizer = _Tokenizer() + vclip = ViCLIP(tokenizer) + # m = vclip + m = (vclip, tokenizer) + else: + raise Exception('the target clip model is not found.') + + return m + +def get_text_feat_dict(texts, clip, tokenizer, text_feat_d={}): + for t in texts: + feat = clip.get_text_features(t, tokenizer, text_feat_d) + text_feat_d[t] = feat + return text_feat_d + +def get_vid_feat(frames, clip): + return clip.get_vid_features(frames) + +def _frame_from_video(video): + while video.isOpened(): + success, frame = video.read() + if success: + yield frame + else: + break + +v_mean = np.array([0.485, 0.456, 0.406]).reshape(1,1,3) +v_std = np.array([0.229, 0.224, 0.225]).reshape(1,1,3) +def normalize(data): + return 
(data/255.0-v_mean)/v_std + +def frames2tensor(vid_list, fnum=8, target_size=(224, 224), device=torch.device('cuda')): + assert(len(vid_list) >= fnum) + step = len(vid_list) // fnum + vid_list = vid_list[::step][:fnum] + vid_list = [cv2.resize(x[:,:,::-1], target_size) for x in vid_list] + vid_tube = [np.expand_dims(normalize(x), axis=(0, 1)) for x in vid_list] + vid_tube = np.concatenate(vid_tube, axis=1) + vid_tube = np.transpose(vid_tube, (0, 1, 4, 2, 3)) + vid_tube = torch.from_numpy(vid_tube).to(device, non_blocking=True).float() + return vid_tube + +def retrieve_text(frames, texts, name='viclip', topk=5, device=torch.device('cuda')): + clip, tokenizer = get_clip(name) + clip = clip.to(device) + frames_tensor = frames2tensor(frames, device=device) + vid_feat = get_vid_feat(frames_tensor, clip) + + text_feat_d = {} + text_feat_d = get_text_feat_dict(texts, clip, tokenizer, text_feat_d) + text_feats = [text_feat_d[t] for t in texts] + text_feats_tensor = torch.cat(text_feats, 0) + + probs, idxs = clip.get_predict_label(vid_feat, text_feats_tensor, top=topk) + + ret_texts = [texts[i] for i in idxs.numpy()[0].tolist()] + return ret_texts, probs.numpy()[0] + diff --git a/Data/InternVid/viclip/bpe_simple_vocab_16e6.txt.gz b/Data/InternVid/viclip/bpe_simple_vocab_16e6.txt.gz new file mode 100644 index 0000000..7b5088a Binary files /dev/null and b/Data/InternVid/viclip/bpe_simple_vocab_16e6.txt.gz differ diff --git a/Data/InternVid/viclip/simple_tokenizer.py b/Data/InternVid/viclip/simple_tokenizer.py new file mode 100644 index 0000000..0f2feb7 --- /dev/null +++ b/Data/InternVid/viclip/simple_tokenizer.py @@ -0,0 +1,135 @@ +import gzip +import html +import os +from functools import lru_cache + +import ftfy +import regex as re + + +@lru_cache() +def default_bpe(): + return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz") +# @lru_cache() +# def default_bpe(): +# return "bpe_simple_vocab_16e6.txt.gz" + + +@lru_cache() +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a corresponding list of unicode strings. + The reversible bpe codes work on unicode strings. + This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. + When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. + This is a signficant percentage of your normal, say, 32K bpe vocab. + To avoid that, we want lookup tables between utf-8 bytes and unicode strings. + And avoids mapping to whitespace/control characters the bpe code barfs on. + """ + bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8+n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def get_pairs(word): + """Return set of symbol pairs in a word. + Word is represented as tuple of symbols (symbols being variable-length strings). 
+ """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +def basic_clean(text): + text = ftfy.fix_text(text) + text = html.unescape(html.unescape(text)) + return text.strip() + + +def whitespace_clean(text): + text = re.sub(r'\s+', ' ', text) + text = text.strip() + return text + + +class SimpleTokenizer(object): + def __init__(self, bpe_path: str = default_bpe()): + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + merges = gzip.open(bpe_path).read().decode("utf-8").split('\n') + merges = merges[1:49152-256-2+1] + merges = [tuple(merge.split()) for merge in merges] + vocab = list(bytes_to_unicode().values()) + vocab = vocab + [v+'' for v in vocab] + for merge in merges: + vocab.append(''.join(merge)) + vocab.extend(['<|startoftext|>', '<|endoftext|>']) + self.encoder = dict(zip(vocab, range(len(vocab)))) + self.decoder = {v: k for k, v in self.encoder.items()} + self.bpe_ranks = dict(zip(merges, range(len(merges)))) + self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'} + self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE) + + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token[:-1]) + ( token[-1] + '',) + pairs = get_pairs(word) + + if not pairs: + return token+'' + + while True: + bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf'))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + new_word.extend(word[i:j]) + i = j + except: + new_word.extend(word[i:]) + break + + if word[i] == first and i < len(word)-1 and word[i+1] == second: + new_word.append(first+second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = ' '.join(word) + self.cache[token] = word + return word + + def encode(self, text): + bpe_tokens = [] + text = whitespace_clean(basic_clean(text)).lower() + for token in re.findall(self.pat, text): + token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8')) + bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')) + return bpe_tokens + + def decode(self, tokens): + text = ''.join([self.decoder[token] for token in tokens]) + text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('', ' ') + return text diff --git a/Data/InternVid/viclip/viclip.py b/Data/InternVid/viclip/viclip.py new file mode 100644 index 0000000..02ed605 --- /dev/null +++ b/Data/InternVid/viclip/viclip.py @@ -0,0 +1,229 @@ +import os +import logging + +import torch +from einops import rearrange +from torch import nn +import math + +# from .criterions import VTC_VTM_Loss +from .simple_tokenizer import SimpleTokenizer as _Tokenizer +from .viclip_vision import clip_joint_l14 +from .viclip_text import clip_text_l14 + +logger = logging.getLogger(__name__) + + +class ViCLIP(nn.Module): + """docstring for ViCLIP""" + + def __init__(self, tokenizer=None, pretrain=os.path.join(os.path.dirname(os.path.abspath(__file__)), "ViClip-InternVid-10M-FLT.pth"), freeze_text=True): + super(ViCLIP, self).__init__() + if tokenizer: + self.tokenizer = tokenizer + else: + self.tokenizer = _Tokenizer() + 
self.max_txt_l = 32 + + self.vision_encoder_name = 'vit_l14' + + self.vision_encoder_pretrained = False + self.inputs_image_res = 224 + self.vision_encoder_kernel_size = 1 + self.vision_encoder_center = True + self.video_input_num_frames = 8 + self.vision_encoder_drop_path_rate = 0.1 + self.vision_encoder_checkpoint_num = 24 + self.is_pretrain = pretrain + self.vision_width = 1024 + self.text_width = 768 + self.embed_dim = 768 + self.masking_prob = 0.9 + + self.text_encoder_name = 'vit_l14' + self.text_encoder_pretrained = False#'bert-base-uncased' + self.text_encoder_d_model = 768 + + self.text_encoder_vocab_size = 49408 + + + # create modules. + self.vision_encoder = self.build_vision_encoder() + self.text_encoder = self.build_text_encoder() + + self.temp = nn.parameter.Parameter(torch.ones([]) * 1 / 100.0) + self.temp_min = 1 / 100.0 + + if pretrain: + logger.info(f"Load pretrained weights from {pretrain}") + state_dict = torch.load(pretrain, map_location='cpu')['model'] + self.load_state_dict(state_dict) + + # Freeze weights + if freeze_text: + self.freeze_text() + + + + def freeze_text(self): + """freeze text encoder""" + for p in self.text_encoder.parameters(): + p.requires_grad = False + + def no_weight_decay(self): + ret = {"temp"} + ret.update( + {"vision_encoder." + k for k in self.vision_encoder.no_weight_decay()} + ) + ret.update( + {"text_encoder." + k for k in self.text_encoder.no_weight_decay()} + ) + + return ret + + def forward(self, image, text, raw_text, idx, log_generation=None, return_sims=False): + """forward and calculate loss. + + Args: + image (torch.Tensor): The input images. Shape: [B,T,C,H,W]. + text (dict): TODO + idx (torch.Tensor): TODO + + Returns: TODO + + """ + self.clip_contrastive_temperature() + + vision_embeds = self.encode_vision(image) + text_embeds = self.encode_text(raw_text) + if return_sims: + sims = torch.nn.functional.normalize(vision_embeds, dim=-1) @ \ + torch.nn.functional.normalize(text_embeds, dim=-1).transpose(0, 1) + return sims + + # calculate loss + + ## VTC loss + loss_vtc = self.clip_loss.vtc_loss( + vision_embeds, text_embeds, idx, self.temp, all_gather=True + ) + + return dict( + loss_vtc=loss_vtc, + ) + + def encode_vision(self, image, test=False): + """encode image / videos as features. + + Args: + image (torch.Tensor): The input images. + test (bool): Whether testing. + + Returns: tuple. + - vision_embeds (torch.Tensor): The features of all patches. Shape: [B,T,L,C]. + - pooled_vision_embeds (torch.Tensor): The pooled features. Shape: [B,T,C]. + + """ + if image.ndim == 5: + image = image.permute(0, 2, 1, 3, 4).contiguous() + else: + image = image.unsqueeze(2) + + if not test and self.masking_prob > 0.0: + return self.vision_encoder( + image, masking_prob=self.masking_prob + ) + + return self.vision_encoder(image) + + def encode_text(self, text): + """encode text. + Args: + text (dict): The output of huggingface's `PreTrainedTokenizer`. contains keys: + - input_ids (torch.Tensor): Token ids to be fed to a model. Shape: [B,L]. + - attention_mask (torch.Tensor): The mask indicate padded tokens. Shape: [B,L]. 0 is padded token. + - other keys refer to "https://huggingface.co/docs/transformers/v4.21.2/en/main_classes/tokenizer#transformers.PreTrainedTokenizer.__call__". + Returns: tuple. + - text_embeds (torch.Tensor): The features of all tokens. Shape: [B,L,C]. + - pooled_text_embeds (torch.Tensor): The pooled features. Shape: [B,C]. 
+ + """ + device = next(self.text_encoder.parameters()).device + text = self.text_encoder.tokenize( + text, context_length=self.max_txt_l + ).to(device) + text_embeds = self.text_encoder(text) + return text_embeds + + @torch.no_grad() + def clip_contrastive_temperature(self, min_val=0.001, max_val=0.5): + """Seems only used during pre-training""" + self.temp.clamp_(min=self.temp_min) + + def build_vision_encoder(self): + """build vision encoder + Returns: (vision_encoder, vision_layernorm). Each is a `nn.Module`. + + """ + encoder_name = self.vision_encoder_name + if encoder_name != "vit_l14": + raise ValueError(f"Not implemented: {encoder_name}") + vision_encoder = clip_joint_l14( + pretrained=self.vision_encoder_pretrained, + input_resolution=self.inputs_image_res, + kernel_size=self.vision_encoder_kernel_size, + center=self.vision_encoder_center, + num_frames=self.video_input_num_frames, + drop_path=self.vision_encoder_drop_path_rate, + checkpoint_num=self.vision_encoder_checkpoint_num, + ) + return vision_encoder + + def build_text_encoder(self): + """build text_encoder and possiblly video-to-text multimodal fusion encoder. + Returns: nn.Module. The text encoder + + """ + encoder_name = self.text_encoder_name + if encoder_name != "vit_l14": + raise ValueError(f"Not implemented: {encoder_name}") + text_encoder = clip_text_l14( + pretrained=self.text_encoder_pretrained, + embed_dim=self.text_encoder_d_model, + context_length=self.max_txt_l, + vocab_size=self.text_encoder_vocab_size, + checkpoint_num=0, + ) + + return text_encoder + + def get_text_encoder(self): + """get text encoder, used for text and cross-modal encoding""" + encoder = self.text_encoder + return encoder.bert if hasattr(encoder, "bert") else encoder + + def get_text_features(self, input_text, tokenizer, text_feature_dict={}): + if input_text in text_feature_dict: + return text_feature_dict[input_text] + text_template= f"{input_text}" + with torch.no_grad(): + # text_token = tokenizer.encode(text_template).cuda() + text_features = self.encode_text(text_template).float() + text_features /= text_features.norm(dim=-1, keepdim=True) + text_feature_dict[input_text] = text_features + return text_features + + def get_vid_features(self, input_frames): + with torch.no_grad(): + clip_feat = self.encode_vision(input_frames,test=True).float() + clip_feat /= clip_feat.norm(dim=-1, keepdim=True) + return clip_feat + + def get_predict_label(self, clip_feature, text_feats_tensor, top=5): + label_probs = (100.0 * clip_feature @ text_feats_tensor.T).softmax(dim=-1) + top_probs, top_labels = label_probs.cpu().topk(top, dim=-1) + return top_probs, top_labels + + +if __name__ =="__main__": + tokenizer = _Tokenizer() diff --git a/Data/InternVid/viclip/viclip_text.py b/Data/InternVid/viclip/viclip_text.py new file mode 100644 index 0000000..6d9a959 --- /dev/null +++ b/Data/InternVid/viclip/viclip_text.py @@ -0,0 +1,271 @@ +import os +import logging +from collections import OrderedDict +from pkg_resources import packaging +from .simple_tokenizer import SimpleTokenizer as _Tokenizer + +import numpy as np +import torch +import torch.nn.functional as F +from torch import nn +import torch.utils.checkpoint as checkpoint +import functools + +logger = logging.getLogger(__name__) + + +# On P1, model extracted from https://huggingface.co/laion/CLIP-ViT-L-14-DataComp.XL-s13B-b90K +MODEL_PATH = 'https://huggingface.co/laion/CLIP-ViT-L-14-DataComp.XL-s13B-b90K' +_MODELS = { + "ViT-L/14": os.path.join(MODEL_PATH, "vit_l14_text.pth"), +} + + +class 
LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x: torch.Tensor): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None): + super().__init__() + + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + def attention(self, x: torch.Tensor): + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def forward(self, x: torch.Tensor): + x = x + self.attention(self.ln_1(x)) + x = x + self.mlp(self.ln_2(x)) + return x + + +class Transformer(nn.Module): + def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None, + checkpoint_num: int = 0): + super().__init__() + self.width = width + self.layers = layers + self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)]) + + self.checkpoint_num = checkpoint_num + + def forward(self, x: torch.Tensor): + if self.checkpoint_num > 0: + segments = min(self.checkpoint_num, len(self.resblocks)) + return checkpoint.checkpoint_sequential(self.resblocks, segments, x) + else: + return self.resblocks(x) + + +class CLIP_TEXT(nn.Module): + def __init__( + self, + embed_dim: int, + context_length: int, + vocab_size: int, + transformer_width: int, + transformer_heads: int, + transformer_layers: int, + checkpoint_num: int, + ): + super().__init__() + + self.context_length = context_length + self._tokenizer = _Tokenizer() + + self.transformer = Transformer( + width=transformer_width, + layers=transformer_layers, + heads=transformer_heads, + attn_mask=self.build_attention_mask(), + checkpoint_num=checkpoint_num, + ) + + self.vocab_size = vocab_size + self.token_embedding = nn.Embedding(vocab_size, transformer_width) + self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width)) + self.ln_final = LayerNorm(transformer_width) + + self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim)) + + def no_weight_decay(self): + return {'token_embedding', 'positional_embedding'} + + @functools.lru_cache(maxsize=None) + def build_attention_mask(self): + # lazily create causal attention mask, with full attention between the vision tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.empty(self.context_length, self.context_length) + mask.fill_(float("-inf")) + mask.triu_(1) # zero out the lower diagonal + return mask + + def tokenize(self, texts, context_length=77, truncate=True): + """ + Returns the tokenized representation of given input string(s) + Parameters + ---------- + texts : Union[str, List[str]] + An input string or a list of input strings to tokenize + context_length : int + The context length to use; all CLIP models use 77 as the context length + truncate: bool + Whether to truncate the text in case its encoding is longer than the context length + Returns + ------- + A two-dimensional tensor containing the resulting 
tokens, shape = [number of input strings, context_length]. + We return LongTensor when torch version is <1.8.0, since older index_select requires indices to be long. + """ + if isinstance(texts, str): + texts = [texts] + + sot_token = self._tokenizer.encoder["<|startoftext|>"] + eot_token = self._tokenizer.encoder["<|endoftext|>"] + all_tokens = [[sot_token] + self._tokenizer.encode(text) + [eot_token] for text in texts] + if packaging.version.parse(torch.__version__) < packaging.version.parse("1.8.0"): + result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) + else: + result = torch.zeros(len(all_tokens), context_length, dtype=torch.int) + + for i, tokens in enumerate(all_tokens): + if len(tokens) > context_length: + if truncate: + tokens = tokens[:context_length] + tokens[-1] = eot_token + else: + raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}") + result[i, :len(tokens)] = torch.tensor(tokens) + + return result + + def forward(self, text): + x = self.token_embedding(text) # [batch_size, n_ctx, d_model] + + x = x + self.positional_embedding + x = x.permute(1, 0, 2) # NLD -> LND + x = self.transformer(x) + x = x.permute(1, 0, 2) # LND -> NLD + x = self.ln_final(x) + + # x.shape = [batch_size, n_ctx, transformer.width] + # take features from the eot embedding (eot_token is the highest number in each sequence) + x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection + + return x + + +def clip_text_b16( + embed_dim=512, + context_length=77, + vocab_size=49408, + transformer_width=512, + transformer_heads=8, + transformer_layers=12, +): + raise NotImplementedError + model = CLIP_TEXT( + embed_dim, + context_length, + vocab_size, + transformer_width, + transformer_heads, + transformer_layers + ) + pretrained = _MODELS["ViT-B/16"] + logger.info(f"Load pretrained weights from {pretrained}") + state_dict = torch.load(pretrained, map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +def clip_text_l14( + embed_dim=768, + context_length=77, + vocab_size=49408, + transformer_width=768, + transformer_heads=12, + transformer_layers=12, + checkpoint_num=0, + pretrained=True, +): + model = CLIP_TEXT( + embed_dim, + context_length, + vocab_size, + transformer_width, + transformer_heads, + transformer_layers, + checkpoint_num, + ) + if pretrained: + if isinstance(pretrained, str) and pretrained != "bert-base-uncased": + pretrained = _MODELS[pretrained] + else: + pretrained = _MODELS["ViT-L/14"] + logger.info(f"Load pretrained weights from {pretrained}") + state_dict = torch.load(pretrained, map_location='cpu') + if context_length != state_dict["positional_embedding"].size(0): + # assert context_length < state_dict["positional_embedding"].size(0), "Cannot increase context length." 
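+            # ViCLIP builds this text tower with a 32-token context (max_txt_l), which is shorter
+            # than the 77-token table stored in the pretrained checkpoint, so the positional
+            # embedding is truncated below; if a longer context were requested it would be
+            # zero-padded instead.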
+ print(f"Resize positional embedding from {state_dict['positional_embedding'].size(0)} to {context_length}") + if context_length < state_dict["positional_embedding"].size(0): + state_dict["positional_embedding"] = state_dict["positional_embedding"][:context_length] + else: + state_dict["positional_embedding"] = F.pad( + state_dict["positional_embedding"], + (0, 0, 0, context_length - state_dict["positional_embedding"].size(0)), + value=0, + ) + + message = model.load_state_dict(state_dict, strict=False) + print(f"Load pretrained weights from {pretrained}: {message}") + return model.eval() + + +def clip_text_l14_336( + embed_dim=768, + context_length=77, + vocab_size=49408, + transformer_width=768, + transformer_heads=12, + transformer_layers=12, +): + raise NotImplementedError + model = CLIP_TEXT( + embed_dim, + context_length, + vocab_size, + transformer_width, + transformer_heads, + transformer_layers + ) + pretrained = _MODELS["ViT-L/14_336"] + logger.info(f"Load pretrained weights from {pretrained}") + state_dict = torch.load(pretrained, map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +def build_clip(config): + model_cls = config.text_encoder.clip_teacher + model = eval(model_cls)() + return model diff --git a/Data/InternVid/viclip/viclip_vision.py b/Data/InternVid/viclip/viclip_vision.py new file mode 100644 index 0000000..1d0598a --- /dev/null +++ b/Data/InternVid/viclip/viclip_vision.py @@ -0,0 +1,354 @@ +#!/usr/bin/env python +import os +import logging +from collections import OrderedDict + +import torch +from torch import nn +from einops import rearrange +from timm.models.layers import DropPath +from timm.models.registry import register_model + +import torch.utils.checkpoint as checkpoint + +# from models.utils import load_temp_embed_with_mismatch + +logger = logging.getLogger(__name__) + +def load_temp_embed_with_mismatch(temp_embed_old, temp_embed_new, add_zero=True): + """ + Add/Remove extra temporal_embeddings as needed. + https://arxiv.org/abs/2104.00650 shows adding zero paddings works. + + temp_embed_old: (1, num_frames_old, 1, d) + temp_embed_new: (1, num_frames_new, 1, d) + add_zero: bool, if True, add zero, else, interpolate trained embeddings. + """ + # TODO zero pad + num_frms_new = temp_embed_new.shape[1] + num_frms_old = temp_embed_old.shape[1] + logger.info(f"Load temporal_embeddings, lengths: {num_frms_old}-->{num_frms_new}") + if num_frms_new > num_frms_old: + if add_zero: + temp_embed_new[ + :, :num_frms_old + ] = temp_embed_old # untrained embeddings are zeros. + else: + temp_embed_new = interpolate_temporal_pos_embed(temp_embed_old, num_frms_new) + elif num_frms_new < num_frms_old: + temp_embed_new = temp_embed_old[:, :num_frms_new] + else: # = + temp_embed_new = temp_embed_old + return temp_embed_new + + +# On P1, model extracted from https://huggingface.co/laion/CLIP-ViT-L-14-DataComp.XL-s13B-b90K +MODEL_PATH = '' +_MODELS = { + "ViT-L/14": os.path.join(MODEL_PATH, "ViClip-InternVid-10M-FLT.pth"), +} + + +class QuickGELU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def __init__(self, d_model, n_head, drop_path=0., attn_mask=None, dropout=0.): + super().__init__() + + self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + # logger.info(f'Droppath: {drop_path}') + self.attn = nn.MultiheadAttention(d_model, n_head, dropout=dropout) + self.ln_1 = nn.LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("drop1", nn.Dropout(dropout)), + ("c_proj", nn.Linear(d_model * 4, d_model)), + ("drop2", nn.Dropout(dropout)), + ])) + self.ln_2 = nn.LayerNorm(d_model) + self.attn_mask = attn_mask + + def attention(self, x): + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def forward(self, x): + x = x + self.drop_path1(self.attention(self.ln_1(x))) + x = x + self.drop_path2(self.mlp(self.ln_2(x))) + return x + + +class Transformer(nn.Module): + def __init__(self, width, layers, heads, drop_path=0., checkpoint_num=0, dropout=0.): + super().__init__() + dpr = [x.item() for x in torch.linspace(0, drop_path, layers)] + self.resblocks = nn.ModuleList() + for idx in range(layers): + self.resblocks.append(ResidualAttentionBlock(width, heads, drop_path=dpr[idx], dropout=dropout)) + self.checkpoint_num = checkpoint_num + + def forward(self, x): + for idx, blk in enumerate(self.resblocks): + if idx < self.checkpoint_num: + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + return x + + +class VisionTransformer(nn.Module): + def __init__( + self, input_resolution, patch_size, width, layers, heads, output_dim=None, + kernel_size=1, num_frames=8, drop_path=0, checkpoint_num=0, dropout=0., + temp_embed=True, + ): + super().__init__() + self.output_dim = output_dim + self.conv1 = nn.Conv3d( + 3, width, + (kernel_size, patch_size, patch_size), + (kernel_size, patch_size, patch_size), + (0, 0, 0), bias=False + ) + + scale = width ** -0.5 + self.class_embedding = nn.Parameter(scale * torch.randn(width)) + self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)) + self.ln_pre = nn.LayerNorm(width) + if temp_embed: + self.temporal_positional_embedding = nn.Parameter(torch.zeros(1, num_frames, width)) + + self.transformer = Transformer( + width, layers, heads, drop_path=drop_path, checkpoint_num=checkpoint_num, + dropout=dropout) + + self.ln_post = nn.LayerNorm(width) + if output_dim is not None: + self.proj = nn.Parameter(torch.empty(width, output_dim)) + else: + self.proj = None + + self.dropout = nn.Dropout(dropout) + + def get_num_layers(self): + return len(self.transformer.resblocks) + + @torch.jit.ignore + def no_weight_decay(self): + return {'positional_embedding', 'class_embedding', 'temporal_positional_embedding'} + + def mask_tokens(self, inputs, masking_prob=0.0): + B, L, _ = inputs.shape + + # This is different from text as we are masking a fix number of tokens + Lm = int(masking_prob * L) + masked_indices = torch.zeros(B, L) + indices = torch.argsort(torch.rand_like(masked_indices), dim=-1)[:, :Lm] + batch_indices = ( + torch.arange(masked_indices.shape[0]).unsqueeze(-1).expand_as(indices) + ) + masked_indices[batch_indices, indices] = 1 + + masked_indices = masked_indices.bool() + + return inputs[~masked_indices].reshape(B, -1, inputs.shape[-1]) + + def forward(self, x, masking_prob=0.0): + x = self.conv1(x) # shape = [*, width, grid, grid] + B, C, T, H, W = x.shape + x = x.permute(0, 2, 3, 4, 1).reshape(B * T, H * W, C) + + x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, 
device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] + x = x + self.positional_embedding.to(x.dtype) + + # temporal pos + cls_tokens = x[:B, :1, :] + x = x[:, 1:] + x = rearrange(x, '(b t) n m -> (b n) t m', b=B, t=T) + if hasattr(self, 'temporal_positional_embedding'): + if x.size(1) == 1: + # This is a workaround for unused parameter issue + x = x + self.temporal_positional_embedding.mean(1) + else: + x = x + self.temporal_positional_embedding + x = rearrange(x, '(b n) t m -> b (n t) m', b=B, t=T) + + if masking_prob > 0.0: + x = self.mask_tokens(x, masking_prob) + + x = torch.cat((cls_tokens, x), dim=1) + + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) #BND -> NBD + x = self.transformer(x) + + x = self.ln_post(x) + + if self.proj is not None: + x = self.dropout(x[0]) @ self.proj + else: + x = x.permute(1, 0, 2) #NBD -> BND + + return x + + +def inflate_weight(weight_2d, time_dim, center=True): + logger.info(f'Init center: {center}') + if center: + weight_3d = torch.zeros(*weight_2d.shape) + weight_3d = weight_3d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1) + middle_idx = time_dim // 2 + weight_3d[:, :, middle_idx, :, :] = weight_2d + else: + weight_3d = weight_2d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1) + weight_3d = weight_3d / time_dim + return weight_3d + + +def load_state_dict(model, state_dict, input_resolution=224, patch_size=16, center=True): + state_dict_3d = model.state_dict() + for k in state_dict.keys(): + if k in state_dict_3d.keys() and state_dict[k].shape != state_dict_3d[k].shape: + if len(state_dict_3d[k].shape) <= 2: + logger.info(f'Ignore: {k}') + continue + logger.info(f'Inflate: {k}, {state_dict[k].shape} => {state_dict_3d[k].shape}') + time_dim = state_dict_3d[k].shape[2] + state_dict[k] = inflate_weight(state_dict[k], time_dim, center=center) + + pos_embed_checkpoint = state_dict['positional_embedding'] + embedding_size = pos_embed_checkpoint.shape[-1] + num_patches = (input_resolution // patch_size) ** 2 + orig_size = int((pos_embed_checkpoint.shape[-2] - 1) ** 0.5) + new_size = int(num_patches ** 0.5) + if orig_size != new_size: + logger.info(f'Pos_emb from {orig_size} to {new_size}') + extra_tokens = pos_embed_checkpoint[:1] + pos_tokens = pos_embed_checkpoint[1:] + pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) + pos_tokens = torch.nn.functional.interpolate( + pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False) + pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(0, 2) + new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=0) + state_dict['positional_embedding'] = new_pos_embed + + message = model.load_state_dict(state_dict, strict=False) + logger.info(f"Load pretrained weights: {message}") + + +@register_model +def clip_joint_b16( + pretrained=True, input_resolution=224, kernel_size=1, + center=True, num_frames=8, drop_path=0. 
+): + model = VisionTransformer( + input_resolution=input_resolution, patch_size=16, + width=768, layers=12, heads=12, output_dim=512, + kernel_size=kernel_size, num_frames=num_frames, + drop_path=drop_path, + ) + raise NotImplementedError + if pretrained: + logger.info('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu') + load_state_dict(model, state_dict, input_resolution=input_resolution, patch_size=16, center=center) + return model.eval() + + +@register_model +def clip_joint_l14( + pretrained=False, input_resolution=224, kernel_size=1, + center=True, num_frames=8, drop_path=0., checkpoint_num=0, + dropout=0., +): + model = VisionTransformer( + input_resolution=input_resolution, patch_size=14, + width=1024, layers=24, heads=16, output_dim=768, + kernel_size=kernel_size, num_frames=num_frames, + drop_path=drop_path, checkpoint_num=checkpoint_num, + dropout=dropout, + ) + + if pretrained: + if isinstance(pretrained, str): + model_name = pretrained + else: + model_name = "ViT-L/14" + logger.info('load pretrained weights') + state_dict = torch.load(_MODELS[model_name], map_location='cpu') + load_state_dict(model, state_dict, input_resolution=input_resolution, patch_size=14, center=center) + return model.eval() + + +@register_model +def clip_joint_l14_336( + pretrained=True, input_resolution=336, kernel_size=1, + center=True, num_frames=8, drop_path=0. +): + raise NotImplementedError + model = VisionTransformer( + input_resolution=input_resolution, patch_size=14, + width=1024, layers=24, heads=16, output_dim=768, + kernel_size=kernel_size, num_frames=num_frames, + drop_path=drop_path, + ) + if pretrained: + logger.info('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu') + load_state_dict(model, state_dict, input_resolution=input_resolution, patch_size=14, center=center) + return model.eval() + + +def interpolate_pos_embed_vit(state_dict, new_model): + key = "vision_encoder.temporal_positional_embedding" + if key in state_dict: + vision_temp_embed_new = new_model.state_dict()[key] + vision_temp_embed_new = vision_temp_embed_new.unsqueeze(2) # [1, n, d] -> [1, n, 1, d] + vision_temp_embed_old = state_dict[key] + vision_temp_embed_old = vision_temp_embed_old.unsqueeze(2) + + state_dict[key] = load_temp_embed_with_mismatch( + vision_temp_embed_old, vision_temp_embed_new, add_zero=False + ).squeeze(2) + + key = "text_encoder.positional_embedding" + if key in state_dict: + text_temp_embed_new = new_model.state_dict()[key] + text_temp_embed_new = text_temp_embed_new.unsqueeze(0).unsqueeze(2) # [n, d] -> [1, n, 1, d] + text_temp_embed_old = state_dict[key] + text_temp_embed_old = text_temp_embed_old.unsqueeze(0).unsqueeze(2) + + state_dict[key] = load_temp_embed_with_mismatch( + text_temp_embed_old, text_temp_embed_new, add_zero=False + ).squeeze(2).squeeze(0) + return state_dict + + +if __name__ == '__main__': + import time + from fvcore.nn import FlopCountAnalysis + from fvcore.nn import flop_count_table + import numpy as np + + seed = 4217 + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + num_frames = 8 + + # model = clip_joint_b16(pretrained=True, kernel_size=1, num_frames=8, num_classes=400, drop_path=0.1) + # logger.info(model) + model = clip_joint_l14(pretrained=False) + + flops = FlopCountAnalysis(model, torch.rand(1, 3, num_frames, 224, 224)) + s = time.time() + logger.info(flop_count_table(flops, max_depth=1)) + 
logger.info(time.time()-s) + # logger.info(model(torch.rand(1, 3, num_frames, 224, 224)).shape) diff --git a/Data/instruction_data/README.md b/Data/instruction_data/README.md new file mode 100644 index 0000000..9abf8f3 --- /dev/null +++ b/Data/instruction_data/README.md @@ -0,0 +1,41 @@ +# Instruction data for [VideoChat](https://github.com/OpenGVLab/Ask-Anything/tree/main/video_chat) + +# :fire: Updates +- **2023/05/11**: Release the **V1**: [Google Drive](https://drive.google.com/file/d/1C-7xmf42QUEi4ApXTcxBHr5nLvTWXyUi/view?usp=sharing) | [Aliyun OSS](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/data/videochat/videochat_instruct_11k.json) + +# :speech_balloon: V1: 7K detailed descriptions + 4K multi-turn conversations + + We build a video-centric multimodal instruction data based on WebVid-10M. The corresponding detailed descriptions and multi-turn conversations generations are produced by ChatGPT based on video text (aided by [**VideoChat-Text**](https://github.com/OpenGVLab/Ask-Anything/tree/main/video_chat_with_ChatGPT)) with several prompts concerning **spatiotemporal features**. Compared with detailed video descriptions, video conversations are introduced to further improve data diversity by introducing **temporal and casual features** in the video instruction data. + +
+<details>
+<summary>Example of detailed video description</summary>
+<img src="assert/detailed_description.png" alt="Example of detailed video description">
+</details>
+
+<details>
+<summary>Example of video conversation</summary>
+<img src="assert/conversation.png" alt="Example of video conversation">
+</details>
+
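+
+To take a quick look at the released V1 file before writing a dataloader, a minimal sketch is given below. It only assumes that `videochat_instruct_11k.json` has been downloaded from one of the links above; the per-sample field names are not documented here, so inspect one record rather than hard-coding keys.
+
+```python
+import json
+
+# Path to the downloaded V1 annotation file (adjust as needed).
+with open('videochat_instruct_11k.json') as f:
+    data = json.load(f)
+
+print(type(data), len(data))
+# Print one sample to see its schema before building a loader around it.
+sample = data[0] if isinstance(data, list) else next(iter(data.values()))
+print(sample)
+```
+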
+ +# :page_facing_up: Citation + +If you find this project useful in your research, please consider cite: +```BibTeX +@article{2023videochat, + title={VideoChat: Chat-Centric Video Understanding}, + author={Li, Kunchang and He, Yinan and Wang, Yi and Li, Yizhuo and Wang, Wenhai and Luo, Ping and Wang, Yali and Wang, Limin and Qiao, Yu}, + journal={arXiv preprint arXiv:2305.06355}, + year={2023} +} + +@article{wang2022internvideo, + title={InternVideo: General Video Foundation Models via Generative and Discriminative Learning}, + author={Wang, Yi and Li, Kunchang and Li, Yizhuo and He, Yinan and Huang, Bingkun and Zhao, Zhiyu and Zhang, Hongjie and Xu, Jilan and Liu, Yi and Wang, Zun and Xing, Sen and Chen, Guo and Pan, Junting and Yu, Jiashuo and Wang, Yali and Wang, Limin and Qiao, Yu}, + journal={arXiv preprint arXiv:2212.03191}, + year={2022} +} +``` diff --git a/Data/instruction_data/assert/conversation.png b/Data/instruction_data/assert/conversation.png new file mode 100644 index 0000000..64220b0 Binary files /dev/null and b/Data/instruction_data/assert/conversation.png differ diff --git a/Data/instruction_data/assert/detailed_description.png b/Data/instruction_data/assert/detailed_description.png new file mode 100644 index 0000000..6a93076 Binary files /dev/null and b/Data/instruction_data/assert/detailed_description.png differ diff --git a/Downstream/Open-Set-Action-Recognition/.gitignore b/Downstream/Open-Set-Action-Recognition/.gitignore new file mode 100644 index 0000000..c8a1332 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/.gitignore @@ -0,0 +1,143 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class +**/*.pyc + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + +.github +.pylintrc +.pre-commit-config.yaml +.readthedocs.yml + +# custom +/data +.vscode +.idea +*.pkl +*.pkl.json +*.log.json +benchlist.txt +work_dirs/ + +# Pytorch +*.pth + +# Profile +*.prof + +# added by wentao +*.avi +temp/ +*.npz +*.pkl +experiments/*/results* +experiments/*/logs +*.o +*backup* +tools/data/mit/log* +tools/data/*log* +.github/workflows +demo/hmdb51* +demo/ucf101* +demo/*.avi +demo/*.mp4 \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/README.md b/Downstream/Open-Set-Action-Recognition/README.md new file mode 100644 index 0000000..cd6bd3d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/README.md @@ -0,0 +1,112 @@ +# Open Set Action Recognition + +## Table of Contents +1. [Introduction](#introduction) +1. [Installation](#installation) +1. [Datasets](#datasets) +1. [Testing](#testing) +1. [Training](#training) +1. [Model Zoo](#model-zoo) + + +## Introduction +VideoIntern not only recognizes known classes accurately but also has a strong perception ability for unknown classes that are out of training classes. This repo is one of the generalization tasks——Open Set Action Recognition. Specifically, we finetune VideoMAE backbone with a linear classification head on UCF-101 dataset from the evidential deep learning (EDL) perspective, without any model calibration methods used in [DEAR](https://github.com/Cogito2012/DEAR). Our VideoIntern model achieves significant and consistent performance gains compared to multiple action recognition backbones (i.e., I3D, TSM, SlowFast, TPN), which are trained in the DEAR way, with HMDB-51 or MiT-v2 dataset as the unknown. + +## Installation +This repo is developed from [MMAction2](https://github.com/open-mmlab/mmaction2) codebase. + +### Installation Steps +a. Create a conda virtual environment of this repo, and activate it: + +```shell +conda create -n OSAR python=3.7 -y +conda activate OSAR +``` + +b. Install PyTorch and TorchVision following the [official instructions](https://pytorch.org/), e.g., + +```shell +conda install pytorch=1.7.0 cudatoolkit=11.0 torchvision=0.8.0 -c pytorch +``` +c. Install mmcv, we recommend you to install the pre-build mmcv as below. + +```shell +pip install mmcv-full==1.2.2 -f https://download.openmmlab.com/mmcv/dist/cu110/torch1.7.0/index.html +``` +**Important:** If you have already installed `mmcv` and try to install `mmcv-full`, you have to uninstall `mmcv` first by running `pip uninstall mmcv`. Otherwise, there will be `ModuleNotFoundError`. + +d. 
Clone the source code of this repo: + +```shell +git clone https://github.com/VideoIntern/Open-Set-Action-Recognition.git Open-Set-Action-Recognition-main +cd Open-Set-Action-Recognition-main +``` +e. Install build requirements and then install OSAR. + +```shell +pip install -r requirements/build.txt +python setup.py develop +``` + +If no error appears in your installation steps, then you are all set! + +## Datasets + +This repo uses standard video action datasets, i.e., UCF-101 for closed set training, and HMDB-51 and MiT-v2 test sets as two different unknowns. Please refer to the default [MMAction2 dataset setup steps](/tools/data/ucf101/README.md) to setup these three datasets correctly. + +**Note**: You can just ignore the `Step 3. Extract RGB and Flow` in the referred setup steps since all codes related to our paper do not rely on extracted frames and optical flow. This will save you large amount of disk space! + +## Testing + +To test our pre-trained models (see the [Model Zoo](#model-zoo)), you need to download a model file and unzip it under `work_dir`. Let's take the `I3D`-based DEAR model as an example. First, download the [pre-trained I3D-based models](https://drive.google.com/drive/folders/1TguABfmy0PE6jx9fflePQySe3jXWnsc0?usp=sharing), where the full DEAR model is saved in the folder `finetune_ucf101_i3d_edlnokl_avuc_debias`. The following directory tree is for your reference to place the downloaded files. +```shell +work_dirs +├── mae +│ ├── finetune_ucf101_mae_edlnokl +│ │   └── latest.pth + +``` +a. Get Uncertainty Threshold. +The threshold value of one model will be reported. +```shell +cd experiments/mae +# run the thresholding with BATCH_SIZE=16 on 8 GPUs +bash run_get_mae_threshold.sh edlnokl 16 8 +``` + +b. Out-of-Distribution Detection. +The uncertainty distribution figure of a specified model will be reported. +```shell +cd experiments/mae +bash run_ood_mae_dist_detection.sh HMDB edlnokl 8 +``` + +c. Compute AUROC. +The AUROC score of a specified model will be reported. +```shell +cd experiments/mae/results +python compute_auroc.py +``` + +## Training + +```shell +cd experiments/mae +bash finetune_mae_edlnokl_ucf101.sh 8 +``` + +## Model Zoo + +The pre-trained weights (checkpoints) are available below. 
+| Model | Checkpoint | Train Config | Test Config | Open Set AUC (%) | Closed Set ACC (%) | +|:--|:--:|:--:|:--:|:--:|:--:| +|I3D + DEAR |[ckpt](https://drive.google.com/file/d/1oRNBH0aAhFpcJSBqWlT4x0ru7iHfdndW/view?usp=sharing)| [train](configs/recognition/i3d/finetune_ucf101_i3d_edlnokl_avuc_debias.py) | [test](configs/recognition/i3d/inference_i3d_enn.py) | 77.08 / 81.54 | 93.89 | +|TSM + DEAR | [ckpt](https://drive.google.com/file/d/1TM1c28jRyZpOrWqwaQPYXFBZJXHQp__9/view?usp=sharing)| [train](configs/recognition/tsm/finetune_ucf101_tsm_edlnokl_avuc_debias.py) | [test](configs/recognition/tsm/inference_tsm_enn.py) | 78.65 / 83.92 | 94.48 | +|TPN + DEAR | [ckpt](https://drive.google.com/file/d/1jorfFMMzWd5xDCfZsemoWD8Rg7DbH16u/view?usp=sharing)| [train](configs/recognition/tpn/tpn_slowonly_edlloss_nokl_avuc_debias_r50_8x8x1_150e_kinetics_rgb.py) | [test](configs/recognition/tpn/inference_tpn_slowonly_enn.py) | 79.23 / 81.80 | 96.30 | +|SlowFast + DEAR |[ckpt](https://drive.google.com/file/d/13LNRv0BYkVfzCA95RB5dCp53MmErRL5D/view?usp=sharing)| [train](configs/recognition/slowfast/finetune_ucf101_slowfast_edlnokl_avuc_debias.py) | [test](configs/recognition/slowfast/inference_slowfast_enn.py) | 82.94 / 86.99 | 96.48 | +|InternVideo-B + EDL |[ckpt](https://drive.google.com/file/d/1lW1mHCbyfi0tvIxAjVzjr3g-AgK61ND3/view?usp=share_link)| [train](configs/recognition/mae/finetune_ucf101_mae_edlnokl.py) | [test](configs/recognition/mae/inference_mae_enn.py) | 83.21 / 88.98 | 96.91 | +|InternVideo-L + EDL |[ckpt](https://drive.google.com/file/d/1lW1mHCbyfi0tvIxAjVzjr3g-AgK61ND3/view?usp=share_link)| [train](configs/recognition/mae/finetune_ucf101_mae_edlnokl.py) | [test](configs/recognition/mae/inference_mae_enn.py) | 83.82 / 91.13 | 97.36 | +|InternVideo-H + EDL |[ckpt](https://drive.google.com/file/d/1lW1mHCbyfi0tvIxAjVzjr3g-AgK61ND3/view?usp=share_link)| [train](configs/recognition/mae/finetune_ucf101_mae_edlnokl.py) | [test](configs/recognition/mae/inference_mae_enn.py) | 85.48 / 91.85 | 97.89 | + +For the pretrained MAE model, please download it in the [Google Drive](https://drive.google.com/file/d/1iVb7c3onYPjIv5ResMRbIoxCVlsp5YCr/view?usp=share_link). 
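+
+The Open Set AUC numbers above are computed from an evidential (EDL) uncertainty score: closed-set UCF-101 test videos should receive low uncertainty and the unknown (HMDB-51 / MiT-v2) videos high uncertainty. The sketch below only illustrates that scoring with placeholder logits; the repo's own scripts (`run_get_mae_threshold.sh`, `run_ood_mae_dist_detection.sh`, `compute_auroc.py`) remain the reference, and the exact evidence activation used by the EDL head is an assumption here (ReLU is shown; exp or softplus are also common choices).
+
+```python
+import numpy as np
+import torch
+import torch.nn.functional as F
+from sklearn.metrics import roc_auc_score
+
+def edl_uncertainty(logits: torch.Tensor) -> torch.Tensor:
+    """Dirichlet vacuity u = K / sum(alpha), with alpha = evidence + 1."""
+    num_classes = logits.shape[-1]
+    evidence = F.relu(logits)  # assumed evidence function; check the EDL head config
+    alpha = evidence + 1.0
+    return num_classes / alpha.sum(dim=-1)
+
+# Placeholder logits standing in for model outputs on UCF-101 (known) and HMDB-51 (unknown).
+known_u = edl_uncertainty(torch.randn(100, 101))
+unknown_u = edl_uncertainty(torch.randn(100, 101))
+
+labels = np.concatenate([np.zeros(len(known_u)), np.ones(len(unknown_u))])
+scores = np.concatenate([known_u.numpy(), unknown_u.numpy()])
+print('Open-set AUROC:', roc_auc_score(labels, scores))
+```
+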
+ diff --git a/Downstream/Open-Set-Action-Recognition/configs/localization/bmn/README.md b/Downstream/Open-Set-Action-Recognition/configs/localization/bmn/README.md new file mode 100644 index 0000000..22f0fcc --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/localization/bmn/README.md @@ -0,0 +1,79 @@ +# BMN + +## Introduction +``` +@inproceedings{lin2019bmn, + title={Bmn: Boundary-matching network for temporal action proposal generation}, + author={Lin, Tianwei and Liu, Xiao and Li, Xin and Ding, Errui and Wen, Shilei}, + booktitle={Proceedings of the IEEE International Conference on Computer Vision}, + pages={3889--3898}, + year={2019} +} + +@article{zhao2017cuhk, + title={Cuhk \& ethz \& siat submission to activitynet challenge 2017}, + author={Zhao, Y and Zhang, B and Wu, Z and Yang, S and Zhou, L and Yan, S and Wang, L and Xiong, Y and Lin, D and Qiao, Y and others}, + journal={arXiv preprint arXiv:1710.08011}, + volume={8}, + year={2017} +} +``` + +## Model Zoo + +### ActivityNet feature + +|config |feature | gpus | AR@100| AUC | AP@0.5 | AP@0.75 | AP@0.95 | mAP | gpu_mem(M) | iter time(s) | ckpt | log| json| +|:-:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:-:|---|:-:|:-:|---| +|[bmn_400x100_9e_2x8_activitynet_feature](/configs/localization/bmn/bmn_400x100_2x8_9e_activitynet_feature.py) |cuhk_mean_100 |2|75.28|67.22|42.47|31.31|9.92|30.34|5420|3.27|[ckpt](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_9e_activitynet_feature/bmn_400x100_9e_activitynet_feature_20200619-42a3b111.pth)| [log](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_9e_activitynet_feature/bmn_400x100_9e_activitynet_feature.log)| [json](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_9e_activitynet_feature/bmn_400x100_9e_activitynet_feature.log.json)| +| |mmaction_video |2|75.43|67.22|42.62|31.56|10.86|30.77|5420|3.27|[ckpt](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_2x8_9e_mmaction_video/bmn_400x100_2x8_9e_mmaction_video_20200809-c9fd14d2.pth)| [log](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_2x8_9e_mmaction_video/bmn_400x100_2x8_9e_mmaction_video_20200809.log) | [json](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_2x8_9e_mmaction_video/bmn_400x100_2x8_9e_mmaction_video_20200809.json) | +| |mmaction_clip |2|75.35|67.38|43.08|32.19|10.73|31.15|5420|3.27|[ckpt](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_2x8_9e_mmaction_clip/bmn_400x100_2x8_9e_mmaction_clip_20200809-10d803ce.pth)| [log](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_2x8_9e_mmaction_clip/bmn_400x100_2x8_9e_mmaction_clip_20200809.log) | [json](https://download.openmmlab.com/mmaction/localization/bmn/bmn_400x100_2x8_9e_mmaction_clip/bmn_400x100_2x8_9e_mmaction_clip_20200809.json) | +| [BMN-official](https://github.com/JJBOY/BMN-Boundary-Matching-Network) (for reference)* |cuhk_mean_100 |-|75.27|67.49|42.22|30.98|9.22|30.00|-|-|-| - | - | + +- Notes: + +1. The **gpus** indicates the number of gpu we used to get the checkpoint. +According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use different GPUs or videos per GPU, +e.g., lr=0.01 for 4 GPUs * 2 video/gpu and lr=0.08 for 16 GPUs * 4 video/gpu. +2. 
In the feature column, cuhk_mean_100 denotes the widely used CUHK ActivityNet feature extracted by [anet2016-cuhk](https://github.com/yjxiong/anet2016-cuhk), while mmaction_video and mmaction_clip denote features extracted by mmaction with a video-level or clip-level ActivityNet-finetuned model, respectively. +3. We evaluate the action detection performance of BMN, using the [anet_cuhk_2017](https://download.openmmlab.com/mmaction/localization/cuhk_anet17_pred.json) submission for the ActivityNet 2017 Untrimmed Video Classification Track to assign a label to each action proposal. + +*We train BMN with the [official repo](https://github.com/JJBOY/BMN-Boundary-Matching-Network) and evaluate its proposal generation and action detection performance with [anet_cuhk_2017](https://download.openmmlab.com/mmaction/localization/cuhk_anet17_pred.json) for label assignment. + +For more details on data preparation, you can refer to ActivityNet feature in [Data Preparation](/docs/data_preparation.md). + +## Train +You can use the following command to train a model. +```shell +python tools/train.py ${CONFIG_FILE} [optional arguments] +``` + +Example: train BMN model on ActivityNet features dataset. +```shell +python tools/train.py configs/localization/bmn/bmn_400x100_2x8_9e_activitynet_feature.py +``` +For more details and optional arguments, you can refer to the **Training setting** part in [getting_started](/docs/getting_started.md#training-setting). + +## Test +You can use the following command to test a model. +```shell +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments] +``` + +Example: test BMN on ActivityNet feature dataset. +```shell +# Note: If evaluated, then please make sure the annotation file for test data contains groundtruth. +python tools/test.py configs/localization/bmn/bmn_400x100_2x8_9e_activitynet_feature.py checkpoints/SOME_CHECKPOINT.pth --eval AR@AN --out results.json +``` +You can also test the action detection performance of the model with the [anet_cuhk_2017](https://download.openmmlab.com/mmaction/localization/cuhk_anet17_pred.json) prediction file and the generated proposal file (`results.json` from the last command). +```shell +python tools/analysis/report_map.py --proposal path/to/proposal_file +``` + +Notes: +1. (Optional) You can use the following command to generate a formatted proposal file, which will be fed into the action classifier (currently SSN and P-GCN are supported, not TSN, I3D, etc.) to get the classification results of the proposals. + ```shell + python tools/data/activitynet/convert_proposal_format.py + ``` + +For more details and optional arguments, you can refer to the **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
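The Linear Scaling Rule referenced in the notes above simply keeps the learning rate proportional to the effective batch size (GPUs x videos per GPU). A minimal sketch of that bookkeeping, using the numbers from the note and from the BMN config below (the helper is ours, not part of the codebase):

```python
def scale_lr(base_lr, base_gpus, base_videos_per_gpu, gpus, videos_per_gpu):
    """Linear Scaling Rule: learning rate proportional to the total batch size."""
    return base_lr * (gpus * videos_per_gpu) / (base_gpus * base_videos_per_gpu)


# The note's example: lr=0.01 at 4 GPUs x 2 videos/gpu scales to 0.08 at 16 GPUs x 4 videos/gpu.
assert abs(scale_lr(0.01, 4, 2, 16, 4) - 0.08) < 1e-9

# The BMN config below uses lr=0.001 for 2 GPUs x 8 videos/gpu; training on a single GPU with
# the same per-GPU batch size would suggest roughly half of that.
print(scale_lr(0.001, 2, 8, 1, 8))  # 0.0005
```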
diff --git a/Downstream/Open-Set-Action-Recognition/configs/localization/bmn/bmn_400x100_2x8_9e_activitynet_feature.py b/Downstream/Open-Set-Action-Recognition/configs/localization/bmn/bmn_400x100_2x8_9e_activitynet_feature.py new file mode 100644 index 0000000..753bc73 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/localization/bmn/bmn_400x100_2x8_9e_activitynet_feature.py @@ -0,0 +1,105 @@ +# model settings +model = dict( + type='BMN', + temporal_dim=100, + boundary_ratio=0.5, + num_samples=32, + num_samples_per_bin=3, + feat_dim=400, + soft_nms_alpha=0.4, + soft_nms_low_threshold=0.5, + soft_nms_high_threshold=0.9, + post_process_top_k=100) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='score') +# dataset settings +dataset_type = 'ActivityNetDataset' +data_root = 'data/ActivityNet/activitynet_feature_cuhk/csv_mean_100/' +data_root_val = 'data/ActivityNet/activitynet_feature_cuhk/csv_mean_100/' +ann_file_train = 'data/ActivityNet/anet_anno_train.json' +ann_file_val = 'data/ActivityNet/anet_anno_val.json' +ann_file_test = 'data/ActivityNet/anet_anno_val.json' + +test_pipeline = [ + dict(type='LoadLocalizationFeature'), + dict( + type='Collect', + keys=['raw_feature'], + meta_name='video_meta', + meta_keys=[ + 'video_name', 'duration_second', 'duration_frame', 'annotations', + 'feature_frame' + ]), + dict(type='ToTensor', keys=['raw_feature']), +] +train_pipeline = [ + dict(type='LoadLocalizationFeature'), + dict(type='GenerateLocalizationLabels'), + dict( + type='Collect', + keys=['raw_feature', 'gt_bbox'], + meta_name='video_meta', + meta_keys=['video_name']), + dict(type='ToTensor', keys=['raw_feature', 'gt_bbox']), + dict( + type='ToDataContainer', + fields=[dict(key='gt_bbox', stack=False, cpu_only=True)]) +] +val_pipeline = [ + dict(type='LoadLocalizationFeature'), + dict(type='GenerateLocalizationLabels'), + dict( + type='Collect', + keys=['raw_feature', 'gt_bbox'], + meta_name='video_meta', + meta_keys=[ + 'video_name', 'duration_second', 'duration_frame', 'annotations', + 'feature_frame' + ]), + dict(type='ToTensor', keys=['raw_feature', 'gt_bbox']), + dict( + type='ToDataContainer', + fields=[dict(key='gt_bbox', stack=False, cpu_only=True)]) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=8, + train_dataloader=dict(drop_last=True), + val_dataloader=dict(videos_per_gpu=1), + test_dataloader=dict(videos_per_gpu=1), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + pipeline=test_pipeline, + data_prefix=data_root_val), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + pipeline=val_pipeline, + data_prefix=data_root_val), + train=dict( + type=dataset_type, + ann_file=ann_file_train, + pipeline=train_pipeline, + data_prefix=data_root)) + +# optimizer +optimizer = dict( + type='Adam', lr=0.001, weight_decay=0.0001) # this lr is used for 2 gpus +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict(policy='step', step=7) + +total_epochs = 9 +checkpoint_config = dict(interval=1) +evaluation = dict(interval=1, metrics=['AR@AN']) +log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/bmn_400x100_2x8_9e_activitynet_feature/' +load_from = None +resume_from = None +workflow = [('train', 1)] +output_config = dict(out=f'{work_dir}/results.json', output_format='json') diff --git a/Downstream/Open-Set-Action-Recognition/configs/localization/bsn/README.md 
b/Downstream/Open-Set-Action-Recognition/configs/localization/bsn/README.md new file mode 100644 index 0000000..69ce33e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/localization/bsn/README.md @@ -0,0 +1,131 @@ +# BSN + +## Introduction +``` +@inproceedings{lin2018bsn, + title={Bsn: Boundary sensitive network for temporal action proposal generation}, + author={Lin, Tianwei and Zhao, Xu and Su, Haisheng and Wang, Chongjing and Yang, Ming}, + booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}, + pages={3--19}, + year={2018} +} +``` + +## Model Zoo + +### ActivityNet feature + +|config |feature | gpus| pretrain | AR@100| AUC | gpu_mem(M) | iter time(s) | ckpt | log| json| +|:--|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:-:| +|bsn_400x100_1x16_20e_activitynet_feature |cuhk_mean_100 |1| None |74.65|66.45|41(TEM)+25(PEM)|0.074(TEM)+0.036(PEM)|[ckpt_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature/bsn_tem_400x100_1x16_20e_activitynet_feature_20200619-cd6accc3.pth) [ckpt_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature/bsn_pem_400x100_1x16_20e_activitynet_feature_20200619-6111891d.pth)| [log_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature/bsn_tem_400x100_1x16_20e_activitynet_feature.log) [log_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature/bsn_pem_400x100_1x16_20e_activitynet_feature.log)| [json_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature/bsn_tem_400x100_1x16_20e_activitynet_feature.log.json) [json_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature/bsn_pem_400x100_1x16_20e_activitynet_feature.log.json)| +| |mmaction_video |1| None |74.93|66.74|41(TEM)+25(PEM)|0.074(TEM)+0.036(PEM)|[ckpt_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_mmaction_video/bsn_tem_400x100_1x16_20e_mmaction_video_20200809-ad6ec626.pth) [ckpt_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_mmaction_video/bsn_pem_400x100_1x16_20e_mmaction_video_20200809-aa861b26.pth)| [log_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_mmaction_video/bsn_tem_400x100_1x16_20e_mmaction_video_20200809.log) [log_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_mmaction_video/bsn_pem_400x100_1x16_20e_mmaction_video_20200809.log) | [json_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_mmaction_video/bsn_tem_400x100_1x16_20e_mmaction_video_20200809.json) [json_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_mmaction_video/bsn_pem_400x100_1x16_20e_mmaction_video_20200809.json) | +| |mmaction_clip |1| None |75.19|66.81|41(TEM)+25(PEM)|0.074(TEM)+0.036(PEM)|[ckpt_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_mmaction_clip/bsn_tem_400x100_1x16_20e_mmaction_clip_20200809-0a563554.pth) [ckpt_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_mmaction_clip/bsn_pem_400x100_1x16_20e_mmaction_clip_20200809-e32f61e6.pth)| 
[log_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_mmaction_clip/bsn_tem_400x100_1x16_20e_mmaction_clip_20200809.log) [log_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_mmaction_clip/bsn_pem_400x100_1x16_20e_mmaction_clip_20200809.log) | [json_tem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_tem_400x100_1x16_20e_mmaction_clip/bsn_tem_400x100_1x16_20e_mmaction_clip_20200809.json) [json_pem](https://download.openmmlab.com/mmaction/localization/bsn/bsn_pem_400x100_1x16_20e_mmaction_clip/bsn_pem_400x100_1x16_20e_mmaction_clip_20200809.json) | + +Notes: +1. The **gpus** indicates the number of gpu we used to get the checkpoint. +According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use different GPUs or videos per GPU, +e.g., lr=0.01 for 4 GPUs * 2 video/gpu and lr=0.08 for 16 GPUs * 4 video/gpu. +2. For feature column, cuhk_mean_100 denotes the widely used cuhk activitynet feature extracted by [anet2016-cuhk](https://github.com/yjxiong/anet2016-cuhk), mmaction_video and mmaction_clip denote feature extracted by mmaction, with video-level activitynet finetuned model or clip-level activitynet finetuned model respectively. + +For more details on data preparation, you can refer to ActivityNet feature in [Data Preparation](/docs/data_preparation.md). + +## Train +You can use the following commands to train a model. + +```shell +python tools/train.py ${CONFIG_FILE} [optional arguments] +``` + +Examples: + +1. train BSN(TEM) on ActivityNet features dataset. + ```shell + python tools/train.py configs/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature.py + ``` + +2. train BSN(PEM) on PGM results. + ```shell + python tools/train.py configs/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature.py + ``` + +For more details and optional arguments infos, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting). + +## Inference +You can use the following commands to inference a model. + +1. For TEM Inference + ```shell + # Note: This could not be evaluated. + python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments] + ``` + +2. For PGM Inference + ```shell + python tools/bsn_proposal_generation.py ${CONFIG_FILE} [--mode ${MODE}] + ``` + +3. For PEM Inference + ```shell + python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments] + ``` + +Examples: + +1. Inference BSN(TEM) with pretrained model. + ```shell + python tools/test.py configs/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature.py checkpoints/SOME_CHECKPOINT.pth + ``` + +2. Inference BSN(PGM) with pretrained model. + ```shell + python tools/bsn_proposal_generation.py configs/localization/bsn/bsn_pgm_400x100_activitynet_feature.py --mode train + ``` + +3. Inference BSN(PEM) with evaluation metric 'AR@AN' and output the results. + ```shell + # Note: If evaluated, then please make sure the annotation file for test data contains groundtruth. + python tools/test.py configs/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature.py checkpoints/SOME_CHECKPOINT.pth --eval AR@AN --out results.json + ``` + +## Test +You can use the following commands to test a model. + +1. TEM + ```shell + # Note: This could not be evaluated. + python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments] + ``` + +2. 
PGM + ```shell + python tools/bsn_proposal_generation.py ${CONFIG_FILE} [--mode ${MODE}] + ``` + +3. PEM + ```shell + python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments] + ``` + +Examples: + +1. Test a TEM model on ActivityNet dataset. + ```shell + python tools/test.py configs/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature.py checkpoints/SOME_CHECKPOINT.pth + ``` + +2. Test a PGM model on ActivityNet dataset. + ```shell + python tools/bsn_proposal_generation.py configs/localization/bsn/bsn_pgm_400x100_activitynet_feature.py --mode test + ``` + +3. Test a PEM model with with evaluation metric 'AR@AN' and output the results. + ```shell + python tools/test.py configs/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature.py checkpoints/SOME_CHECKPOINT.pth --eval AR@AN --out results.json + ``` +Notes: +1. (Optional) You can use the following command to generate a formatted proposal file, which will be fed into the action classifier (Currently supports only SSN and P-GCN, not including TSN, I3D etc.) to get the classification result of proposals. + ```shell + python tools/data/activitynet/convert_proposal_format.py + ``` + +For more details and optional arguments infos, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset). diff --git a/Downstream/Open-Set-Action-Recognition/configs/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature.py b/Downstream/Open-Set-Action-Recognition/configs/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature.py new file mode 100644 index 0000000..aff4f45 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature.py @@ -0,0 +1,122 @@ +# model settings +model = dict( + type='PEM', + pem_feat_dim=32, + pem_hidden_dim=256, + pem_u_ratio_m=1, + pem_u_ratio_l=2, + pem_high_temporal_iou_threshold=0.6, + pem_low_temporal_iou_threshold=2.2, + soft_nms_alpha=0.75, + soft_nms_low_threshold=0.65, + soft_nms_high_threshold=0.9, + post_process_top_k=100) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='score') +# dataset settings +dataset_type = 'ActivityNetDataset' +data_root = 'data/ActivityNet/activitynet_feature_cuhk/csv_mean_100/' +data_root_val = 'data/ActivityNet/activitynet_feature_cuhk/csv_mean_100/' +ann_file_train = 'data/ActivityNet/anet_anno_train.json' +ann_file_val = 'data/ActivityNet/anet_anno_val.json' +ann_file_test = 'data/ActivityNet/anet_anno_val.json' + +work_dir = 'work_dirs/bsn_400x100_20e_1x16_activitynet_feature/' +pgm_proposals_dir = f'{work_dir}/pgm_proposals/' +pgm_features_dir = f'{work_dir}/pgm_features/' + +test_pipeline = [ + dict( + type='LoadProposals', + top_k=1000, + pgm_proposals_dir=pgm_proposals_dir, + pgm_features_dir=pgm_features_dir), + dict( + type='Collect', + keys=['bsp_feature', 'tmin', 'tmax', 'tmin_score', 'tmax_score'], + meta_name='video_meta', + meta_keys=[ + 'video_name', 'duration_second', 'duration_frame', 'annotations', + 'feature_frame' + ]), + dict(type='ToTensor', keys=['bsp_feature']) +] + +train_pipeline = [ + dict( + type='LoadProposals', + top_k=500, + pgm_proposals_dir=pgm_proposals_dir, + pgm_features_dir=pgm_features_dir), + dict( + type='Collect', + keys=['bsp_feature', 'reference_temporal_iou'], + meta_name='video_meta', + meta_keys=[]), + dict(type='ToTensor', keys=['bsp_feature', 'reference_temporal_iou']), + dict( + type='ToDataContainer', + fields=(dict(key='bsp_feature', stack=False), + 
dict(key='reference_temporal_iou', stack=False))) +] + +val_pipeline = [ + dict( + type='LoadProposals', + top_k=1000, + pgm_proposals_dir=pgm_proposals_dir, + pgm_features_dir=pgm_features_dir), + dict( + type='Collect', + keys=['bsp_feature', 'tmin', 'tmax', 'tmin_score', 'tmax_score'], + meta_name='video_meta', + meta_keys=[ + 'video_name', 'duration_second', 'duration_frame', 'annotations', + 'feature_frame' + ]), + dict(type='ToTensor', keys=['bsp_feature']) +] +data = dict( + videos_per_gpu=16, + workers_per_gpu=8, + train_dataloader=dict(drop_last=True), + val_dataloader=dict(videos_per_gpu=1), + test_dataloader=dict(videos_per_gpu=1), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + pipeline=test_pipeline, + data_prefix=data_root_val), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + pipeline=val_pipeline, + data_prefix=data_root_val), + train=dict( + type=dataset_type, + ann_file=ann_file_train, + pipeline=train_pipeline, + data_prefix=data_root)) + +# optimizer +optimizer = dict( + type='Adam', lr=0.01, weight_decay=0.00001) # this lr is used for 1 gpus + +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict(policy='step', step=10) + +total_epochs = 20 +checkpoint_config = dict(interval=1, filename_tmpl='pem_epoch_{}.pth') + +evaluation = dict(interval=1, metrics=['AR@AN']) + +log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = None +resume_from = None +workflow = [('train', 1)] +output_config = dict(out=f'{work_dir}/results.json', output_format='json') diff --git a/Downstream/Open-Set-Action-Recognition/configs/localization/bsn/bsn_pgm_400x100_activitynet_feature.py b/Downstream/Open-Set-Action-Recognition/configs/localization/bsn/bsn_pgm_400x100_activitynet_feature.py new file mode 100644 index 0000000..2c5f7a0 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/localization/bsn/bsn_pgm_400x100_activitynet_feature.py @@ -0,0 +1,32 @@ +# dataset settings +dataset_type = 'ActivityNetDataset' +data_root = 'data/ActivityNet/activitynet_feature_cuhk/csv_mean_100/' +data_root_val = 'data/ActivityNet/activitynet_feature_cuhk/csv_mean_100/' +ann_file_train = 'data/ActivityNet/anet_anno_train.json' +ann_file_val = 'data/ActivityNet/anet_anno_val.json' +ann_file_test = 'data/ActivityNet/anet_anno_test.json' + +work_dir = 'work_dirs/bsn_400x100_20e_1x16_activitynet_feature/' +tem_results_dir = f'{work_dir}/tem_results/' +pgm_proposals_dir = f'{work_dir}/pgm_proposals/' +pgm_features_dir = f'{work_dir}/pgm_features/' + +temporal_scale = 100 +pgm_proposals_cfg = dict( + pgm_proposals_thread=8, temporal_scale=temporal_scale, peak_threshold=0.5) +pgm_features_test_cfg = dict( + pgm_features_thread=4, + top_k=1000, + num_sample_start=8, + num_sample_end=8, + num_sample_action=16, + num_sample_interp=3, + bsp_boundary_ratio=0.2) +pgm_features_train_cfg = dict( + pgm_features_thread=4, + top_k=500, + num_sample_start=8, + num_sample_end=8, + num_sample_action=16, + num_sample_interp=3, + bsp_boundary_ratio=0.2) diff --git a/Downstream/Open-Set-Action-Recognition/configs/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature.py b/Downstream/Open-Set-Action-Recognition/configs/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature.py new file mode 100644 index 0000000..bcb1a18 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature.py @@ -0,0 
+1,95 @@ +# model settings +model = dict( + type='TEM', + temporal_dim=100, + boundary_ratio=0.1, + tem_feat_dim=400, + tem_hidden_dim=512, + tem_match_threshold=0.5) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='score') +# dataset settings +dataset_type = 'ActivityNetDataset' +data_root = 'data/ActivityNet/activitynet_feature_cuhk/csv_mean_100/' +data_root_val = 'data/ActivityNet/activitynet_feature_cuhk/csv_mean_100/' +ann_file_train = 'data/ActivityNet/anet_anno_train.json' +ann_file_val = 'data/ActivityNet/anet_anno_val.json' +ann_file_test = 'data/ActivityNet/anet_anno_full.json' + +work_dir = 'work_dirs/bsn_400x100_20e_1x16_activitynet_feature/' +tem_results_dir = f'{work_dir}/tem_results/' + +test_pipeline = [ + dict(type='LoadLocalizationFeature'), + dict( + type='Collect', + keys=['raw_feature'], + meta_name='video_meta', + meta_keys=['video_name']), + dict(type='ToTensor', keys=['raw_feature']) +] +train_pipeline = [ + dict(type='LoadLocalizationFeature'), + dict(type='GenerateLocalizationLabels'), + dict( + type='Collect', + keys=['raw_feature', 'gt_bbox'], + meta_name='video_meta', + meta_keys=['video_name']), + dict(type='ToTensor', keys=['raw_feature', 'gt_bbox']), + dict(type='ToDataContainer', fields=[dict(key='gt_bbox', stack=False)]) +] +val_pipeline = [ + dict(type='LoadLocalizationFeature'), + dict(type='GenerateLocalizationLabels'), + dict( + type='Collect', + keys=['raw_feature', 'gt_bbox'], + meta_name='video_meta', + meta_keys=['video_name']), + dict(type='ToTensor', keys=['raw_feature', 'gt_bbox']), + dict(type='ToDataContainer', fields=[dict(key='gt_bbox', stack=False)]) +] + +data = dict( + videos_per_gpu=16, + workers_per_gpu=8, + train_dataloader=dict(drop_last=True), + val_dataloader=dict(videos_per_gpu=1), + test_dataloader=dict(videos_per_gpu=1), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + pipeline=test_pipeline, + data_prefix=data_root_val), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + pipeline=val_pipeline, + data_prefix=data_root_val), + train=dict( + type=dataset_type, + ann_file=ann_file_train, + pipeline=train_pipeline, + data_prefix=data_root)) + +# optimizer +optimizer = dict( + type='Adam', lr=0.001, weight_decay=0.0001) # this lr is used for 1 gpus + +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict(policy='step', step=7) + +total_epochs = 20 +checkpoint_config = dict(interval=1, filename_tmpl='tem_epoch_{}.pth') + +log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = None +resume_from = None +workflow = [('train', 1), ('val', 1)] +output_config = dict(out=tem_results_dir, output_format='csv') diff --git a/Downstream/Open-Set-Action-Recognition/configs/localization/ssn/README.md b/Downstream/Open-Set-Action-Recognition/configs/localization/ssn/README.md new file mode 100644 index 0000000..c834dbd --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/localization/ssn/README.md @@ -0,0 +1,55 @@ +# SSN + +## Introduction +``` +@InProceedings{Zhao_2017_ICCV, +author = {Zhao, Yue and Xiong, Yuanjun and Wang, Limin and Wu, Zhirong and Tang, Xiaoou and Lin, Dahua}, +title = {Temporal Action Detection With Structured Segment Networks}, +booktitle = {Proceedings of the IEEE International Conference on Computer Vision (ICCV)}, +month = {Oct}, +year = {2017} +} +``` + +## Model Zoo + +| config | gpus | backbone | pretrain | mAP@0.3 | mAP@0.4 
| mAP@0.5 | reference mAP@0.3 | reference mAP@0.4 | reference mAP@0.5 | gpu_mem(M) | ckpt | log | json | reference ckpt | reference json +|:-:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:-:|:-:|:-:|:-:|---|:--:|:--:| +|[ssn_r50_450e_thumos14_rgb](/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_train.py) |8| ResNet50 | ImageNet |29.37|22.15|15.69|[27.61](https://github.com/open-mmlab/mmaction/tree/c7e3b7c11fb94131be9b48a8e3d510589addc3ce#Get%20started)|[21.28](https://github.com/open-mmlab/mmaction/tree/c7e3b7c11fb94131be9b48a8e3d510589addc3ce#Get%20started)|[14.57](https://github.com/open-mmlab/mmaction/tree/c7e3b7c11fb94131be9b48a8e3d510589addc3ce#Get%20started)|6352|[ckpt](https://download.openmmlab.com/mmaction/localization/ssn/ssn_r50_450e_thumos14_rgb/ssn_r50_450e_thumos14_rgb_20201012-1920ab16.pth)| [log](https://download.openmmlab.com/mmaction/localization/ssn/ssn_r50_450e_thumos14_rgb/20201005_144656.log)| [json](https://download.openmmlab.com/mmaction/localization/ssn/ssn_r50_450e_thumos14_rgb/20201005_144656.log.json)| [ckpt](https://download.openmmlab.com/mmaction/localization/ssn/mmaction_reference/ssn_r50_450e_thumos14_rgb_ref/ssn_r50_450e_thumos14_rgb_ref_20201014-b6f48f68.pth)| [json](https://download.openmmlab.com/mmaction/localization/ssn/mmaction_reference/ssn_r50_450e_thumos14_rgb_ref/20201008_103258.log.json)| + +- Notes: + +1. The **gpus** indicates the number of gpu we used to get the checkpoint. +According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use different GPUs or videos per GPU, +e.g., lr=0.01 for 4 GPUs * 2 video/gpu and lr=0.08 for 16 GPUs * 4 video/gpu. +2. Since SSN utilizes different structured temporal pyramid pooling methods at training and testing, please use [ssn_r50_450e_thumos14_rgb_train](/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_train.py) for training and [ssn_r50_450e_thumos14_rgb_test](/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_test.py) for testing. +3. We evaluate the action detection performance of SSN using action proposals from TAG. For more details on data preparation, you can refer to thumos14 TAG proposals in [Data Preparation](/docs/data_preparation.md). +4. The reference SSN is evaluated with the `ResNet50` backbone in MMAction, which is the same backbone as ours. Note that the original setting of MMAction SSN uses the `BNInception` backbone. + +## Train + +You can use the following command to train a model. +```shell +python tools/train.py ${CONFIG_FILE} [optional arguments] +``` + +Example: train SSN model on thumos14 dataset. +```shell +python tools/train.py configs/localization/ssn/ssn_r50_450e_thumos14_rgb_train.py +``` +For more details and optional arguments, you can refer to the **Training setting** part in [getting_started](/docs/getting_started.md#training-setting). + +## Test + +You can use the following command to test a model. +```shell +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments] +``` + +Example: test SSN model on thumos14 dataset. +```shell +# Note: If evaluated, then please make sure the annotation file for test data contains groundtruth. +python tools/test.py configs/localization/ssn/ssn_r50_450e_thumos14_rgb_test.py checkpoints/SOME_CHECKPOINT.pth --eval mAP +``` + +For more details and optional arguments, you can refer to the **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
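The mAP@0.3/0.4/0.5 columns above count a detection as a true positive when its temporal IoU with a ground-truth action instance reaches the corresponding threshold. A minimal sketch of that overlap measure (the helper name is ours):

```python
def temporal_iou(seg_a, seg_b):
    """Temporal IoU between two segments given as (start, end) in seconds."""
    (s1, e1), (s2, e2) = seg_a, seg_b
    inter = max(0.0, min(e1, e2) - max(s1, s2))
    union = (e1 - s1) + (e2 - s2) - inter
    return inter / union if union > 0 else 0.0


# A detection spanning 12.0-19.0s against a ground-truth action spanning 10.0-20.0s:
print(temporal_iou((12.0, 19.0), (10.0, 20.0)))  # 0.7 -> counted as correct at all three thresholds
```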
diff --git a/Downstream/Open-Set-Action-Recognition/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_test.py b/Downstream/Open-Set-Action-Recognition/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_test.py new file mode 100644 index 0000000..5915889 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_test.py @@ -0,0 +1,110 @@ +# model training and testing settings +train_cfg = dict( + ssn=dict( + assigner=dict( + positive_iou_threshold=0.7, + background_iou_threshold=0.01, + incomplete_iou_threshold=0.3, + background_coverage_threshold=0.02, + incomplete_overlap_threshold=0.01), + sampler=dict( + num_per_video=8, + positive_ratio=1, + background_ratio=1, + incomplete_ratio=6, + add_gt_as_proposals=True), + loss_weight=dict(comp_loss_weight=0.1, reg_loss_weight=0.1), + debug=False)) +test_cfg = dict( + ssn=dict( + sampler=dict(test_interval=6, batch_size=16), + evaluater=dict( + top_k=2000, + nms=0.2, + softmax_before_filter=True, + cls_score_dict=None, + cls_top_k=2))) +# model settings +model = dict( + type='SSN', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + partial_bn=True), + spatial_type='avg', + dropout_ratio=0.8, + cls_head=dict( + type='SSNHead', + dropout_ratio=0., + in_channels=2048, + num_classes=20, + consensus=dict(type='STPPTest', stpp_stage=(1, 1, 1)), + use_regression=True), + test_cfg=test_cfg) +# dataset settings +dataset_type = 'SSNDataset' +data_root = './data/thumos14/rawframes/' +data_root_val = './data/thumos14/rawframes/' +ann_file_train = 'data/thumos14/thumos14_tag_val_proposal_list.txt' +ann_file_val = 'data/thumos14/thumos14_tag_val_proposal_list.txt' +ann_file_test = 'data/thumos14/thumos14_tag_test_proposal_list.txt' +img_norm_cfg = dict(mean=[104, 117, 128], std=[1, 1, 1], to_bgr=True) +test_pipeline = [ + dict( + type='SampleProposalFrames', + clip_len=1, + body_segments=5, + aug_segments=(2, 2), + aug_ratio=0.5, + mode='test'), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(340, 256), keep_ratio=True), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict( + type='Collect', + keys=[ + 'imgs', 'relative_proposal_list', 'scale_factor_list', + 'proposal_tick_list', 'reg_norm_consts' + ], + meta_keys=[]), + dict( + type='ToTensor', + keys=[ + 'imgs', 'relative_proposal_list', 'scale_factor_list', + 'proposal_tick_list', 'reg_norm_consts' + ]) +] +data = dict( + videos_per_gpu=1, + workers_per_gpu=2, + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root, + train_cfg=train_cfg, + test_cfg=test_cfg, + aug_ratio=0.5, + test_mode=True, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.001, momentum=0.9, + weight_decay=1e-6) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[200, 400]) +checkpoint_config = dict(interval=5) +log_config = dict(interval=5, hooks=[dict(type='TextLoggerHook')]) +# runtime settings +total_epochs = 450 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/ssn_r50_1x5_450e_thumos14_rgb' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_train.py 
b/Downstream/Open-Set-Action-Recognition/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_train.py new file mode 100644 index 0000000..c64766c --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_train.py @@ -0,0 +1,155 @@ +# model training and testing settings +train_cfg = dict( + ssn=dict( + assigner=dict( + positive_iou_threshold=0.7, + background_iou_threshold=0.01, + incomplete_iou_threshold=0.3, + background_coverage_threshold=0.02, + incomplete_overlap_threshold=0.01), + sampler=dict( + num_per_video=8, + positive_ratio=1, + background_ratio=1, + incomplete_ratio=6, + add_gt_as_proposals=True), + loss_weight=dict(comp_loss_weight=0.1, reg_loss_weight=0.1), + debug=False)) +test_cfg = dict( + ssn=dict( + sampler=dict(test_interval=6, batch_size=16), + evaluater=dict( + top_k=2000, + nms=0.2, + softmax_before_filter=True, + cls_score_dict=None, + cls_top_k=2))) +# model settings +model = dict( + type='SSN', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + partial_bn=True), + spatial_type='avg', + dropout_ratio=0.8, + loss_cls=dict(type='SSNLoss'), + cls_head=dict( + type='SSNHead', + dropout_ratio=0., + in_channels=2048, + num_classes=20, + consensus=dict( + type='STPPTrain', + stpp_stage=(1, 1, 1), + num_segments_list=(2, 5, 2)), + use_regression=True), + train_cfg=train_cfg) +# dataset settings +dataset_type = 'SSNDataset' +data_root = './data/thumos14/rawframes/' +data_root_val = './data/thumos14/rawframes/' +ann_file_train = 'data/thumos14/thumos14_tag_val_proposal_list.txt' +ann_file_val = 'data/thumos14/thumos14_tag_val_proposal_list.txt' +ann_file_test = 'data/thumos14/thumos14_tag_test_proposal_list.txt' +img_norm_cfg = dict(mean=[104, 117, 128], std=[1, 1, 1], to_bgr=True) +train_pipeline = [ + dict( + type='SampleProposalFrames', + clip_len=1, + body_segments=5, + aug_segments=(2, 2), + aug_ratio=0.5), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(340, 256), keep_ratio=True), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NPTCHW'), + dict( + type='Collect', + keys=[ + 'imgs', 'reg_targets', 'proposal_scale_factor', 'proposal_labels', + 'proposal_type' + ], + meta_keys=[]), + dict( + type='ToTensor', + keys=[ + 'imgs', 'reg_targets', 'proposal_scale_factor', 'proposal_labels', + 'proposal_type' + ]) +] +val_pipeline = [ + dict( + type='SampleProposalFrames', + clip_len=1, + body_segments=5, + aug_segments=(2, 2), + aug_ratio=0.5), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(340, 256), keep_ratio=True), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NPTCHW'), + dict( + type='Collect', + keys=[ + 'imgs', 'reg_targets', 'proposal_scale_factor', 'proposal_labels', + 'proposal_type' + ], + meta_keys=[]), + dict( + type='ToTensor', + keys=[ + 'imgs', 'reg_targets', 'proposal_scale_factor', 'proposal_labels', + 'proposal_type' + ]) +] +data = dict( + videos_per_gpu=1, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + train_cfg=train_cfg, + test_cfg=test_cfg, + body_segments=5, + aug_segments=(2, 2), + aug_ratio=0.5, + test_mode=False, + verbose=True, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + 
data_prefix=data_root, + train_cfg=train_cfg, + test_cfg=test_cfg, + body_segments=5, + aug_segments=(2, 2), + aug_ratio=0.5, + test_mode=False, + pipeline=val_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.001, momentum=0.9, + weight_decay=1e-6) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[200, 400]) +checkpoint_config = dict(interval=5) +log_config = dict(interval=1, hooks=[dict(type='TextLoggerHook')]) +# runtime settings +total_epochs = 450 +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/ssn_r50_1x5_450e_thumos14_rgb' +load_from = None +resume_from = None +workflow = [('train', 1)] +find_unused_parameters = True diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/c3d/README.md b/Downstream/Open-Set-Action-Recognition/configs/recognition/c3d/README.md new file mode 100644 index 0000000..9c5c724 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/c3d/README.md @@ -0,0 +1,71 @@ +# C3D + +## Introduction +``` +@ARTICLE{2014arXiv1412.0767T, +author = {Tran, Du and Bourdev, Lubomir and Fergus, Rob and Torresani, Lorenzo and Paluri, Manohar}, +title = "{Learning Spatiotemporal Features with 3D Convolutional Networks}", +keywords = {Computer Science - Computer Vision and Pattern Recognition}, +year = 2014, +month = dec, +eid = {arXiv:1412.0767} +} + +@article{Tran2014C3DGF, + title={C3D: Generic Features for Video Analysis}, + author={D. Tran and Lubomir D. Bourdev and R. Fergus and L. Torresani and Manohar Paluri}, + journal={ArXiv}, + year={2014}, + volume={abs/1412.0767} +} +``` + +## Model Zoo + +### UCF-101 + +| config | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol| inference_time(video/s) | gpu_mem(M) | ckpt | log | json | +|:--|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:| +|[c3d_sports1m_16x1x1_45e_ucf101_rgb.py](/configs/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py)|128x171|8| c3d | sports1m | 83.27 | 95.90 | 10 clips x 1 crop | x | 6053 | [ckpt](https://download.openmmlab.com/mmaction/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb/c3d_sports1m_16x1x1_45e_ucf101_rgb_20201021-26655025.pth)|[log](https://download.openmmlab.com/mmaction/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb/20201021_140429.log)|[json](https://download.openmmlab.com/mmaction/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb/20201021_140429.log.json)| + +Notes: + +1. The author of C3D normalized UCF-101 with volume mean and used SVM to classify videos, while we normalized the dataset with RGB mean value and used a linear classifier. +2. The **gpus** indicates the number of gpu (32G V100) we used to get the checkpoint. It is noteworthy that the configs we provide are used for 8 gpus as default. +According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use different GPUs or videos per GPU, +e.g., lr=0.01 for 4 GPUs * 2 video/gpu and lr=0.08 for 16 GPUs * 4 video/gpu. +3. The **inference_time** is got by this [benchmark script](/tools/analysis/benchmark.py), where we use the sampling frames strategy of the test setting and only care about the model inference time, +not including the IO time and pre-processing time. For each setting, we use 1 gpu and set batch size (videos per gpu) to 1 to calculate the inference time. 
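Note 1 above refers to normalizing frames with an RGB mean value; in the config below this corresponds to the `Normalize` pipeline step with `img_norm_cfg = dict(mean=[104, 117, 128], std=[1, 1, 1], to_bgr=False)`. The sketch below spells out what that transform does to decoded frames (the function name and dummy clip are ours):

```python
import numpy as np

# Values from the C3D config below: per-channel mean subtraction, unit std.
MEAN = np.array([104.0, 117.0, 128.0], dtype=np.float32)
STD = np.array([1.0, 1.0, 1.0], dtype=np.float32)


def normalize_frames(frames: np.ndarray) -> np.ndarray:
    """Apply (x - mean) / std per channel to a clip of decoded frames.

    frames: (T, H, W, 3) uint8 array; returns float32. With a std of 1 this is
    plain mean subtraction, as described in note 1.
    """
    return (frames.astype(np.float32) - MEAN) / STD


clip = np.random.randint(0, 256, size=(16, 112, 112, 3), dtype=np.uint8)  # dummy 16-frame clip
print(normalize_frames(clip).mean(axis=(0, 1, 2)))  # per-channel means after normalization
```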
+ +For more details on data preparation, you can refer to UCF-101 in [Data Preparation](/docs/data_preparation.md). + + +## Train + +You can use the following command to train a model. +```shell +python tools/train.py ${CONFIG_FILE} [optional arguments] +``` + +Example: train C3D model on UCF-101 dataset in a deterministic option with periodic validation. +```shell +python tools/train.py configs/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py \ + --validate --seed 0 --deterministic +``` + +For more details, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting). + +## Test + +You can use the following command to test a model. +```shell +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments] +``` + +Example: test C3D model on UCF-101 dataset and dump the result to a json file. +```shell +python tools/test.py configs/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py \ + checkpoints/SOME_CHECKPOINT.pth --eval top_k_accuracy +``` + +For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset). diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py new file mode 100644 index 0000000..1b05441 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py @@ -0,0 +1,118 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='C3D', + pretrained= # noqa: E251 + 'https://download.openmmlab.com/mmaction/recognition/c3d/c3d_sports1m_pretrain_20201016-dcc47ddc.pth', # noqa: E501 + style='pytorch', + conv_cfg=dict(type='Conv3d'), + norm_cfg=None, + act_cfg=dict(type='ReLU'), + dropout_ratio=0.5, + init_std=0.005), + cls_head=dict( + type='I3DHead', + num_classes=101, + in_channels=4096, + spatial_type=None, + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='score') +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/ucf101/rawframes' +data_root_val = 'data/ucf101/rawframes' +split = 1 # official train/test splits. 
valid numbers: 1, 2, 3 +ann_file_train = f'data/ucf101/ucf101_train_split_{split}_rawframes.txt' +ann_file_val = f'data/ucf101/ucf101_val_split_{split}_rawframes.txt' +ann_file_test = f'data/ucf101/ucf101_val_split_{split}_rawframes.txt' +img_norm_cfg = dict(mean=[104, 117, 128], std=[1, 1, 1], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=16, frame_interval=1, num_clips=1), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(128, 171)), + dict(type='RandomCrop', size=112), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=16, + frame_interval=1, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(128, 171)), + dict(type='CenterCrop', crop_size=112), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=16, + frame_interval=1, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(128, 171)), + dict(type='CenterCrop', crop_size=112), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +data = dict( + videos_per_gpu=30, + workers_per_gpu=2, + test_dataloader=dict(videos_per_gpu=1), + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.001, momentum=0.9, + weight_decay=0.0005) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 45 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = f'./work_dirs/c3d_sports1m_16x1x1_45e_ucf101_split_{split}_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/csn/README.md b/Downstream/Open-Set-Action-Recognition/configs/recognition/csn/README.md new file mode 100644 index 0000000..55185fe --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/csn/README.md @@ -0,0 +1,73 @@ +# CSN + +## Introduction +``` +@inproceedings{inproceedings, +author = {Wang, Heng and Feiszli, Matt and Torresani, Lorenzo}, +year = {2019}, +month = {10}, +pages = {5551-5560}, +title = {Video Classification With Channel-Separated Convolutional Networks}, +doi = {10.1109/ICCV.2019.00565} +} + +@inproceedings{ghadiyaram2019large, + title={Large-scale 
weakly-supervised pre-training for video action recognition}, + author={Ghadiyaram, Deepti and Tran, Du and Mahajan, Dhruv}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, + pages={12046--12055}, + year={2019} +} +``` + +## Model Zoo + +### Kinetics-400 + +|config | resolution | gpus | backbone |pretrain| top1 acc| top5 acc | inference_time(video/s) | gpu_mem(M)| ckpt | log| json| +|:--|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:| +|[ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py](/configs/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py)|short-side 320|8x4| ResNet152 | IG65M|80.14|94.93|x|8517|[ckpt](https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb_20200803-fc66ce8d.pth)|[log](https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb/20200728_031952.log)|[json](https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb/20200728_031952.log.json)| +|[ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py](/configs/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py)|short-side 320|8x4| ResNet152 | IG65M|82.76|95.68|x|8516|[ckpt](https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb_20200812-9037a758.pth)|[log](https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb/20200809_053132.log)|[json](https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb/20200809_053132.log.json)| + +Notes: + +1. The **gpus** indicates the number of gpu (32G V100) we used to get the checkpoint. It is noteworthy that the configs we provide are used for 8 gpus as default. +According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use different GPUs or videos per GPU, +e.g., lr=0.01 for 4 GPUs * 2 video/gpu and lr=0.08 for 16 GPUs * 4 video/gpu. +2. The **inference_time** is got by this [benchmark script](/tools/analysis/benchmark.py), where we use the sampling frames strategy of the test setting and only care about the model inference time, +not including the IO time and pre-processing time. For each setting, we use 1 gpu and set batch size (videos per gpu) to 1 to calculate the inference time. +3. The values in columns named after "reference" are the results got by training on the original repo, using the same model settings. + +For more details on data preparation, you can refer to Kinetics400 in [Data Preparation](/docs/data_preparation.md). + +## Train + +You can use the following command to train a model. +```shell +python tools/train.py ${CONFIG_FILE} [optional arguments] +``` + +Example: train CSN model on Kinetics-400 dataset in a deterministic option with periodic validation. 
+```shell +python tools/train.py configs/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py \ + --work-dir work_dirs/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb \ + --validate --seed 0 --deterministic +``` + +For more details, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting). + +## Test + +You can use the following command to test a model. +```shell +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments] +``` + +Example: test CSN model on Kinetics-400 dataset and dump the result to a json file. +```shell +python tools/test.py configs/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py \ + checkpoints/SOME_CHECKPOINT.pth --eval top_k_accuracy mean_class_accuracy \ + --out result.json --average-clips prob +``` + +For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset). diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/csn/finetune_ucf101_csn_dnn.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/csn/finetune_ucf101_csn_dnn.py new file mode 100644 index 0000000..dd67a53 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/csn/finetune_ucf101_csn_dnn.py @@ -0,0 +1,127 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dCSN', + pretrained2d=False, + pretrained= # noqa: E251 + 'https://download.openmmlab.com/mmaction/recognition/csn/ircsn_from_scratch_r152_ig65m_20200807-771c4135.pth', # noqa: E501 + depth=152, + with_pool2=False, + bottleneck_mode='ir', + norm_eval=True, + bn_frozen=True, + zero_init_residual=False), + cls_head=dict( + type='I3DHead', + num_classes=101, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/ucf101/videos' +data_root_val = 'data/ucf101/videos' +ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt' +ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt' +ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + 
num_clips=10, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.000125, momentum=0.9, + weight_decay=0.0001, nesterov=True) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + step=[20, 40], + warmup='linear', + warmup_ratio=0.1, + warmup_by_epoch=True, + warmup_iters=5) +total_epochs = 50 +checkpoint_config = dict(interval=10) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook')]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/csn/finetune_ucf101_csn_dnn' # noqa: E501 +load_from = 'https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb_20200812-9037a758.pth' +resume_from = None +workflow = [('train', 1)] +find_unused_parameters = True diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/csn/finetune_ucf101_csn_edlnokl_avuc_debias.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/csn/finetune_ucf101_csn_edlnokl_avuc_debias.py new file mode 100644 index 0000000..6d2268f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/csn/finetune_ucf101_csn_edlnokl_avuc_debias.py @@ -0,0 +1,144 @@ +# model settings +evidence_loss = dict(type='EvidenceLoss', + num_classes=101, + evidence='exp', + loss_type='log', + with_kldiv=False, + with_avuloss=True, + annealing_method='exp') +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dCSN', + pretrained2d=False, + pretrained= # noqa: E251 + 'https://download.openmmlab.com/mmaction/recognition/csn/ircsn_from_scratch_r152_ig65m_20200807-771c4135.pth', # noqa: E501 + depth=152, + with_pool2=False, + bottleneck_mode='ir', + norm_eval=True, + bn_frozen=True, + zero_init_residual=False), + cls_head=dict( + type='I3DHead', + loss_cls=evidence_loss, + num_classes=101, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01), + debias_head=dict( + type='DebiasHead', + loss_cls=evidence_loss, # actually not used! 
+ loss_factor=0.1, + num_classes=101, + in_channels=2048, + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='evidence', evidence_type='exp') +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/ucf101/videos' +data_root_val = 'data/ucf101/videos' +ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt' +ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt' +ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=10, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.000125, momentum=0.9, + weight_decay=0.0001, nesterov=True) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + step=[20, 40], + warmup='linear', + warmup_ratio=0.1, + warmup_by_epoch=True, + warmup_iters=5) +total_epochs = 50 +checkpoint_config = dict(interval=10) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook')]) +annealing_runner = True +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/csn/finetune_ucf101_csn_edlnokl_avuc_debias' # noqa: E501 +load_from = 
'https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb_20200812-9037a758.pth' +resume_from = None +workflow = [('train', 1)] +find_unused_parameters = True diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/csn/inference_csn_dnn.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/csn/inference_csn_dnn.py new file mode 100644 index 0000000..e442efb --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/csn/inference_csn_dnn.py @@ -0,0 +1,52 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dCSN', + pretrained2d=False, + pretrained= # noqa: E251 + 'https://download.openmmlab.com/mmaction/recognition/csn/ircsn_from_scratch_r152_ig65m_20200807-771c4135.pth', # noqa: E501 + depth=152, + with_pool2=False, + bottleneck_mode='ir', + norm_eval=True, + bn_frozen=True, + zero_init_residual=False), + cls_head=dict( + type='I3DHead', + num_classes=101, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'VideoDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=10, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=1, + workers_per_gpu=2, + test=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + pipeline=test_pipeline)) diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/csn/inference_csn_enn.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/csn/inference_csn_enn.py new file mode 100644 index 0000000..439e755 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/csn/inference_csn_enn.py @@ -0,0 +1,58 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dCSN', + pretrained2d=False, + pretrained= # noqa: E251 + 'https://download.openmmlab.com/mmaction/recognition/csn/ircsn_from_scratch_r152_ig65m_20200807-771c4135.pth', # noqa: E501 + depth=152, + with_pool2=False, + bottleneck_mode='ir', + norm_eval=True, + bn_frozen=True, + zero_init_residual=False), + cls_head=dict( + type='I3DHead', + loss_cls=dict(type='EvidenceLoss', + num_classes=101, + evidence='exp', + loss_type='log', + annealing_method='exp'), + num_classes=101, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +evidence='exp' # only used for EDL +test_cfg = dict(average_clips='score') +# dataset settings +dataset_type = 'VideoDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=10, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + 
dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=1, + workers_per_gpu=2, + test=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + pipeline=test_pipeline)) diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py new file mode 100644 index 0000000..d833fa5 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py @@ -0,0 +1,124 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dCSN', + pretrained2d=False, + pretrained= # noqa: E251 + 'https://download.openmmlab.com/mmaction/recognition/csn/ircsn_from_scratch_r152_ig65m_20200807-771c4135.pth', # noqa: E501 + depth=152, + with_pool2=False, + bottleneck_mode='ir', + norm_eval=True, + bn_frozen=True, + zero_init_residual=False), + cls_head=dict( + type='I3DHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=10, + test_mode=True), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=3, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + 
data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.000125, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + step=[32, 48], + warmup='linear', + warmup_ratio=0.1, + warmup_by_epoch=True, + warmup_iters=16) +total_epochs = 58 +checkpoint_config = dict(interval=2) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook')]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb' # noqa: E501 +load_from = None +resume_from = None +workflow = [('train', 1)] +find_unused_parameters = True diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py new file mode 100644 index 0000000..76f99e0 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py @@ -0,0 +1,122 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dCSN', + pretrained2d=False, + pretrained= # noqa: E251 + 'https://download.openmmlab.com/mmaction/recognition/csn/ircsn_from_scratch_r152_ig65m_20200807-771c4135.pth', # noqa: E501 + depth=152, + with_pool2=False, + bottleneck_mode='ir', + norm_eval=False, + zero_init_residual=False), + cls_head=dict( + type='I3DHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + 
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=10, + test_mode=True), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=3, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.000125, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + step=[32, 48], + warmup='linear', + warmup_ratio=0.1, + warmup_by_epoch=True, + warmup_iters=16) +total_epochs = 58 +checkpoint_config = dict(interval=2) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook')]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/README.md b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/README.md new file mode 100644 index 0000000..a98d62d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/README.md @@ -0,0 +1,76 @@ +# I3D + +## Introduction +``` +@inproceedings{inproceedings, + author = {Carreira, J. and Zisserman, Andrew}, + year = {2017}, + month = {07}, + pages = {4724-4733}, + title = {Quo Vadis, Action Recognition? 
A New Model and the Kinetics Dataset}, + doi = {10.1109/CVPR.2017.502} +} + +@article{NonLocal2018, + author = {Xiaolong Wang and Ross Girshick and Abhinav Gupta and Kaiming He}, + title = {Non-local Neural Networks}, + journal = {CVPR}, + year = {2018} +} +``` + +## Model Zoo + +### Kinetics-400 + +|config | resolution | gpus | backbone |pretrain| top1 acc| top5 acc | inference_time(video/s) | gpu_mem(M)| ckpt | log| json| +|:--|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:| +|[i3d_r50_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py) |340x256|8| ResNet50|ImageNet |72.68|90.78|1.7 (320x3 frames)| 5170|[ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb/i3d_r50_32x2x1_100e_kinetics400_rgb_20200614-c25ef9a4.pth) | [log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb/20200614_060456.log)| [json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb/20200614_060456.log.json)| +|[i3d_r50_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py) |short-side 256|8| ResNet50|ImageNet | 73.27|90.92|x|5170|[ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_256p_32x2x1_100e_kinetics400_rgb/i3d_r50_256p_32x2x1_100e_kinetics400_rgb_20200801-7d9f44de.pth)|[log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_256p_32x2x1_100e_kinetics400_rgb/20200725_031555.log)|[json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_256p_32x2x1_100e_kinetics400_rgb/20200725_031555.log.json)| +|[i3d_r50_video_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_r50_video_32x2x1_100e_kinetics400_rgb.py)|short-side 256p|8| ResNet50 |ImageNet|72.85 |90.75 |x|5170|[ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_video_32x2x1_100e_kinetics400_rgb/i3d_r50_video_32x2x1_100e_kinetics400_rgb_20200826-e31c6f52.pth)|[log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_video_32x2x1_100e_kinetics400_rgb/20200706_143014.log)|[json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_video_32x2x1_100e_kinetics400_rgb/20200706_143014.log.json)| +|[i3d_r50_dense_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb.py) |340x256|8x2| ResNet50| ImageNet|72.77|90.57|1.7 (320x3 frames)| 5170| [ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb/i3d_r50_dense_32x2x1_100e_kinetics400_rgb_20200616-2bbb4361.pth) | [log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb/20200616_230011.log)| [json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb/20200616_230011.log.json)| +|[i3d_r50_dense_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb.py) |short-side 256|8| ResNet50| ImageNet|73.48|91.00|x| 5170| [ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb_20200725-24eb54cc.pth)|[log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb/20200725_031604.log)|[json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb/20200725_031604.log.json)| 
+|[i3d_r50_lazy_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_r50_lazy_32x2x1_100e_kinetics400_rgb.py) |340x256|8| ResNet50 |ImageNet|72.32|90.72|1.8 (320x3 frames)| 5170| [ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_fast_32x2x1_100e_kinetics400_rgb/i3d_r50_fast_32x2x1_100e_kinetics400_rgb_20200612-000e4d2a.pth) | [log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_fast_32x2x1_100e_kinetics400_rgb/20200612_233836.log)| [json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_fast_32x2x1_100e_kinetics400_rgb/20200612_233836.log.json)| +|[i3d_r50_lazy_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_r50_lazy_32x2x1_100e_kinetics400_rgb.py) |short-side 256|8| ResNet50| ImageNet|73.24|90.99|x| 5170| [ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_fast_256p_32x2x1_100e_kinetics400_rgb/i3d_r50_fast_256p_32x2x1_100e_kinetics400_rgb_20200817-4e90d1d5.pth)| [log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_fast_256p_32x2x1_100e_kinetics400_rgb/20200725_031457.log) | [json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_fast_256p_32x2x1_100e_kinetics400_rgb/20200725_031457.log.json) | +|[i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb.py)|short-side 256p|8x4| ResNet50 |ImageNet|74.71|91.81|x|6438|[ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb_20200813-6e6aef1b.pth)|[log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb/20200813_034054.log)|[json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb/20200813_034054.log.json)| +|[i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb.py)|short-side 256p|8x4| ResNet50 |ImageNet|73.37|91.26|x|4944|[ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb_20200815-17f84aa2.pth)|[log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb/20200813_034909.log)|[json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb/20200813_034909.log.json)| +|[i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb](/configs/recognition/i3d/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb.py)|short-side 256p|8x4| ResNet50 |ImageNet|73.92|91.59|x|4832|[ckpt](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb_20200814-7c30d5bb.pth)|[log](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb/20200814_044208.log)|[json](https://download.openmmlab.com/mmaction/recognition/i3d/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb/20200814_044208.log.json)| + +Notes: +1. The **gpus** indicates the number of gpu we used to get the checkpoint. It is noteworthy that the configs we provide are used for 8 gpus as default. 
+According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use a different number of GPUs or videos per GPU,
+e.g., lr=0.01 for 4 GPUs * 2 videos/gpu and lr=0.08 for 16 GPUs * 4 videos/gpu.
+2. The **inference_time** is obtained with this [benchmark script](/tools/analysis/benchmark.py), where we use the frame sampling strategy of the test setting and measure only the model inference time,
+excluding the IO and pre-processing time. For each setting, we use 1 GPU and set the batch size (videos per gpu) to 1 when measuring the inference time.
+
+For more details on data preparation, you can refer to Kinetics400 in [Data Preparation](/docs/data_preparation.md).
+
+## Train
+You can use the following command to train a model.
+```shell
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+```
+
+Example: train the I3D model on the Kinetics-400 dataset deterministically with periodic validation.
+```shell
+python tools/train.py configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py \
+    --work-dir work_dirs/i3d_r50_32x2x1_100e_kinetics400_rgb \
+    --validate --seed 0 --deterministic
+```
+
+For more details, you can refer to the **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
+
+## Test
+You can use the following command to test a model.
+```shell
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+```
+
+Example: test the I3D model on the Kinetics-400 dataset and dump the results to a JSON file.
+```shell
+python tools/test.py configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py \
+    checkpoints/SOME_CHECKPOINT.pth --eval top_k_accuracy mean_class_accuracy \
+    --out result.json --average-clips prob
+```
+
+For more details, you can refer to the **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
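The open-set configs added in this patch (for example `finetune_ucf101_csn_edlnokl_avuc_debias.py` above and the `finetune_ucf101_i3d_edlnokl*` variants below) test with `average_clips='evidence'` and `evidence_type='exp'`, i.e. the classification head output is read as Dirichlet evidence rather than softmax scores. The snippet below is a minimal, hedged sketch of the standard evidential-deep-learning conversion that this setting implies (evidence = exp(logits), alpha = evidence + 1); it is an illustration under those assumptions, not the repository's actual inference code, and the function name and shapes are invented for the example.

```python
import torch


def evidential_prediction(clip_logits: torch.Tensor):
    """Sketch: turn per-clip head outputs into class probabilities + uncertainty.

    Assumes `clip_logits` has shape (num_clips, num_classes), matching one
    test video sampled with num_clips clips (e.g. 10 x 101 for UCF-101).
    """
    num_classes = clip_logits.shape[-1]
    # evidence_type='exp': non-negative evidence from the raw logits,
    # averaged over clips (cf. average_clips='evidence' in test_cfg)
    evidence = torch.exp(clip_logits).mean(dim=0)
    alpha = evidence + 1.0            # Dirichlet concentration parameters
    strength = alpha.sum()            # Dirichlet strength S
    probs = alpha / strength          # expected class probabilities
    uncertainty = num_classes / strength  # vacuity u = K / S, in (0, 1]
    return probs, uncertainty


# toy usage: 10 test clips, 101 closed-set classes
probs, u = evidential_prediction(torch.randn(10, 101))
print(probs.argmax().item(), float(u))
```

In open-set evaluation, a high `u` (evidence strength close to zero) is typically thresholded to reject a sample as unknown, while `probs.argmax()` gives the closed-set prediction.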
diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/finetune_ucf101_i3d_bnn.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/finetune_ucf101_i3d_bnn.py new file mode 100644 index 0000000..62f3d6a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/finetune_ucf101_i3d_bnn.py @@ -0,0 +1,128 @@ +# model settings +model = dict( + type='Recognizer3DBNN', + backbone=dict( + type='ResNet3d', + pretrained2d=True, + pretrained='torchvision://resnet50', + depth=50, + conv_cfg=dict(type='Conv3d'), + norm_eval=False, + inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)), + zero_init_residual=False), + cls_head=dict( + type='I3DBNNHead', + num_classes=101, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0, + init_std=0.01)) +# model training and testing settings +train_cfg = dict(loss_weight=1e-6, npass=2) +test_cfg = dict(average_clips='prob', npass=10) +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/ucf101/videos' +data_root_val = 'data/ucf101/videos' +ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt' +ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt' +ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='DenseSampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.8), + random_crop=False, + max_wh_scale_gap=0), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, # set to 8 for training + workers_per_gpu=4, # set to 4 for training + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + start_index=0, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.001, 
momentum=0.9, # change from 0.01 to 0.001 + weight_decay=0.0001, nesterov=True) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 # change from 100 to 50 +checkpoint_config = dict(interval=10) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/finetune_ucf101_i3d_bnn/' +load_from = 'https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb_20200725-24eb54cc.pth' # model path can be found in model zoo +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/finetune_ucf101_i3d_dnn.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/finetune_ucf101_i3d_dnn.py new file mode 100644 index 0000000..92859fc --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/finetune_ucf101_i3d_dnn.py @@ -0,0 +1,128 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3d', + pretrained2d=True, + pretrained='torchvision://resnet50', + depth=50, + conv_cfg=dict(type='Conv3d'), + norm_eval=False, + inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)), + zero_init_residual=False), + cls_head=dict( + type='I3DHead', + num_classes=101, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/ucf101/videos' +data_root_val = 'data/ucf101/videos' +ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt' +ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt' +ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='DenseSampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.8), + random_crop=False, + max_wh_scale_gap=0), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + 
dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, # set to 8 for training + workers_per_gpu=4, # set to 4 for training + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + start_index=0, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.001, momentum=0.9, # change from 0.01 to 0.001 + weight_decay=0.0001, nesterov=True) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 # change from 100 to 50 +checkpoint_config = dict(interval=10) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/finetune_ucf101_i3d_dnn/' +load_from = 'https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb_20200725-24eb54cc.pth' # model path can be found in model zoo +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/finetune_ucf101_i3d_edlnokl.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/finetune_ucf101_i3d_edlnokl.py new file mode 100644 index 0000000..f2157f4 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/finetune_ucf101_i3d_edlnokl.py @@ -0,0 +1,137 @@ +# model settings +evidence_loss = dict(type='EvidenceLoss', + num_classes=101, + evidence='exp', + loss_type='log', + with_kldiv=False, + with_avuloss=False, + annealing_method='exp') +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3d', + pretrained2d=True, + pretrained='torchvision://resnet50', + depth=50, + conv_cfg=dict(type='Conv3d'), + norm_eval=False, + inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)), + zero_init_residual=False), + cls_head=dict( + type='I3DHead', + loss_cls=evidence_loss, + num_classes=101, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='evidence', evidence_type='exp') +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/ucf101/videos' +data_root_val = 'data/ucf101/videos' +ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt' +ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt' +ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='DenseSampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='OpenCVDecode'), + 
dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.8), + random_crop=False, + max_wh_scale_gap=0), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, # set to 2 for evaluation on GPU with 24GB + workers_per_gpu=4, # set to 2 for evaluation on GPU with 24GB + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + start_index=0, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.001, momentum=0.9, # change from 0.01 to 0.001 + weight_decay=0.0001, nesterov=True) +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) # change from [40,80] to [20,40] +total_epochs = 50 # change from 100 to 50 +checkpoint_config = dict(interval=10) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +annealing_runner = True +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/finetune_ucf101_i3d_edlnokl/' +load_from = 'https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb_20200725-24eb54cc.pth' # model path can be found in model zoo +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/finetune_ucf101_i3d_edlnokl_avuc_debias.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/finetune_ucf101_i3d_edlnokl_avuc_debias.py new file mode 100644 index 0000000..d166647 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/finetune_ucf101_i3d_edlnokl_avuc_debias.py @@ -0,0 +1,145 @@ +# model settings +evidence_loss = dict(type='EvidenceLoss', + num_classes=101, + evidence='exp', + loss_type='log', + 
with_kldiv=False, + with_avuloss=True, + annealing_method='exp') +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3d', + pretrained2d=True, + pretrained='torchvision://resnet50', + depth=50, + conv_cfg=dict(type='Conv3d'), + norm_eval=False, + inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)), + zero_init_residual=False), + cls_head=dict( + type='I3DHead', + loss_cls=evidence_loss, + num_classes=101, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01), + debias_head=dict( + type='DebiasHead', + loss_cls=evidence_loss, # actually not used! + loss_factor=0.1, + num_classes=101, + in_channels=2048, + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='evidence', evidence_type='exp') +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/ucf101/videos' +data_root_val = 'data/ucf101/videos' +ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt' +ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt' +ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='DenseSampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.8), + random_crop=False, + max_wh_scale_gap=0), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, # set to 2 for evaluation on GPU with 24GB + workers_per_gpu=4, # set to 2 for evaluation on GPU with 24GB + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + start_index=0, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.001, momentum=0.9, # change from 0.01 to 0.001 + weight_decay=0.0001, nesterov=True) 
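+# NOTE (editorial sketch, not part of the original config): as in the sibling
+# UCF-101 finetuning configs, lr=0.001 is assumed to target 8 GPUs with
+# videos_per_gpu=8 (effective batch size 64); scale it linearly for a different
+# setup, per the Linear Scaling Rule referenced in the I3D README above.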
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) # change from [40,80] to [20,40] +total_epochs = 50 # change from 100 to 50 +checkpoint_config = dict(interval=10) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +annealing_runner = True +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/finetune_ucf101_i3d_edlnokl_avuc_debias/' +load_from = 'https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb_20200725-24eb54cc.pth' # model path can be found in model zoo +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/finetune_ucf101_i3d_rpl.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/finetune_ucf101_i3d_rpl.py new file mode 100644 index 0000000..e96d7f7 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/finetune_ucf101_i3d_rpl.py @@ -0,0 +1,131 @@ +# model settings +model = dict( + type='Recognizer3DRPL', + backbone=dict( + type='ResNet3d', + pretrained2d=True, + pretrained='torchvision://resnet50', + depth=50, + conv_cfg=dict(type='Conv3d'), + norm_eval=False, + inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)), + zero_init_residual=False), + cls_head=dict( + type='I3DRPLHead', + loss_cls=dict(type='RPLoss', + temperature=1, + weight_pl=0.1), + num_classes=101, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/ucf101/videos' +data_root_val = 'data/ucf101/videos' +ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt' +ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt' +ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='DenseSampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.8), + random_crop=False, + max_wh_scale_gap=0), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=32, + frame_interval=2, + 
num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, # set to 8 for training + workers_per_gpu=4, # set to 4 for training + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + start_index=0, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.001, momentum=0.9, # change from 0.01 to 0.001 + weight_decay=0.0001, nesterov=True) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 # change from 100 to 50 +checkpoint_config = dict(interval=10) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/finetune_ucf101_i3d_rpl/' +load_from = 'https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb_20200725-24eb54cc.pth' # model path can be found in model zoo +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb.py new file mode 100644 index 0000000..878bc7d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb.py @@ -0,0 +1,128 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3d', + pretrained2d=True, + pretrained='torchvision://resnet50', + depth=50, + conv_cfg=dict(type='Conv3d'), + norm_eval=False, + inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)), + non_local=((0, 0, 0), (0, 1, 0, 1), (0, 1, 0, 1, 0, 1), (0, 0, 0)), + non_local_cfg=dict( + sub_sample=True, + use_scale=False, + norm_cfg=dict(type='BN3d', requires_grad=True), + mode='dot_product'), + zero_init_residual=False), + cls_head=dict( + type='I3DHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + 
dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.8), + random_crop=False, + max_wh_scale_gap=0), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb.py new file mode 100644 index 0000000..73454b4 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb.py @@ -0,0 +1,128 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3d', + pretrained2d=True, + pretrained='torchvision://resnet50', + depth=50, + conv_cfg=dict(type='Conv3d'), + norm_eval=False, + inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)), + non_local=((0, 0, 0), (0, 1, 0, 1), (0, 1, 0, 1, 0, 1), (0, 0, 0)), + non_local_cfg=dict( + sub_sample=True, + use_scale=False, + norm_cfg=dict(type='BN3d', requires_grad=True), + 
mode='embedded_gaussian'), + zero_init_residual=False), + cls_head=dict( + type='I3DHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.8), + random_crop=False, + max_wh_scale_gap=0), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb/' # noqa: E501 +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb.py 
b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb.py new file mode 100644 index 0000000..5071f98 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb.py @@ -0,0 +1,128 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3d', + pretrained2d=True, + pretrained='torchvision://resnet50', + depth=50, + conv_cfg=dict(type='Conv3d'), + norm_eval=False, + inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)), + non_local=((0, 0, 0), (0, 1, 0, 1), (0, 1, 0, 1, 0, 1), (0, 0, 0)), + non_local_cfg=dict( + sub_sample=True, + use_scale=False, + norm_cfg=dict(type='BN3d', requires_grad=True), + mode='gaussian'), + zero_init_residual=False), + cls_head=dict( + type='I3DHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.8), + random_crop=False, + max_wh_scale_gap=0), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus 
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py new file mode 100644 index 0000000..5d279f2 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py @@ -0,0 +1,122 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3d', + pretrained2d=True, + pretrained='torchvision://resnet50', + depth=50, + conv_cfg=dict(type='Conv3d'), + norm_eval=False, + inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)), + zero_init_residual=False), + cls_head=dict( + type='I3DHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.8), + random_crop=False, + max_wh_scale_gap=0), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + 
videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/i3d_r50_32x2x1_100e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb.py new file mode 100644 index 0000000..962ca81 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb.py @@ -0,0 +1,122 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3d', + pretrained2d=True, + pretrained='torchvision://resnet50', + depth=50, + conv_cfg=dict(type='Conv3d'), + norm_eval=False, + inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)), + zero_init_residual=False), + cls_head=dict( + type='I3DHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='DenseSampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.8), + random_crop=False, + max_wh_scale_gap=0), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='DenseSampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + 
dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='DenseSampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/i3d_r50_dense_32x2x1_100e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_heavy_8x8x1_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_heavy_8x8x1_100e_kinetics400_rgb.py new file mode 100644 index 0000000..38fa186 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_heavy_8x8x1_100e_kinetics400_rgb.py @@ -0,0 +1,125 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3d', + pretrained2d=True, + pretrained='torchvision://resnet50', + depth=50, + conv_cfg=dict(type='Conv3d'), + norm_eval=False, + inflate=(1, 1, 1, 1), + conv1_stride_t=1, + pool1_stride_t=1, + with_pool2=True, + zero_init_residual=False), + cls_head=dict( + type='I3DHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.8), + random_crop=False, + max_wh_scale_gap=0), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + 
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/i3d_r50_heavy_8x8x1_100e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_lazy_32x2x1_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_lazy_32x2x1_100e_kinetics400_rgb.py new file mode 100644 index 0000000..d6d969d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_lazy_32x2x1_100e_kinetics400_rgb.py @@ -0,0 +1,122 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3d', + pretrained2d=True, + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)), + zero_init_residual=False), + cls_head=dict( + type='I3DHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) 
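+# Note: these are the standard ImageNet RGB statistics scaled to the 0-255
+# pixel range (e.g. 0.485 * 255 = 123.675, 0.229 * 255 = 58.395);
+# to_bgr=False keeps the decoded frames in RGB channel order.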
+train_pipeline = [ + dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='RawFrameDecode', decoding_backend='turbojpeg'), + dict(type='Resize', scale=(-1, 256), lazy=True), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.8), + random_crop=False, + max_wh_scale_gap=0, + lazy=True), + dict(type='Resize', scale=(224, 224), keep_ratio=False, lazy=True), + dict(type='Flip', flip_ratio=0.5, lazy=True), + dict(type='Fuse'), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode', decoding_backend='turbojpeg'), + dict(type='Resize', scale=(-1, 256), lazy=True), + dict(type='CenterCrop', crop_size=224, lazy=True), + dict(type='Flip', flip_ratio=0, lazy=True), + dict(type='Fuse'), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode', decoding_backend='turbojpeg'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, hooks=[ + dict(type='TextLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/i3d_r50_lazy_32x2x1_100e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_video_32x2x1_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_video_32x2x1_100e_kinetics400_rgb.py new file mode 100644 index 0000000..1a27f2f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_video_32x2x1_100e_kinetics400_rgb.py @@ -0,0 +1,125 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3d', + pretrained2d=True, + pretrained='torchvision://resnet50', + depth=50, + conv_cfg=dict(type='Conv3d'), + norm_eval=False, + inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)), + zero_init_residual=False), + cls_head=dict( + 
type='I3DHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/kinetics400/videos_train' +data_root_val = 'data/kinetics400/videos_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.8), + random_crop=False, + max_wh_scale_gap=0), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=10, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/i3d_r50_video_3d_32x2x1_100e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_video_heavy_8x8x1_100e_kinetics400_rgb.py 
b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_video_heavy_8x8x1_100e_kinetics400_rgb.py new file mode 100644 index 0000000..11ee7de --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_video_heavy_8x8x1_100e_kinetics400_rgb.py @@ -0,0 +1,128 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3d', + pretrained2d=True, + pretrained='torchvision://resnet50', + depth=50, + conv_cfg=dict(type='Conv3d'), + norm_eval=False, + inflate=(1, 1, 1, 1), + conv1_stride_t=1, + pool1_stride_t=1, + with_pool2=True, + zero_init_residual=False), + cls_head=dict( + type='I3DHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/kinetics400/videos_train' +data_root_val = 'data/kinetics400/videos_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.8), + random_crop=False, + max_wh_scale_gap=0), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) 
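+# Note: with the 'step' policy the base lr of 0.01 is decayed at epochs 40 and
+# 80; assuming mmcv's default gamma of 0.1 (no gamma is set here), that gives
+# 0.01 -> 0.001 -> 0.0001 over the 100-epoch schedule below.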
+total_epochs = 100 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/i3d_r50_video_heavy_8x8x1_100e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_video_inference_32x2x1_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_video_inference_32x2x1_100e_kinetics400_rgb.py new file mode 100644 index 0000000..9e5e83a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_video_inference_32x2x1_100e_kinetics400_rgb.py @@ -0,0 +1,49 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3d', + pretrained2d=True, + pretrained='torchvision://resnet50', + depth=50, + conv_cfg=dict(type='Conv3d'), + norm_eval=False, + inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)), + zero_init_residual=False), + cls_head=dict( + type='I3DHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'VideoDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict(type='DecordInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=1, + workers_per_gpu=2, + test=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + pipeline=test_pipeline)) diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/inference_i3d_bnn.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/inference_i3d_bnn.py new file mode 100644 index 0000000..df9c473 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/inference_i3d_bnn.py @@ -0,0 +1,49 @@ +# model settings +model = dict( + type='Recognizer3DBNN', + backbone=dict( + type='ResNet3d', + pretrained2d=True, + pretrained='torchvision://resnet50', + depth=50, + conv_cfg=dict(type='Conv3d'), + norm_eval=False, + inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)), + zero_init_residual=False), + cls_head=dict( + type='I3DBNNHead', + num_classes=101, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0, + init_std=0.01)) +# model training and testing settings +test_cfg = dict(average_clips='prob', npass=10) +# dataset settings +dataset_type = 'VideoDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', 
**img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + test=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + pipeline=test_pipeline)) diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/inference_i3d_dnn.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/inference_i3d_dnn.py new file mode 100644 index 0000000..f7c93e1 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/inference_i3d_dnn.py @@ -0,0 +1,49 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3d', + pretrained2d=True, + pretrained='torchvision://resnet50', + depth=50, + conv_cfg=dict(type='Conv3d'), + norm_eval=False, + inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)), + zero_init_residual=False), + cls_head=dict( + type='I3DHead', + num_classes=101, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'VideoDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + test=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + pipeline=test_pipeline)) diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/inference_i3d_enn.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/inference_i3d_enn.py new file mode 100644 index 0000000..034eda2 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/inference_i3d_enn.py @@ -0,0 +1,58 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3d', + pretrained2d=True, + pretrained='torchvision://resnet50', + # pretrained='work_dirs/i3d/finetune_ucf101_i3d_edlnokl/latest.pth', + # pretrained=False, + depth=50, + conv_cfg=dict(type='Conv3d'), + norm_eval=False, + inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)), + zero_init_residual=False), + cls_head=dict( + type='I3DHead', + loss_cls=dict(type='EvidenceLoss', + num_classes=101, + evidence='exp', + loss_type='log', + annealing_method='exp'), + num_classes=101, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +evidence='exp' # only used for EDL +test_cfg = dict(average_clips='score') +# dataset settings +dataset_type = 'VideoDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + 
dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + test=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + pipeline=test_pipeline)) +dist_params = dict(backend='nccl') \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/inference_i3d_rpl.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/inference_i3d_rpl.py new file mode 100644 index 0000000..f364ee4 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/inference_i3d_rpl.py @@ -0,0 +1,52 @@ +# model settings +model = dict( + type='Recognizer3DRPL', + backbone=dict( + type='ResNet3d', + pretrained2d=True, + pretrained='torchvision://resnet50', + depth=50, + conv_cfg=dict(type='Conv3d'), + norm_eval=False, + inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)), + zero_init_residual=False), + cls_head=dict( + type='I3DRPLHead', + loss_cls=dict(type='RPLoss', + temperature=1, + weight_pl=0.1), + num_classes=101, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'VideoDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + test=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + pipeline=test_pipeline)) diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/train_kinetics10_i3d_DEAR.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/train_kinetics10_i3d_DEAR.py new file mode 100644 index 0000000..4d6cd68 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/train_kinetics10_i3d_DEAR.py @@ -0,0 +1,145 @@ +# model settings +evidence_loss = dict(type='EvidenceLoss', + num_classes=10, + evidence='exp', + loss_type='log', + with_kldiv=False, + with_avuloss=True, + annealing_method='exp') +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3d', + pretrained2d=True, + pretrained='torchvision://resnet50', + depth=50, + conv_cfg=dict(type='Conv3d'), + norm_eval=False, + inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)), + zero_init_residual=False), + cls_head=dict( + type='I3DHead', + loss_cls=evidence_loss, + num_classes=10, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01), + debias_head=dict( + type='DebiasHead', + loss_cls=evidence_loss, # actually not used! 
+ loss_factor=0.1, + num_classes=10, + in_channels=2048, + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='evidence', evidence_type='exp') +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/kinetics10/videos_train' +data_root_val = 'data/kinetics10/videos_val' +ann_file_train = 'data/kinetics10/kinetics10_train_list_videos.txt' +ann_file_val = 'data/kinetics10/kinetics10_val_list_videos.txt' +ann_file_test = 'data/kinetics10/kinetics10_val_list_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='DenseSampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.8), + random_crop=False, + max_wh_scale_gap=0), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, # set to 2 for evaluation on GPU with 24GB + workers_per_gpu=4, # set to 2 for evaluation on GPU with 24GB + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + start_index=0, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.001, momentum=0.9, # change from 0.01 to 0.001 + weight_decay=0.0001, nesterov=True) +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) # change from [40,80] to [20,40] +total_epochs = 50 # change from 100 to 50 +checkpoint_config = dict(interval=10) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +annealing_runner = True +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = 
'./work_dirs/train_kinetics10_i3d_DEAR/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/train_kinetics10_i3d_DEAR_noDebias.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/train_kinetics10_i3d_DEAR_noDebias.py new file mode 100644 index 0000000..d4f2700 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/train_kinetics10_i3d_DEAR_noDebias.py @@ -0,0 +1,137 @@ +# model settings +evidence_loss = dict(type='EvidenceLoss', + num_classes=10, + evidence='exp', + loss_type='log', + with_kldiv=False, + with_avuloss=True, + annealing_method='exp') +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3d', + pretrained2d=True, + pretrained='torchvision://resnet50', + depth=50, + conv_cfg=dict(type='Conv3d'), + norm_eval=False, + inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)), + zero_init_residual=False), + cls_head=dict( + type='I3DHead', + loss_cls=evidence_loss, + num_classes=10, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='evidence', evidence_type='exp') +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/kinetics10/videos_train' +data_root_val = 'data/kinetics10/videos_val' +ann_file_train = 'data/kinetics10/kinetics10_train_list_videos.txt' +ann_file_val = 'data/kinetics10/kinetics10_val_list_videos.txt' +ann_file_test = 'data/kinetics10/kinetics10_val_list_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='DenseSampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.8), + random_crop=False, + max_wh_scale_gap=0), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, # set to 2 for evaluation on GPU with 24GB + workers_per_gpu=4, # set to 2 for evaluation on GPU with 24GB + train=dict( + type=dataset_type, + ann_file=ann_file_train, + 
data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + start_index=0, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.001, momentum=0.9, # change from 0.01 to 0.001 + weight_decay=0.0001, nesterov=True) +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) # change from [40,80] to [20,40] +total_epochs = 50 # change from 100 to 50 +checkpoint_config = dict(interval=10) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +annealing_runner = True +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/train_kinetics10_i3d_DEAR_noDebias/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/mae/finetune_ucf101_mae_edlnokl.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/mae/finetune_ucf101_mae_edlnokl.py new file mode 100644 index 0000000..4475381 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/mae/finetune_ucf101_mae_edlnokl.py @@ -0,0 +1,177 @@ +# model settings +evidence_loss = dict(type='EvidenceLoss', + num_classes=101, + evidence='exp', + loss_type='log', + with_kldiv=False, + with_avuloss=False, + annealing_method='exp') + +# mae huge ------------ +model = dict( + type='Recognizer3D', + backbone=dict( + type='VisionTransformer3D', + patch_size=16, + embed_dim=1280, + depth=32, + num_heads=16, + mlp_ratio=4, + qkv_bias=True, + num_classes=0, + pretrained='work_dirs/mae/finetune_ucf101_mae_dnn/huangbingkun/model/vit_h_hybridv2_pt_1200e_k700_ft_rep_2.pth' + ), + cls_head=dict( + type='BaseClsHead', + loss_cls=evidence_loss, + in_channels=1280, + num_classes=101, + dropout_ratio=0.5, + )) + + +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='evidence', evidence_type='exp') +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/ucf101/videos' +data_root_val = 'data/ucf101/videos' +ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt' +ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt' +ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + # dict(type='OpenCVInit', num_threads=1), + dict(type='DecordInit'), + dict(type='DenseSampleFrames', clip_len=32, frame_interval=2, num_clips=1), + # dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=32), + # dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), + # dict(type='OpenCVDecode'), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.8), + random_crop=False, + max_wh_scale_gap=0), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] 
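+# Note: FormatShape 'NCTHW' above packs each sample roughly as
+# (batch, channels, frames, height, width), so with clip_len=32 and the
+# 224x224 crop a training clip is a 3 x 32 x 224 x 224 tensor fed to the
+# VisionTransformer3D backbone.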
+val_pipeline = [ + # dict(type='OpenCVInit', num_threads=1), + dict(type='DecordInit'), + dict( + type='DenseSampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + # dict( + # type='SampleFrames', + # clip_len=1, + # frame_interval=1, + # num_clips=32, + # test_mode=True), + # dict( + # type='SampleFrames', + # clip_len=32, + # frame_interval=2, + # num_clips=1, + # test_mode=True), + # dict(type='OpenCVDecode'), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + # dict(type='OpenCVInit', num_threads=1), + dict(type='DecordInit'), + dict( + type='DenseSampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + # dict( + # type='SampleFrames', + # clip_len=1, + # frame_interval=1, + # num_clips=32, + # test_mode=True), + # dict( + # type='SampleFrames', + # clip_len=32, + # frame_interval=2, + # num_clips=1, + # test_mode=True), + # dict(type='OpenCVDecode'), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 224)), + dict(type='ThreeCrop', crop_size=224), + # dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=1, # set to 2 for evaluation on GPU with 24GB + workers_per_gpu=4, # set to 2 for evaluation on GPU with 24GB + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + start_index=0, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.001, momentum=0.9, # change from 0.01 to 0.001 + weight_decay=0.0001, nesterov=True) + +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) # change from [40,80] to [20,40] +total_epochs = 50 # change from 100 to 50 +checkpoint_config = dict(interval=10) +evaluation = dict( + interval=60, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +annealing_runner = True +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/finetune_ucf101_mae_edlnokl/' +# load_from = 'https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb_20200725-24eb54cc.pth' # model path can be found in model zoo +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/mae/inference_mae_enn.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/mae/inference_mae_enn.py new file mode 100644 index 0000000..2b03c7c --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/mae/inference_mae_enn.py @@ -0,0 +1,71 @@ +# 
model settings +evidence_loss = dict(type='EvidenceLoss', + num_classes=101, + evidence='exp', + loss_type='log', + with_kldiv=False, + with_avuloss=False, + annealing_method='exp') + +# mae huge +model = dict( + type='Recognizer3D', + backbone=dict( + type='VisionTransformer3D', + patch_size=16, + embed_dim=1280, + depth=32, + num_heads=16, + mlp_ratio=4, + qkv_bias=True, + num_classes=0, + # pretrained='work_dirs/mae/finetune_ucf101_mae_dnn/huangbingkun/model/vit_h_hybridv2_pt_1200e_k700_ft_rep_2.pth' + ), + cls_head=dict( + type='BaseClsHead', + loss_cls=evidence_loss, + in_channels=1280, + num_classes=101, + dropout_ratio=0.5, + )) + +# model training and testing settings +evidence='exp' # only used for EDL +test_cfg = dict(average_clips='score') +# dataset settings +dataset_type = 'VideoDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + # dict( + # type='SampleFrames', + # clip_len=1, + # frame_interval=1, + # num_clips=32, + # test_mode=True), + # dict(type='OpenCVDecode'), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 224)), + dict(type='ThreeCrop', crop_size=224), + # dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=4, + workers_per_gpu=4, + test=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + pipeline=test_pipeline)) +dist_params = dict(backend='nccl') \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/README.md b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/README.md new file mode 100644 index 0000000..00ea48d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/README.md @@ -0,0 +1,116 @@ +# Omni-sourced Webly-supervised Learning for Video Recognition + +[Haodong Duan](https://github.com/kennymckormick), [Yue Zhao](https://github.com/zhaoyue-zephyrus), [Yuanjun Xiong](https://github.com/yjxiong), Wentao Liu, [Dahua Lin](https://github.com/lindahua) + +In ECCV, 2020. [Paper](https://arxiv.org/abs/2003.13042) + +![pipeline](pipeline.png) + +### Kinetics-400 Model Release + +We currently released 4 models trained with OmniSource framework, including both 2D and 3D architectures. We compare the performance of models trained with or without OmniSource in the following table. 
+
+| Model | Modality | Pretrained | Backbone | Input | Resolution | Top-1 (Baseline / OmniSource (Delta)) | Top-5 (Baseline / OmniSource (Delta)) | Download |
+| :------: | :------: | :--------: | :-------: | :---: | :------------: | :-----------------------------------: | :------------------------------------: | :----------------------------------------------------------: |
+| TSN | RGB | ImageNet | ResNet50 | 3seg | 340x256 | 70.6 / 73.6 (+ 3.0) | 89.4 / 91.0 (+ 1.6) | [Baseline](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth) / [OmniSource](https://download.openmmlab.com/mmaction/recognition/tsn/omni/tsn_imagenet_pretrained_r50_omni_1x1x3_kinetics400_rgb_20200926-54192355.pth) |
+| TSN | RGB | IG-1B | ResNet50 | 3seg | short-side 320 | 73.1 / 75.7 (+ 2.6) | 90.4 / 91.9 (+ 1.5) | [Baseline](https://download.openmmlab.com/mmaction/recognition/tsn/omni/tsn_1G1B_pretrained_r50_without_omni_1x1x3_kinetics400_rgb_20200926-c133dd49.pth) / [OmniSource](https://download.openmmlab.com/mmaction/recognition/tsn/omni/tsn_1G1B_pretrained_r50_omni_1x1x3_kinetics400_rgb_20200926-2863fed0.pth) |
+| SlowOnly | RGB | Scratch | ResNet50 | 4x16 | short-side 320 | 72.9 / 76.8 (+ 3.9) | 90.9 / 92.5 (+ 1.6) | [Baseline](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb/slowonly_r50_4x16x1_256e_kinetics400_rgb_20200704-a69556c6.pth) / [OmniSource](https://download.openmmlab.com/mmaction/recognition/slowonly/omni/slowonly_r50_omni_4x16x1_kinetics400_rgb_20200926-51b1f7ea.pth) |
+| SlowOnly | RGB | Scratch | ResNet101 | 8x8 | short-side 320 | 76.5 / 80.4 (+ 3.9) | 92.7 / 94.4 (+ 1.7) | [Baseline](https://download.openmmlab.com/mmaction/recognition/slowonly/omni/slowonly_r101_without_omni_8x8x1_kinetics400_rgb_20200926-0c730aef.pth) / [OmniSource](https://download.openmmlab.com/mmaction/recognition/slowonly/omni/slowonly_r101_omni_8x8x1_kinetics400_rgb_20200926-b5dbb701.pth) |
+
+### Benchmark on Mini-Kinetics
+
+We release a subset of the web dataset used in the OmniSource paper. Specifically, we release the web data for the 200 classes of [Mini-Kinetics](https://arxiv.org/pdf/1712.04851.pdf). The statistics of these datasets are detailed in [preparing_omnisource](/tools/data/omnisource/README.md). To obtain the data, you need to fill in a [data request form](https://docs.google.com/forms/d/e/1FAIpQLSd8_GlmHzG8FcDbW-OEu__G7qLgOSYZpH-i5vYVJcu7wcb_TQ/viewform?usp=sf_link). After we receive your request, the download links will be sent to you. For more details on the released OmniSource web dataset, please refer to [preparing_omnisource](/tools/data/omnisource/README.md).
+
+We benchmark the OmniSource framework on the released subset; the results are listed in the following table (we report Top-1 and Top-5 accuracy on the Mini-Kinetics validation set). This benchmark can be used as a baseline for video recognition with web data.
+
+| Model | Baseline | +GG-img | +[GG-IG]-img | +IG-vid | +KRaw | OmniSource |
+| :---: | :------: | :-----: | :----------: | :-----: | :---: | :--------: |
+| TSN-8seg ResNet50 | 77.4 / 93.6 | 78.0 / 93.6 | 78.6 / 93.6 | 80.6 / 95.0 | 78.6 / 93.2 | 81.3 / 94.8 |
+| | ckpt / json / log | ckpt / json / log | ckpt / json / log | ckpt / json / log | ckpt / json / log | ckpt / json / log |
+| SlowOnly-8x8 ResNet50 | 78.6 / 93.9 | 80.8 / 95.0 | 81.3 / 95.2 | 82.4 / 95.6 | 80.3 / 94.5 | 82.9 / 95.8 |
+| | ckpt / json / log | ckpt / json / log | ckpt / json / log | ckpt / json / log | ckpt / json / log | ckpt / json / log |
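+
+The configs used for these Mini-Kinetics runs are added under `configs/recognition/omnisource/` in this commit. As a minimal sketch (assuming the mmcv / mmaction2-style APIs this codebase builds on; the checkpoint path is a placeholder), one of them can be loaded and turned into a recognizer like this:
+
+```
+from mmcv import Config
+from mmcv.runner import load_checkpoint
+
+from mmaction.models import build_model  # assumed builder entry point
+
+# Load one of the configs added in this commit.
+cfg = Config.fromfile(
+    'configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/'
+    'slowonly_r50_8x8x1_256e_minikinetics_googleimage_rgb.py')
+
+# Build the recognizer exactly as configured (SlowOnly-R50, 200 classes).
+model = build_model(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
+
+# Placeholder path: substitute a checkpoint downloaded via the table above.
+load_checkpoint(model, 'path/to/omnisource_checkpoint.pth', map_location='cpu')
+```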
+ +We also list the benchmark in the original paper which run on Kinetics-400 for comparison: + +| Model | Baseline | +GG-img | +[GG-IG]-img | +IG-vid | +KRaw | OmniSource | +| :--------------------: | :---------: | :---------: | :----------: | :---------: | :---------: | :---------: | +| TSN-3seg-ResNet50 | 70.6 / 89.4 | 71.5 / 89.5 | 72.0 / 90.0 | 72.0 / 90.3 | 71.7 / 89.6 | 73.6 / 91.0 | +| SlowOnly-4x16-ResNet50 | 73.8 / 90.9 | 74.5 / 91.4 | 75.2 / 91.6 | 75.2 / 91.7 | 74.5 / 91.1 | 76.6 / 92.5 | + +### Citing OmniSource + +If you find OmniSource useful for your research, please consider citing the paper using the following BibTeX entry. + +``` +@article{duan2020omni, + title={Omni-sourced Webly-supervised Learning for Video Recognition}, + author={Duan, Haodong and Zhao, Yue and Xiong, Yuanjun and Liu, Wentao and Lin, Dahua}, + journal={arXiv preprint arXiv:2003.13042}, + year={2020} +} +``` diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/pipeline.png b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/pipeline.png new file mode 100644 index 0000000..a3e3a2a Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/pipeline.png differ diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_googleimage_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_googleimage_rgb.py new file mode 100644 index 0000000..2dd3002 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_googleimage_rgb.py @@ -0,0 +1,149 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained=None, + lateral=False, + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + cls_head=dict( + type='I3DHead', + in_channels=2048, + num_classes=200, + spatial_type='avg', + dropout_ratio=0.5)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'VideoDataset' +# The flag indicates using joint training +omnisource = True + +data_root = 'data/OmniSource/kinetics_200_train' +data_root_val = 'data/OmniSource/kinetics_200_val' +gg_root = 'data/OmniSource/googleimage_200' + +ann_file_train = 'data/OmniSource/annotations/kinetics_200/k200_train.txt' +ann_file_gg = ('data/OmniSource/annotations/googleimage_200/' + 'tsn_8seg_googleimage_200_wodup.txt') + +ann_file_val = 'data/OmniSource/annotations/kinetics_200/k200_val.txt' +ann_file_test = 'data/OmniSource/annotations/kinetics_200/k200_val.txt' + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) + +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +train_gg_pipeline = [ + 
dict(type='ImageDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='BuildPseudoClip', clip_len=8), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] + +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +data = dict( + videos_per_gpu=12, + workers_per_gpu=2, + train=[ + dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + dict( + type='ImageDataset', + ann_file=ann_file_gg, + data_prefix=gg_root, + pipeline=train_gg_pipeline) + ], + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.15, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='CosineAnnealing', min_lr=0) +total_epochs = 256 +checkpoint_config = dict(interval=8) +workflow = [('train', 1)] +evaluation = dict( + interval=8, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, hooks=[ + dict(type='TextLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = ('./work_dirs/omnisource/' + 'slowonly_r50_8x8x1_256e_minikinetics_googleimage_rgb') +load_from = None +resume_from = None +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_insvideo_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_insvideo_rgb.py new file mode 100644 index 0000000..6725002 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_insvideo_rgb.py @@ -0,0 +1,153 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained=None, + lateral=False, + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + cls_head=dict( + type='I3DHead', + in_channels=2048, + num_classes=200, + spatial_type='avg', + 
dropout_ratio=0.5)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'VideoDataset' +# The flag indicates using joint training +omnisource = True + +data_root = 'data/OmniSource/kinetics_200_train' +data_root_val = 'data/OmniSource/kinetics_200_val' +iv_root = 'data/OmniSource/insvideo_200' + +ann_file_train = 'data/OmniSource/annotations/kinetics_200/k200_train.txt' +ann_file_iv = ('data/OmniSource/annotations/insvideo_200/' + 'slowonly_8x8_insvideo_200_wodup.txt') + +ann_file_val = 'data/OmniSource/annotations/kinetics_200/k200_val.txt' +ann_file_test = 'data/OmniSource/annotations/kinetics_200/k200_val.txt' + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) + +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +train_iv_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] + +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +data = dict( + videos_per_gpu=12, + workers_per_gpu=2, + train=[ + dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + dict( + type=dataset_type, + ann_file=ann_file_iv, + data_prefix=iv_root, + pipeline=train_iv_pipeline, + num_classes=200, + sample_by_class=True, + power=0.5) + ], + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.15, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = 
dict(policy='CosineAnnealing', min_lr=0) +total_epochs = 256 +checkpoint_config = dict(interval=8) +workflow = [('train', 1)] +evaluation = dict( + interval=8, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, hooks=[ + dict(type='TextLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = ('./work_dirs/omnisource/' + 'slowonly_r50_8x8x1_256e_minikinetics_insvideo_rgb') +load_from = None +resume_from = None +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_kineticsraw_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_kineticsraw_rgb.py new file mode 100644 index 0000000..f0ccd57 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_kineticsraw_rgb.py @@ -0,0 +1,152 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained=None, + lateral=False, + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + cls_head=dict( + type='I3DHead', + in_channels=2048, + num_classes=200, + spatial_type='avg', + dropout_ratio=0.5)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'VideoDataset' +# The flag indicates using joint training +omnisource = True + +data_root = 'data/OmniSource/kinetics_200_train' +data_root_val = 'data/OmniSource/kinetics_200_val' +kraw_root = 'data/OmniSource/kinetics_raw_200_train' + +ann_file_train = 'data/OmniSource/annotations/kinetics_200/k200_train.txt' +ann_file_kraw = ('data/OmniSource/annotations/kinetics_raw_200/' + 'slowonly_8x8_kinetics_raw_200.json') + +ann_file_val = 'data/OmniSource/annotations/kinetics_200/k200_val.txt' +ann_file_test = 'data/OmniSource/annotations/kinetics_200/k200_val.txt' + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) + +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +train_kraw_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] + +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + 
dict(type='CenterCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +data = dict( + videos_per_gpu=12, + workers_per_gpu=2, + train=[ + dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + dict( + type='RawVideoDataset', + ann_file=ann_file_kraw, + data_prefix=kraw_root, + pipeline=train_kraw_pipeline, + clipname_tmpl='part_{}.mp4', + sampling_strategy='positive') + ], + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.15, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='CosineAnnealing', min_lr=0) +total_epochs = 256 +checkpoint_config = dict(interval=8) +workflow = [('train', 1)] +evaluation = dict( + interval=8, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, hooks=[ + dict(type='TextLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = ('./work_dirs/omnisource/' + 'slowonly_r50_8x8x1_256e_minikinetics_kineticsraw_rgb') +load_from = None +resume_from = None +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_omnisource_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_omnisource_rgb.py new file mode 100644 index 0000000..9aeed38 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_omnisource_rgb.py @@ -0,0 +1,200 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained=None, + lateral=False, + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + cls_head=dict( + type='I3DHead', + in_channels=2048, + num_classes=200, + spatial_type='avg', + dropout_ratio=0.5)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'VideoDataset' +# The flag indicates using joint training +omnisource = True + +data_root = 'data/OmniSource/kinetics_200_train' +data_root_val = 'data/OmniSource/kinetics_200_val' +web_root = 'data/OmniSource/' +iv_root = 'data/OmniSource/insvideo_200' +kraw_root = 'data/OmniSource/kinetics_raw_200_train' + +ann_file_train = 'data/OmniSource/annotations/kinetics_200/k200_train.txt' +ann_file_web = ('data/OmniSource/annotations/webimage_200/' + 
'tsn_8seg_webimage_200_wodup.txt') +ann_file_iv = ('data/OmniSource/annotations/insvideo_200/' + 'slowonly_8x8_insvideo_200_wodup.txt') +ann_file_kraw = ('data/OmniSource/annotations/kinetics_raw_200/' + 'slowonly_8x8_kinetics_raw_200.json') + +ann_file_val = 'data/OmniSource/annotations/kinetics_200/k200_val.txt' +ann_file_test = 'data/OmniSource/annotations/kinetics_200/k200_val.txt' + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) + +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +train_web_pipeline = [ + dict(type='ImageDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='BuildPseudoClip', clip_len=8), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +train_iv_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +train_kraw_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] + +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +data = dict( + videos_per_gpu=12, + workers_per_gpu=1, + train_ratio=[2, 1, 1, 1], + train=[ + dict( + 
type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + dict( + type='ImageDataset', + ann_file=ann_file_web, + data_prefix=web_root, + pipeline=train_web_pipeline, + num_classes=200, + sample_by_class=True, + power=0.5), + dict( + type=dataset_type, + ann_file=ann_file_iv, + data_prefix=iv_root, + pipeline=train_iv_pipeline, + num_classes=200, + sample_by_class=True, + power=0.5), + dict( + type='RawVideoDataset', + ann_file=ann_file_kraw, + data_prefix=kraw_root, + pipeline=train_kraw_pipeline, + clipname_tmpl='part_{}.mp4', + sampling_strategy='positive') + ], + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.15, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='CosineAnnealing', min_lr=0) +total_epochs = 256 +checkpoint_config = dict(interval=8) +workflow = [('train', 1)] +evaluation = dict( + interval=8, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, hooks=[ + dict(type='TextLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = ('./work_dirs/omnisource/' + 'slowonly_r50_8x8x1_256e_minikinetics_omnisource_rgb') +load_from = None +resume_from = None +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_rgb.py new file mode 100644 index 0000000..5b12fac --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_rgb.py @@ -0,0 +1,125 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained=None, + lateral=False, + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + cls_head=dict( + type='I3DHead', + in_channels=2048, + num_classes=200, + spatial_type='avg', + dropout_ratio=0.5)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'VideoDataset' +# The flag indicates using joint training +omnisource = True + +data_root = 'data/OmniSource/kinetics_200_train' +data_root_val = 'data/OmniSource/kinetics_200_val' + +ann_file_train = 'data/OmniSource/annotations/kinetics_200/k200_train.txt' +ann_file_val = 'data/OmniSource/annotations/kinetics_200/k200_val.txt' +ann_file_test = 'data/OmniSource/annotations/kinetics_200/k200_val.txt' + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) + +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + 
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] + +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +data = dict( + videos_per_gpu=12, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.15, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='CosineAnnealing', min_lr=0) +total_epochs = 256 +checkpoint_config = dict(interval=8) +workflow = [('train', 1)] +evaluation = dict( + interval=8, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, hooks=[ + dict(type='TextLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb' +load_from = None +resume_from = None +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_webimage_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_webimage_rgb.py new file mode 100644 index 0000000..62fafcf --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_webimage_rgb.py @@ -0,0 +1,152 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained=None, + lateral=False, + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + cls_head=dict( + type='I3DHead', + in_channels=2048, + num_classes=200, + spatial_type='avg', + dropout_ratio=0.5)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'VideoDataset' +# The flag indicates using joint training +omnisource = True + +data_root = 'data/OmniSource/kinetics_200_train' +data_root_val = 'data/OmniSource/kinetics_200_val' +web_root = 'data/OmniSource/' + +ann_file_train = 'data/OmniSource/annotations/kinetics_200/k200_train.txt' +ann_file_web = 
('data/OmniSource/annotations/webimage_200/' + 'tsn_8seg_webimage_200_wodup.txt') + +ann_file_val = 'data/OmniSource/annotations/kinetics_200/k200_val.txt' +ann_file_test = 'data/OmniSource/annotations/kinetics_200/k200_val.txt' + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) + +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +train_web_pipeline = [ + dict(type='ImageDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='BuildPseudoClip', clip_len=8), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] + +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +data = dict( + videos_per_gpu=12, + workers_per_gpu=2, + train=[ + dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + dict( + type='ImageDataset', + ann_file=ann_file_web, + data_prefix=web_root, + pipeline=train_web_pipeline, + num_classes=200, + sample_by_class=True, + power=0.5) + ], + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.15, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='CosineAnnealing', min_lr=0) +total_epochs = 256 +checkpoint_config = dict(interval=8) +workflow = [('train', 1)] +evaluation = dict( + interval=8, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, hooks=[ + dict(type='TextLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = ('./work_dirs/omnisource/' + 'slowonly_r50_8x8x1_256e_minikinetics_webimage_rgb') +load_from = None +resume_from = None +find_unused_parameters = False diff --git 
a/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_googleimage_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_googleimage_rgb.py new file mode 100644 index 0000000..3599651 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_googleimage_rgb.py @@ -0,0 +1,146 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=200, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +omnisource = True +# dataset settings +dataset_type = 'VideoDataset' +# The flag indicates using joint training +omnisource = True + +data_root = 'data/OmniSource/kinetics_200_train' +data_root_val = 'data/OmniSource/kinetics_200_val' +gg_root = 'data/OmniSource/googleimage_200' + +ann_file_train = 'data/OmniSource/annotations/kinetics_200/k200_train.txt' +ann_file_gg = ('data/OmniSource/annotations/googleimage_200/' + 'tsn_8seg_googleimage_200_wodup.txt') + +ann_file_val = 'data/OmniSource/annotations/kinetics_200/k200_val.txt' +ann_file_test = 'data/OmniSource/annotations/kinetics_200/k200_val.txt' + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) + +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +train_gg_pipeline = [ + dict(type='ImageDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] + +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +data = 
dict( + videos_per_gpu=12, + omni_videos_per_gpu=[12, 64], + workers_per_gpu=2, + train=[ + dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + dict( + type='ImageDataset', + ann_file=ann_file_gg, + data_prefix=gg_root, + pipeline=train_gg_pipeline) + ], + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.00375, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, hooks=[ + dict(type='TextLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = ('./work_dirs/omnisource/' + 'tsn_r50_1x1x8_100e_minikinetics_googleimage_rgb') +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_insvideo_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_insvideo_rgb.py new file mode 100644 index 0000000..9a51b51 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_insvideo_rgb.py @@ -0,0 +1,150 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=200, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +omnisource = True +# dataset settings +dataset_type = 'VideoDataset' +# The flag indicates using joint training +omnisource = True + +data_root = 'data/OmniSource/kinetics_200_train' +data_root_val = 'data/OmniSource/kinetics_200_val' +iv_root = 'data/OmniSource/insvideo_200' + +ann_file_train = 'data/OmniSource/annotations/kinetics_200/k200_train.txt' +ann_file_iv = ('data/OmniSource/annotations/insvideo_200/' + 'slowonly_8x8_insvideo_200_wodup.txt') + +ann_file_val = 'data/OmniSource/annotations/kinetics_200/k200_val.txt' +ann_file_test = 'data/OmniSource/annotations/kinetics_200/k200_val.txt' + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) + +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +train_iv_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=1, frame_interval=1, 
num_clips=8), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] + +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +data = dict( + videos_per_gpu=12, + workers_per_gpu=2, + train=[ + dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + dict( + type=dataset_type, + ann_file=ann_file_iv, + data_prefix=iv_root, + pipeline=train_iv_pipeline, + num_classes=200, + sample_by_class=True, + power=0.5) + ], + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.00375, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, hooks=[ + dict(type='TextLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = ('./work_dirs/omnisource/' + 'tsn_r50_1x1x8_100e_minikinetics_insvideo_rgb') +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_kineticsraw_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_kineticsraw_rgb.py new file mode 100644 index 0000000..a70b4ff --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_kineticsraw_rgb.py @@ -0,0 +1,149 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=200, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = 
dict(average_clips=None) +omnisource = True +# dataset settings +dataset_type = 'VideoDataset' +# The flag indicates using joint training +omnisource = True + +data_root = 'data/OmniSource/kinetics_200_train' +data_root_val = 'data/OmniSource/kinetics_200_val' +kraw_root = 'data/OmniSource/kinetics_raw_200_train' + +ann_file_train = 'data/OmniSource/annotations/kinetics_200/k200_train.txt' +ann_file_kraw = ('data/OmniSource/annotations/kinetics_raw_200/' + 'slowonly_8x8_kinetics_raw_200.json') + +ann_file_val = 'data/OmniSource/annotations/kinetics_200/k200_val.txt' +ann_file_test = 'data/OmniSource/annotations/kinetics_200/k200_val.txt' + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) + +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +train_kraw_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] + +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +data = dict( + videos_per_gpu=12, + workers_per_gpu=2, + train=[ + dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + dict( + type='RawVideoDataset', + ann_file=ann_file_kraw, + data_prefix=kraw_root, + pipeline=train_kraw_pipeline, + clipname_tmpl='part_{}.mp4', + sampling_strategy='positive') + ], + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.00375, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) 
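+# A rule-of-thumb note (a hedged sketch, not taken from the original config):
+# the SGD lr above (0.00375, see the optimizer block) is stated to be for 8 GPUs,
+# with videos_per_gpu=12. Following the Linear Scaling Rule
+# (https://arxiv.org/abs/1706.02677), scale it with the effective batch size
+# if the hardware setup changes, e.g.
+#   lr = 0.00375 * (num_gpus * videos_per_gpu) / (8 * 12)
+# so 4 GPUs x 12 videos/GPU would use about 0.001875. The auxiliary raw-video
+# source sampled alongside Kinetics changes the effective batch composition,
+# so treat this only as a starting point.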
+total_epochs = 100 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, hooks=[ + dict(type='TextLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = ('./work_dirs/omnisource/' + 'tsn_r50_1x1x8_100e_minikinetics_kineticsraw_rgb') +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_omnisource_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_omnisource_rgb.py new file mode 100644 index 0000000..f084944 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_omnisource_rgb.py @@ -0,0 +1,197 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=200, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +omnisource = True +# dataset settings +dataset_type = 'VideoDataset' +# The flag indicates using joint training +omnisource = True + +data_root = 'data/OmniSource/kinetics_200_train' +data_root_val = 'data/OmniSource/kinetics_200_val' +web_root = 'data/OmniSource/' +iv_root = 'data/OmniSource/insvideo_200' +kraw_root = 'data/OmniSource/kinetics_raw_200_train' + +ann_file_train = 'data/OmniSource/annotations/kinetics_200/k200_train.txt' +ann_file_web = ('data/OmniSource/annotations/webimage_200/' + 'tsn_8seg_webimage_200_wodup.txt') +ann_file_iv = ('data/OmniSource/annotations/insvideo_200/' + 'slowonly_8x8_insvideo_200_wodup.txt') +ann_file_kraw = ('data/OmniSource/annotations/kinetics_raw_200/' + 'slowonly_8x8_kinetics_raw_200.json') + +ann_file_val = 'data/OmniSource/annotations/kinetics_200/k200_val.txt' +ann_file_test = 'data/OmniSource/annotations/kinetics_200/k200_val.txt' + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) + +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +train_web_pipeline = [ + dict(type='ImageDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +train_iv_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 
256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +train_kraw_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] + +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +data = dict( + videos_per_gpu=12, + omni_videos_per_gpu=[12, 64, 12, 12], + train_ratio=[2, 1, 1, 1], + workers_per_gpu=1, + train=[ + dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + dict( + type='ImageDataset', + ann_file=ann_file_web, + data_prefix=web_root, + pipeline=train_web_pipeline, + num_classes=200, + sample_by_class=True, + power=0.5), + dict( + type=dataset_type, + ann_file=ann_file_iv, + data_prefix=iv_root, + pipeline=train_iv_pipeline, + num_classes=200, + sample_by_class=True, + power=0.5), + dict( + type='RawVideoDataset', + ann_file=ann_file_kraw, + data_prefix=kraw_root, + pipeline=train_kraw_pipeline, + clipname_tmpl='part_{}.mp4', + sampling_strategy='positive') + ], + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.00375, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, hooks=[ + dict(type='TextLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = ('./work_dirs/omnisource/' + 'tsn_r50_1x1x8_100e_minikinetics_omnisource_rgb') +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git 
a/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_rgb.py new file mode 100644 index 0000000..00d390c --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_rgb.py @@ -0,0 +1,120 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=200, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'VideoDataset' +# The flag indicates using joint training + +data_root = 'data/OmniSource/kinetics_200_train' +data_root_val = 'data/OmniSource/kinetics_200_val' + +ann_file_train = 'data/OmniSource/annotations/kinetics_200/k200_train.txt' +ann_file_val = 'data/OmniSource/annotations/kinetics_200/k200_val.txt' +ann_file_test = 'data/OmniSource/annotations/kinetics_200/k200_val.txt' + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) + +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] + +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +data = dict( + videos_per_gpu=12, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.00375, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 
80]) +total_epochs = 100 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, hooks=[ + dict(type='TextLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_webimage_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_webimage_rgb.py new file mode 100644 index 0000000..4502bdc --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_webimage_rgb.py @@ -0,0 +1,149 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=200, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +omnisource = True +# dataset settings +dataset_type = 'VideoDataset' +# The flag indicates using joint training +omnisource = True + +data_root = 'data/OmniSource/kinetics_200_train' +data_root_val = 'data/OmniSource/kinetics_200_val' +web_root = 'data/OmniSource/' + +ann_file_train = 'data/OmniSource/annotations/kinetics_200/k200_train.txt' +ann_file_web = ('data/OmniSource/annotations/webimage_200/' + 'tsn_8seg_webimage_200_wodup.txt') + +ann_file_val = 'data/OmniSource/annotations/kinetics_200/k200_val.txt' +ann_file_test = 'data/OmniSource/annotations/kinetics_200/k200_val.txt' + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) + +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +train_web_pipeline = [ + dict(type='ImageDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] + +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +test_pipeline = [ + dict(type='DecordInit'), + dict( + 
type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +data = dict( + videos_per_gpu=12, + omni_videos_per_gpu=[12, 64], + workers_per_gpu=2, + train=[ + dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + dict( + type='ImageDataset', + ann_file=ann_file_web, + data_prefix=web_root, + pipeline=train_web_pipeline, + num_classes=200, + sample_by_class=True, + power=0.5) + ], + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.00375, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, hooks=[ + dict(type='TextLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = ('./work_dirs/omnisource/' + 'tsn_r50_1x1x8_100e_minikinetics_webimage_rgb') +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/r2plus1d/README.md b/Downstream/Open-Set-Action-Recognition/configs/recognition/r2plus1d/README.md new file mode 100644 index 0000000..51d64cb --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/r2plus1d/README.md @@ -0,0 +1,62 @@ +# R2plus1D + +## Introduction +``` +@inproceedings{tran2018closer, + title={A closer look at spatiotemporal convolutions for action recognition}, + author={Tran, Du and Wang, Heng and Torresani, Lorenzo and Ray, Jamie and LeCun, Yann and Paluri, Manohar}, + booktitle={Proceedings of the IEEE conference on Computer Vision and Pattern Recognition}, + pages={6450--6459}, + year={2018} +} +``` + +## Model Zoo + +### Kinetics-400 + +|config | resolution | gpus | backbone | pretrain| top1 acc| top5 acc | inference_time(video/s) | gpu_mem(M) | ckpt | log| json| +|:--|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:| +|[r2plus1d_r34_8x8x1_180e_kinetics400_rgb](/configs/recognition/r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb.py) | short-side 256|8x4| ResNet34|None |67.30|87.65|x|5019|[ckpt](https://download.openmmlab.com/mmaction/recognition/r2plus1d/r2plus1d_r34_256p_8x8x1_180e_kinetics400_rgb/r2plus1d_r34_256p_8x8x1_180e_kinetics400_rgb_20200729-aa94765e.pth)|[log](https://download.openmmlab.com/mmaction/recognition/r2plus1d/r2plus1d_r34_256p_8x8x1_180e_kinetics400_rgb/20200728_021421.log)|[json](https://download.openmmlab.com/mmaction/recognition/r2plus1d/r2plus1d_r34_256p_8x8x1_180e_kinetics400_rgb/20200728_021421.log.json)| +|[r2plus1d_r34_video_8x8x1_180e_kinetics400_rgb](/configs/recognition/r2plus1d/r2plus1d_r34_video_8x8x1_180e_kinetics400_rgb.py) | short-side 256|8| ResNet34|None 
|67.3|87.8|x|5019|[ckpt](https://download.openmmlab.com/mmaction/recognition/r2plus1d/r2plus1d_r34_video_8x8x1_180e_kinetics400_rgb/r2plus1d_r34_video_8x8x1_180e_kinetics400_rgb_20200826-ab35a529.pth)|[log](https://download.openmmlab.com/mmaction/recognition/r2plus1d/r2plus1d_r34_video_8x8x1_180e_kinetics400_rgb/20200724_201360.log)|[json](https://download.openmmlab.com/mmaction/recognition/r2plus1d/r2plus1d_r34_video_8x8x1_180e_kinetics400_rgb/20200724_201360.log.json)|
+|[r2plus1d_r34_8x8x1_180e_kinetics400_rgb](/configs/recognition/r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb.py) | short-side 320|8x2| ResNet34|None |68.68|88.36|1.6 (80x3 frames)|5019|[ckpt](https://download.openmmlab.com/mmaction/recognition/r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb/r2plus1d_r34_8x8x1_180e_kinetics400_rgb_20200618-3fce5629.pth)| [log](https://download.openmmlab.com/mmaction/recognition/r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb/r21d_8x8.log)| [json](https://download.openmmlab.com/mmaction/recognition/r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb/r2plus1d_r34_8x8_69.58_88.36.log.json)|
+|[r2plus1d_r34_32x2x1_180e_kinetics400_rgb](/configs/recognition/r2plus1d/r2plus1d_r34_32x2x1_180e_kinetics400_rgb.py) |short-side 320|8x2| ResNet34|None |74.60|91.59|0.5 (320x3 frames)|12975| [ckpt](https://download.openmmlab.com/mmaction/recognition/r2plus1d/r2plus1d_r34_32x2x1_180e_kinetics400_rgb/r2plus1d_r34_32x2x1_180e_kinetics400_rgb_20200618-63462eb3.pth) | [log](https://download.openmmlab.com/mmaction/recognition/r2plus1d/r2plus1d_r34_32x2x1_180e_kinetics400_rgb/r21d_32x2.log)| [json](https://download.openmmlab.com/mmaction/recognition/r2plus1d/r2plus1d_r34_32x2x1_180e_kinetics400_rgb/r2plus1d_r34_32x2_74.6_91.6.log.json)|
+
+Notes:
+1. The **gpus** column indicates the number of GPUs we used to obtain the checkpoint. Note that the provided configs assume 8 GPUs by default.
+According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may scale the learning rate proportionally to the total batch size if you use a different number of GPUs or videos per GPU,
+e.g., lr=0.01 for 4 GPUs * 2 video/gpu and lr=0.08 for 16 GPUs * 4 video/gpu.
+2. The **inference_time** is measured with this [benchmark script](/tools/analysis/benchmark.py), which uses the frame-sampling strategy of the test setting and measures only the model inference time,
+excluding IO and pre-processing time. For each setting, we use 1 GPU with a batch size (videos per GPU) of 1.
+
+For more details on data preparation, you can refer to Kinetics400 in [Data Preparation](/docs/data_preparation.md).
+
+## Train
+You can use the following command to train a model.
+```shell
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+```
+
+Example: train the R(2+1)D model on the Kinetics-400 dataset with deterministic training and periodic validation.
+```shell
+python tools/train.py configs/recognition/r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb.py \
+    --work-dir work_dirs/r2plus1d_r34_3d_8x8x1_180e_kinetics400_rgb \
+    --validate --seed 0 --deterministic
+```
+
+For more details, you can refer to the **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
+
+## Test
+You can use the following command to test a model.
+```shell
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+```
+
+Example: test the R(2+1)D model on the Kinetics-400 dataset and dump the result to a json file.
+```shell +python tools/test.py configs/recognition/r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb.py \ + checkpoints/SOME_CHECKPOINT.pth --eval top_k_accuracy mean_class_accuracy \ + --out result.json --average-clips=prob +``` + +For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset). diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/r2plus1d/r2plus1d_r34_32x2x1_180e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/r2plus1d/r2plus1d_r34_32x2x1_180e_kinetics400_rgb.py new file mode 100644 index 0000000..0ef54e5 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/r2plus1d/r2plus1d_r34_32x2x1_180e_kinetics400_rgb.py @@ -0,0 +1,126 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet2Plus1d', + depth=34, + pretrained=None, + pretrained2d=False, + norm_eval=False, + conv_cfg=dict(type='Conv2plus1d'), + norm_cfg=dict(type='SyncBN', requires_grad=True, eps=1e-3), + conv1_kernel=(3, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(1, 1, 1, 1), + spatial_strides=(1, 2, 2, 2), + temporal_strides=(1, 2, 2, 2), + zero_init_residual=False), + cls_head=dict( + type='I3DHead', + num_classes=400, + in_channels=512, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=6, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + 
type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline, + test_mode=True), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=test_pipeline, + test_mode=True)) +# optimizer +optimizer = dict( + type='SGD', lr=0.075, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='CosineAnnealing', min_lr=0) +total_epochs = 180 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/r2plus1d_r34_3d_32x2x1_180e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb.py new file mode 100644 index 0000000..8b540d0 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb.py @@ -0,0 +1,126 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet2Plus1d', + depth=34, + pretrained=None, + pretrained2d=False, + norm_eval=False, + conv_cfg=dict(type='Conv2plus1d'), + norm_cfg=dict(type='SyncBN', requires_grad=True, eps=1e-3), + conv1_kernel=(3, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(1, 1, 1, 1), + spatial_strides=(1, 2, 2, 2), + temporal_strides=(1, 2, 2, 2), + zero_init_residual=False), + cls_head=dict( + type='I3DHead', + num_classes=400, + in_channels=512, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) 
+] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline, + test_mode=True), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=test_pipeline, + test_mode=True)) +# optimizer +optimizer = dict( + type='SGD', lr=0.1, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='CosineAnnealing', min_lr=0) +total_epochs = 180 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/r2plus1d_r34_8x8x1_180e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/r2plus1d/r2plus1d_r34_video_8x8x1_180e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/r2plus1d/r2plus1d_r34_video_8x8x1_180e_kinetics400_rgb.py new file mode 100644 index 0000000..8282b94 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/r2plus1d/r2plus1d_r34_video_8x8x1_180e_kinetics400_rgb.py @@ -0,0 +1,130 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet2Plus1d', + depth=34, + pretrained=None, + pretrained2d=False, + norm_eval=False, + conv_cfg=dict(type='Conv2plus1d'), + norm_cfg=dict(type='SyncBN', requires_grad=True, eps=1e-3), + act_cfg=dict(type='ReLU'), + conv1_kernel=(3, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(1, 1, 1, 1), + spatial_strides=(1, 2, 2, 2), + temporal_strides=(1, 2, 2, 2), + zero_init_residual=False), + cls_head=dict( + type='I3DHead', + num_classes=400, + in_channels=512, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/kinetics400/videos_train' +data_root_val = 'data/kinetics400/videos_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + 
dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=16, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline, + test_mode=True), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=test_pipeline, + test_mode=True)) +# optimizer +optimizer = dict( + type='SGD', lr=0.2, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='CosineAnnealing', min_lr=0) +total_epochs = 180 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/r2plus1d_r34_video_3d_8x8x1_180e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/r2plus1d/r2plus1d_r34_video_inference_8x8x1_180e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/r2plus1d/r2plus1d_r34_video_inference_8x8x1_180e_kinetics400_rgb.py new file mode 100644 index 0000000..daf0434 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/r2plus1d/r2plus1d_r34_video_inference_8x8x1_180e_kinetics400_rgb.py @@ -0,0 +1,56 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet2Plus1d', + depth=34, + pretrained=None, + pretrained2d=False, + norm_eval=False, + conv_cfg=dict(type='Conv2plus1d'), + norm_cfg=dict(type='SyncBN', requires_grad=True, eps=1e-3), + act_cfg=dict(type='ReLU'), + conv1_kernel=(3, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(1, 1, 1, 1), + spatial_strides=(1, 2, 2, 2), + temporal_strides=(1, 2, 2, 2), + zero_init_residual=False), + cls_head=dict( + type='I3DHead', + num_classes=400, + in_channels=512, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings 
+test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'VideoDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=1, + workers_per_gpu=2, + test=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + pipeline=test_pipeline)) diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/README.md b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/README.md new file mode 100644 index 0000000..6e38fd6 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/README.md @@ -0,0 +1,63 @@ +# SlowFast + +## Introduction +``` +@inproceedings{feichtenhofer2019slowfast, + title={Slowfast networks for video recognition}, + author={Feichtenhofer, Christoph and Fan, Haoqi and Malik, Jitendra and He, Kaiming}, + booktitle={Proceedings of the IEEE international conference on computer vision}, + pages={6202--6211}, + year={2019} +} +``` + +## Model Zoo + +### Kinetics-400 + +|config | resolution | gpus | backbone |pretrain| top1 acc| top5 acc | inference_time(video/s) | gpu_mem(M) | ckpt | log| json| +|:--|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:| +|[slowfast_r50_4x16x1_256e_kinetics400_rgb](/configs/recognition/slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb.py) |short-side 256|8x4| ResNet50|None |74.75|91.73|x|6203|[ckpt](https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_256p_4x16x1_256e_kinetics400_rgb/slowfast_r50_256p_4x16x1_256e_kinetics400_rgb_20200728-145f1097.pth)|[log](https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_256p_4x16x1_256e_kinetics400_rgb/20200731_151706.log)|[json](https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_256p_4x16x1_256e_kinetics400_rgb/20200731_151706.log.json)| +|[slowfast_r50_video_4x16x1_256e_kinetics400_rgb](/configs/recognition/slowfast/slowfast_r50_video_4x16x1_256e_kinetics400_rgb.py) |short-side 256|8| ResNet50|None |74.34|91.58|x|6203|[ckpt](https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_video_4x16x1_256e_kinetics400_rgb/slowfast_r50_video_4x16x1_256e_kinetics400_rgb_20200826-f85b90c5.pth)|[log](https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_video_4x16x1_256e_kinetics400_rgb/20200812_160237.log)|[json](https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_video_4x16x1_256e_kinetics400_rgb/20200812_160237.log.json)| +|[slowfast_r50_4x16x1_256e_kinetics400_rgb](/configs/recognition/slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb.py) |short-side 320|8x3| ResNet50|None |75.64|92.3|1.6 ((32+4)x10x3 frames)|6203|[ckpt](https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb/slowfast_r50_4x16x1_256e_kinetics400_rgb_20200704-bcde7ed7.pth)| [log](https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb/20200704_232901.log)| 
[json](https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb/20200704_232901.log.json)|
+|[slowfast_r50_8x8x1_256e_kinetics400_rgb](/configs/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb.py) |short-side 256|8x4| ResNet50 |None |75.61|92.34|x|9062|[ckpt](https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_256p_8x8x1_256e_kinetics400_rgb/slowfast_r50_256p_8x8x1_256e_kinetics400_rgb_20200810-863812c2.pth)|[log](https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_256p_8x8x1_256e_kinetics400_rgb/20200731_151537.log)|[json](https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_256p_8x8x1_256e_kinetics400_rgb/20200731_151537.log.json)|
+|[slowfast_r50_8x8x1_256e_kinetics400_rgb](/configs/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb.py) |short-side 320|8x3| ResNet50 |None|76.94|92.8|1.3 ((32+8)x10x3 frames)|9062| [ckpt](https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb/slowfast_r50_8x8x1_256e_kinetics400_rgb_20200716-73547d2b.pth) | [log](https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb/20200716_192653.log)| [json](https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb/20200716_192653.log.json)|
+
+Notes:
+1. The **gpus** column indicates the number of GPUs we used to obtain the checkpoint. Note that the provided configs assume 8 GPUs by default.
+According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may scale the learning rate proportionally to the total batch size if you use a different number of GPUs or videos per GPU,
+e.g., lr=0.01 for 4 GPUs * 2 video/gpu and lr=0.08 for 16 GPUs * 4 video/gpu. A small helper sketch of this rule is given at the end of this README.
+2. The **inference_time** is measured with this [benchmark script](/tools/analysis/benchmark.py), which uses the frame-sampling strategy of the test setting and measures only the model inference time,
+excluding IO and pre-processing time. For each setting, we use 1 GPU with a batch size (videos per GPU) of 1.
+
+For more details on data preparation, you can refer to Kinetics400 in [Data Preparation](/docs/data_preparation.md).
+
+## Train
+You can use the following command to train a model.
+```shell
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+```
+
+Example: train the SlowFast model on the Kinetics-400 dataset with deterministic training and periodic validation.
+```shell
+python tools/train.py configs/recognition/slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb.py \
+    --work-dir work_dirs/slowfast_r50_4x16x1_256e_kinetics400_rgb \
+    --validate --seed 0 --deterministic
+```
+
+For more details, you can refer to the **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
+
+## Test
+You can use the following command to test a model.
+```shell
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+```
+
+Example: test the SlowFast model on the Kinetics-400 dataset and dump the result to a json file.
+```shell
+python tools/test.py configs/recognition/slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb.py \
+    checkpoints/SOME_CHECKPOINT.pth --eval top_k_accuracy mean_class_accuracy \
+    --out result.json --average-clips=prob
+```
+
+For more details, you can refer to the **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
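+
+The Linear Scaling Rule from note 1 can be written as a small helper. The sketch below is illustrative only, not an official utility: the function name `scale_lr` is ours, and the example values are taken from the note above and from `slowfast_r50_8x8x1_256e_kinetics400_rgb.py` (lr=0.1 for 8 GPUs with videos_per_gpu=8).
+
+```python
+def scale_lr(base_lr, base_total_batch, num_gpus, videos_per_gpu):
+    """Scale the learning rate linearly with the effective batch size."""
+    return base_lr * (num_gpus * videos_per_gpu) / base_total_batch
+
+# lr=0.01 is quoted for 4 GPUs * 2 videos/GPU (total batch size 8):
+print(scale_lr(0.01, 8, 16, 4))  # -> 0.08 for 16 GPUs * 4 videos/GPU
+
+# slowfast_r50_8x8x1 uses lr=0.1 for 8 GPUs * 8 videos/GPU (total batch size 64):
+print(scale_lr(0.1, 64, 8, 4))   # -> 0.05 if videos_per_gpu is halved to 4
+```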
diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/finetune_ucf101_slowfast_bnn.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/finetune_ucf101_slowfast_bnn.py new file mode 100644 index 0000000..3209b61 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/finetune_ucf101_slowfast_bnn.py @@ -0,0 +1,143 @@ +model = dict( + type='Recognizer3DBNN', + backbone=dict( + type='ResNet3dSlowFast', + pretrained=None, + resample_rate=4, # tau + speed_ratio=4, # alpha + channel_ratio=8, # beta_inv + slow_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=True, + fusion_kernel=7, + conv1_kernel=(1, 7, 7), + dilations=(1, 1, 1, 1), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + fast_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=False, + base_channels=8, + conv1_kernel=(5, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + norm_eval=False)), + cls_head=dict( + type='SlowFastBNNHead', + in_channels=2304, # 2048+256 + num_classes=101, + spatial_type='avg', + dropout_ratio=0)) +train_cfg = dict(loss_weight=1e-6, npass=2) +test_cfg = dict(average_clips='prob', npass=10) +dataset_type = 'VideoDataset' +data_root = 'data/ucf101/videos' +data_root_val = 'data/ucf101/videos' +ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt' +ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt' +ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=10, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, 
+ data_prefix=data_root_val, + start_index=0, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.001, momentum=0.9, + weight_decay=0.0001, nesterov=True) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_by_epoch=True, + warmup_iters=5) +total_epochs = 50 +checkpoint_config = dict(interval=10) +workflow = [('train', 1)] +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/finetune_ucf101_slowfast_bnn' +load_from = 'https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb/slowfast_r50_8x8x1_256e_kinetics400_rgb_20200716-73547d2b.pth' +resume_from = None +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/finetune_ucf101_slowfast_dnn.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/finetune_ucf101_slowfast_dnn.py new file mode 100644 index 0000000..e11baab --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/finetune_ucf101_slowfast_dnn.py @@ -0,0 +1,143 @@ +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowFast', + pretrained=None, + resample_rate=4, # tau + speed_ratio=4, # alpha + channel_ratio=8, # beta_inv + slow_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=True, + fusion_kernel=7, + conv1_kernel=(1, 7, 7), + dilations=(1, 1, 1, 1), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + fast_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=False, + base_channels=8, + conv1_kernel=(5, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + norm_eval=False)), + cls_head=dict( + type='SlowFastHead', + in_channels=2304, # 2048+256 + num_classes=101, + spatial_type='avg', + dropout_ratio=0.5)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'VideoDataset' +data_root = 'data/ucf101/videos' +data_root_val = 'data/ucf101/videos' +ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt' +ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt' +ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', 
input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=10, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + start_index=0, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.001, momentum=0.9, + weight_decay=0.0001, nesterov=True) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_by_epoch=True, + warmup_iters=5) +total_epochs = 50 +checkpoint_config = dict(interval=10) +workflow = [('train', 1)] +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/finetune_ucf101_slowfast_dnn' +load_from = 'https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb/slowfast_r50_8x8x1_256e_kinetics400_rgb_20200716-73547d2b.pth' +resume_from = None +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/finetune_ucf101_slowfast_edlnokl_avuc_debias.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/finetune_ucf101_slowfast_edlnokl_avuc_debias.py new file mode 100644 index 0000000..18e1afa --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/finetune_ucf101_slowfast_edlnokl_avuc_debias.py @@ -0,0 +1,161 @@ +# model settings +evidence_loss = dict(type='EvidenceLoss', + num_classes=101, + evidence='exp', + loss_type='log', + with_kldiv=False, + with_avuloss=True, + annealing_method='exp') +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowFast', + pretrained=None, + resample_rate=4, # tau + speed_ratio=4, # alpha + channel_ratio=8, # beta_inv + slow_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=True, + fusion_kernel=7, + conv1_kernel=(1, 7, 7), + dilations=(1, 1, 1, 1), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + fast_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=False, + base_channels=8, + conv1_kernel=(5, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + norm_eval=False)), + cls_head=dict( + type='SlowFastHead', + loss_cls=evidence_loss, + in_channels=2304, # 2048+256 + num_classes=101, + spatial_type='avg', + dropout_ratio=0.5), + debias_head=dict( + type='DebiasHead', + loss_cls=evidence_loss, # actually not used! 
+ loss_factor=0.1, + num_classes=101, + in_channels=2048, # only slow features are debiased + dropout_ratio=0.5, + init_std=0.01)) +train_cfg = None +test_cfg = dict(average_clips='evidence', evidence_type='exp') +dataset_type = 'VideoDataset' +data_root = 'data/ucf101/videos' +data_root_val = 'data/ucf101/videos' +ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt' +ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt' +ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=10, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + start_index=0, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.001, momentum=0.9, + weight_decay=0.0001, nesterov=True) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_by_epoch=True, + warmup_iters=5) +total_epochs = 50 +checkpoint_config = dict(interval=10) +workflow = [('train', 1)] +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +annealing_runner = True +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/finetune_ucf101_slowfast_dnn' +load_from = 'https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb/slowfast_r50_8x8x1_256e_kinetics400_rgb_20200716-73547d2b.pth' +resume_from = 
None +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/finetune_ucf101_slowfast_rpl.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/finetune_ucf101_slowfast_rpl.py new file mode 100644 index 0000000..cbbdedd --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/finetune_ucf101_slowfast_rpl.py @@ -0,0 +1,146 @@ +model = dict( + type='Recognizer3DRPL', + backbone=dict( + type='ResNet3dSlowFast', + pretrained=None, + resample_rate=4, # tau + speed_ratio=4, # alpha + channel_ratio=8, # beta_inv + slow_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=True, + fusion_kernel=7, + conv1_kernel=(1, 7, 7), + dilations=(1, 1, 1, 1), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + fast_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=False, + base_channels=8, + conv1_kernel=(5, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + norm_eval=False)), + cls_head=dict( + type='SlowFastRPLHead', + loss_cls=dict(type='RPLoss', + temperature=1, + weight_pl=0.1), + in_channels=2304, # 2048+256 + num_classes=101, + spatial_type='avg', + dropout_ratio=0.5)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'VideoDataset' +data_root = 'data/ucf101/videos' +data_root_val = 'data/ucf101/videos' +ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt' +ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt' +ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=10, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + 
pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + start_index=0, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.001, momentum=0.9, + weight_decay=0.0001, nesterov=True) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_by_epoch=True, + warmup_iters=5) +total_epochs = 50 +checkpoint_config = dict(interval=10) +workflow = [('train', 1)] +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/finetune_ucf101_slowfast_rpl' +load_from = 'https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb/slowfast_r50_8x8x1_256e_kinetics400_rgb_20200716-73547d2b.pth' +resume_from = None +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/inference_slowfast_bnn.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/inference_slowfast_bnn.py new file mode 100644 index 0000000..8d2f181 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/inference_slowfast_bnn.py @@ -0,0 +1,66 @@ +model = dict( + type='Recognizer3DBNN', + backbone=dict( + type='ResNet3dSlowFast', + pretrained=None, + resample_rate=4, # tau + speed_ratio=4, # alpha + channel_ratio=8, # beta_inv + slow_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=True, + fusion_kernel=7, + conv1_kernel=(1, 7, 7), + dilations=(1, 1, 1, 1), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + fast_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=False, + base_channels=8, + conv1_kernel=(5, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + norm_eval=False)), + cls_head=dict( + type='SlowFastBNNHead', + in_channels=2304, # 2048+256 + num_classes=101, + spatial_type='avg', + dropout_ratio=0)) +test_cfg = dict(average_clips='prob', npass=10) +dataset_type = 'VideoDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=10, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=2, + workers_per_gpu=2, + test=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + start_index=0, + pipeline=test_pipeline)) diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/inference_slowfast_dnn.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/inference_slowfast_dnn.py new file mode 100644 index 0000000..028841a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/inference_slowfast_dnn.py @@ -0,0 +1,66 @@ +model = dict( + type='Recognizer3D', + backbone=dict( + 
type='ResNet3dSlowFast', + pretrained=None, + resample_rate=4, # tau + speed_ratio=4, # alpha + channel_ratio=8, # beta_inv + slow_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=True, + fusion_kernel=7, + conv1_kernel=(1, 7, 7), + dilations=(1, 1, 1, 1), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + fast_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=False, + base_channels=8, + conv1_kernel=(5, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + norm_eval=False)), + cls_head=dict( + type='SlowFastHead', + in_channels=2304, # 2048+256 + num_classes=101, + spatial_type='avg', + dropout_ratio=0.5)) +test_cfg = dict(average_clips='prob') +dataset_type = 'VideoDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=10, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=2, + workers_per_gpu=2, + test=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + start_index=0, + pipeline=test_pipeline)) diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/inference_slowfast_enn.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/inference_slowfast_enn.py new file mode 100644 index 0000000..3c12f7e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/inference_slowfast_enn.py @@ -0,0 +1,75 @@ +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowFast', + pretrained=None, + resample_rate=4, # tau + speed_ratio=4, # alpha + channel_ratio=8, # beta_inv + slow_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=True, + fusion_kernel=7, + conv1_kernel=(1, 7, 7), + dilations=(1, 1, 1, 1), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + fast_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=False, + base_channels=8, + conv1_kernel=(5, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + norm_eval=False)), + cls_head=dict( + type='SlowFastHead', + loss_cls=dict(type='EvidenceLoss', + num_classes=101, + evidence='exp', + loss_type='log', + annealing_method='exp'), + in_channels=2304, # 2048+256 + num_classes=101, + spatial_type='avg', + dropout_ratio=0.5)) +evidence='exp' # only used for EDL +test_cfg = dict(average_clips='score') +dataset_type = 'VideoDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + # dict(type='OpenCVInit', num_threads=1), + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=10, + test_mode=True), + # dict(type='OpenCVDecode'), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] 
+data = dict( + videos_per_gpu=8, + workers_per_gpu=2, + test=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + start_index=0, + pipeline=test_pipeline)) +dist_params = dict(backend='nccl') \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/inference_slowfast_rpl.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/inference_slowfast_rpl.py new file mode 100644 index 0000000..414dc8e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/inference_slowfast_rpl.py @@ -0,0 +1,69 @@ +model = dict( + type='Recognizer3DRPL', + backbone=dict( + type='ResNet3dSlowFast', + pretrained=None, + resample_rate=4, # tau + speed_ratio=4, # alpha + channel_ratio=8, # beta_inv + slow_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=True, + fusion_kernel=7, + conv1_kernel=(1, 7, 7), + dilations=(1, 1, 1, 1), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + fast_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=False, + base_channels=8, + conv1_kernel=(5, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + norm_eval=False)), + cls_head=dict( + type='SlowFastRPLHead', + loss_cls=dict(type='RPLoss', + temperature=1, + weight_pl=0.1), + in_channels=2304, # 2048+256 + num_classes=101, + spatial_type='avg', + dropout_ratio=0.5)) +test_cfg = dict(average_clips='prob') +dataset_type = 'VideoDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=10, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=2, + workers_per_gpu=2, + test=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + start_index=0, + pipeline=test_pipeline)) diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb.py new file mode 100644 index 0000000..1adbf2c --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb.py @@ -0,0 +1,136 @@ +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowFast', + pretrained=None, + resample_rate=8, # tau + speed_ratio=8, # alpha + channel_ratio=8, # beta_inv + slow_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=True, + conv1_kernel=(1, 7, 7), + dilations=(1, 1, 1, 1), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + fast_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=False, + base_channels=8, + conv1_kernel=(5, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + norm_eval=False)), + cls_head=dict( + type='SlowFastHead', + in_channels=2304, # 2048+256 + num_classes=400, + spatial_type='avg', + dropout_ratio=0.5)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'RawframeDataset' +data_root = 
'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.1, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_by_epoch=True, + warmup_iters=34) +total_epochs = 256 +checkpoint_config = dict(interval=4) +workflow = [('train', 1)] +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/slowfast_r50_3d_4x16x1_256e_kinetics400_rgb' +load_from = None +resume_from = None +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb.py new file mode 100644 index 0000000..94486b7 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb.py @@ -0,0 +1,137 @@ +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowFast', + 
pretrained=None, + resample_rate=4, # tau + speed_ratio=4, # alpha + channel_ratio=8, # beta_inv + slow_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=True, + fusion_kernel=7, + conv1_kernel=(1, 7, 7), + dilations=(1, 1, 1, 1), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + fast_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=False, + base_channels=8, + conv1_kernel=(5, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + norm_eval=False)), + cls_head=dict( + type='SlowFastHead', + in_channels=2304, # 2048+256 + num_classes=400, + spatial_type='avg', + dropout_ratio=0.5)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.1, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_by_epoch=True, + warmup_iters=34) +total_epochs = 256 +checkpoint_config = dict(interval=4) +workflow = [('train', 1)] +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + 
dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/slowfast_r50_3d_8x8x1_256e_kinetics400_rgb' +load_from = None +resume_from = None +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/slowfast_r50_video_4x16x1_256e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/slowfast_r50_video_4x16x1_256e_kinetics400_rgb.py new file mode 100644 index 0000000..4dba180 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/slowfast_r50_video_4x16x1_256e_kinetics400_rgb.py @@ -0,0 +1,139 @@ +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowFast', + pretrained=None, + resample_rate=8, # tau + speed_ratio=8, # alpha + channel_ratio=8, # beta_inv + slow_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=True, + conv1_kernel=(1, 7, 7), + dilations=(1, 1, 1, 1), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + fast_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=False, + base_channels=8, + conv1_kernel=(5, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + norm_eval=False)), + cls_head=dict( + type='SlowFastHead', + in_channels=2304, # 2048+256 + num_classes=400, + spatial_type='avg', + dropout_ratio=0.5)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'VideoDataset' +data_root = 'data/kinetics400/videos_train' +data_root_val = 'data/kinetics400/videos_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=10, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + 
data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.1, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_by_epoch=True, + warmup_iters=34) +total_epochs = 256 +checkpoint_config = dict(interval=4) +workflow = [('train', 1)] +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/slowfast_r50_video_3d_4x16x1_256e_kinetics400_rgb' +load_from = None +resume_from = None +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/slowfast_r50_video_inference_4x16x1_256e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/slowfast_r50_video_inference_4x16x1_256e_kinetics400_rgb.py new file mode 100644 index 0000000..a1f1d03 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/slowfast_r50_video_inference_4x16x1_256e_kinetics400_rgb.py @@ -0,0 +1,68 @@ +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowFast', + pretrained=None, + resample_rate=8, # tau + speed_ratio=8, # alpha + channel_ratio=8, # beta_inv + slow_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=True, + conv1_kernel=(1, 7, 7), + dilations=(1, 1, 1, 1), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + fast_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=False, + base_channels=8, + conv1_kernel=(5, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + norm_eval=False)), + cls_head=dict( + type='SlowFastHead', + in_channels=2304, # 2048+256 + num_classes=400, + spatial_type='avg', + dropout_ratio=0.5)) + +# model training and testing settings +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'VideoDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) + +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=10, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +data = dict( + videos_per_gpu=1, + workers_per_gpu=2, + test=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + pipeline=test_pipeline)) diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/README.md b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/README.md new file mode 100644 index 0000000..a0c48ac --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/README.md @@ -0,0 +1,106 @@ +# SlowOnly + +## Introduction +``` +@inproceedings{feichtenhofer2019slowfast, + 
title={Slowfast networks for video recognition}, + author={Feichtenhofer, Christoph and Fan, Haoqi and Malik, Jitendra and He, Kaiming}, + booktitle={Proceedings of the IEEE international conference on computer vision}, + pages={6202--6211}, + year={2019} +} +``` + +## Model Zoo + +### Kinetics-400 + +|config | resolution | gpus | backbone |pretrain| top1 acc| top5 acc | inference_time(video/s) | gpu_mem(M) | ckpt | log| json| +|:--|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:| +|[slowonly_r50_4x16x1_256e_kinetics400_rgb](/configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb.py)|short-side 256|8x4| ResNet50 | None |72.76|90.51|x|3168|[ckpt](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_256p_4x16x1_256e_kinetics400_rgb/slowonly_r50_256p_4x16x1_256e_kinetics400_rgb_20200820-bea7701f.pth)|[log](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_256p_4x16x1_256e_kinetics400_rgb/20200817_001411.log)|[json](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_256p_4x16x1_256e_kinetics400_rgb/20200817_001411.log.json)| +|[slowonly_r50_video_4x16x1_256e_kinetics400_rgb](/configs/recognition/slowonly/slowonly_r50_video_4x16x1_256e_kinetics400_rgb.py)|short-side 320|8x2| ResNet50 | None |72.90|90.82|x|8472|[ckpt](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_video_320p_4x16x1_256e_kinetics400_rgb/slowonly_r50_video_320p_4x16x1_256e_kinetics400_rgb_20201014-c9cdc656.pth)|[log](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_video_320p_4x16x1_256e_kinetics400_rgb/slowonly_r50_video_320p_4x16x1_256e_kinetics400_rgb_20201014.log)|[json](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_video_320p_4x16x1_256e_kinetics400_rgb/slowonly_r50_video_320p_4x16x1_256e_kinetics400_rgb_20201014.json)| +|[slowonly_r50_8x8x1_256e_kinetics400_rgb](/configs/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_rgb.py) |short-side 256|8x4| ResNet50 | None |74.42|91.49|x|5820|[ckpt](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_256p_8x8x1_256e_kinetics400_rgb/slowonly_r50_256p_8x8x1_256e_kinetics400_rgb_20200820-75851a7d.pth)|[log](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_256p_8x8x1_256e_kinetics400_rgb/20200817_003320.log)|[json](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_256p_8x8x1_256e_kinetics400_rgb/20200817_003320.log.json)| +|[slowonly_r50_4x16x1_256e_kinetics400_rgb](/configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb.py)|short-side 320|8x2| ResNet50 | None |73.02|90.77|4.0 (40x3 frames)|3168|[ckpt](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb/slowonly_r50_4x16x1_256e_kinetics400_rgb_20200704-a69556c6.pth)| [log](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb/so_4x16.log)| [json](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb/slowonly_r50_4x16_73.02_90.77.log.json)| +|[slowonly_r50_8x8x1_256e_kinetics400_rgb](/configs/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_rgb.py) |short-side 320|8x3| ResNet50 | None |74.93|91.92|2.3 (80x3 frames)|5820| [ckpt](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_rgb/slowonly_r50_8x8x1_256e_kinetics400_rgb_20200703-a79c555a.pth) | 
[log](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_rgb/so_8x8.log)| [json](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_rgb/slowonly_r50_8x8_74.93_91.92.log.json)| +|[slowonly_imagenet_pretrained_r50_4x16x1_150e_kinetics400_rgb](/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_4x16x1_150e_kinetics400_rgb.py)|short-side 320|8x2| ResNet50 | ImageNet |73.39|91.12|x|3168|[ckpt](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_imagenet_pretrained_r50_4x16x1_150e_kinetics400_rgb/slowonly_imagenet_pretrained_r50_4x16x1_150e_kinetics400_rgb_20200912-1e8fc736.pth)|[log](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_imagenet_pretrained_r50_4x16x1_150e_kinetics400_rgb/slowonly_imagenet_pretrained_r50_4x16x1_150e_kinetics400_rgb_20200912.log)|[json](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_imagenet_pretrained_r50_4x16x1_150e_kinetics400_rgb/slowonly_imagenet_pretrained_r50_4x16x1_150e_kinetics400_rgb_20200912.json)| +|[slowonly_imagenet_pretrained_r50_8x8x1_150e_kinetics400_rgb](/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_8x8x1_150e_kinetics400_rgb.py) |short-side 320|8x4| ResNet50 | ImageNet |75.55|92.04|x|5820|[ckpt](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_imagenet_pretrained_r50_8x8x1_150e_kinetics400_rgb/slowonly_imagenet_pretrained_r50_8x8x1_150e_kinetics400_rgb_20200912-3f9ce182.pth)|[log](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_imagenet_pretrained_r50_8x8x1_150e_kinetics400_rgb/slowonly_imagenet_pretrained_r50_8x8x1_150e_kinetics400_rgb_20200912.log)|[json](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_imagenet_pretrained_r50_8x8x1_150e_kinetics400_rgb/slowonly_imagenet_pretrained_r50_8x8x1_150e_kinetics400_rgb_20200912.json)| +|[slowonly_r50_4x16x1_256e_kinetics400_flow](/configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_flow.py)|short-side 320|8x2| ResNet50 | ImageNet |61.79|83.62|x|8450| [ckpt](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_flow/slowonly_r50_4x16x1_256e_kinetics400_flow_20200704-decb8568.pth) | [log](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_flow/slowonly_r50_4x16x1_256e_kinetics400_flow_61.8_83.6.log) | [json](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_flow/slowonly_r50_4x16x1_256e_kinetics400_flow_61.8_83.6.log.json)| +|[slowonly_r50_8x8x1_196e_kinetics400_flow](/configs/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_flow.py) |short-side 320|8x4| ResNet50 | ImageNet |65.76|86.25|x|8455| [ckpt](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_flow/slowonly_r50_8x8x1_256e_kinetics400_flow_20200704-6b384243.pth) | [log](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_flow/slowonly_r50_8x8x1_196e_kinetics400_flow_65.8_86.3.log) | [json](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_flow/slowonly_r50_8x8x1_196e_kinetics400_flow_65.8_86.3.log.json)| + +### Kinetics-400 Data Benchmark +In the data benchmark, we compare three different data preprocessing methods: (1) Resize video to 340x256, (2) Resize the short edge of video to 320px, (3) Resize the short edge of 
video to 256px. + +| config | resolution | gpus | backbone | Input | pretrain | top1 acc | top5 acc | testing protocol | ckpt | log | json | +| :----------------------------------------------------------- | :------------: | :--: | :------: | :---: | :------: | :------: | :------: | :----------------: | :----------------------------------------------------------: | :----------------------------------------------------------: | :----------------------------------------------------------: | +| [slowonly_r50_randomresizedcrop_340x256_4x16x1_256e_kinetics400_rgb](/configs/recognition/slowonly/data_benchmark/slowonly_r50_randomresizedcrop_340x256_4x16x1_256e_kinetics400_rgb.py) | 340x256 | 8x2 | ResNet50 | 4x16 | None | 71.61 | 90.05 | 10 clips x 3 crops | [ckpt](https://download.openmmlab.com/mmaction/recognition/slowonly/data_benchmark/slowonly_r50_randomresizedcrop_340x256_4x16x1_256e_kinetics400_rgb/slowonly_r50_randomresizedcrop_340x256_4x16x1_256e_kinetics400_rgb_20200803-dadca1a3.pth) | [log](https://download.openmmlab.com/mmaction/recognition/slowonly/data_benchmark/slowonly_r50_randomresizedcrop_340x256_4x16x1_256e_kinetics400_rgb/slowonly_r50_randomresizedcrop_340x256_4x16x1_256e_kinetics400_rgb_20200803.log) | [json](https://download.openmmlab.com/mmaction/recognition/slowonly/data_benchmark/slowonly_r50_randomresizedcrop_340x256_4x16x1_256e_kinetics400_rgb/slowonly_r50_randomresizedcrop_340x256_4x16x1_256e_kinetics400_rgb_20200803.json) | +| [slowonly_r50_randomresizedcrop_320p_4x16x1_256e_kinetics400_rgb](/configs/recognition/slowonly/data_benchmark/slowonly_r50_randomresizedcrop_320p_4x16x1_256e_kinetics400_rgb.py) | short-side 320 | 8x2 | ResNet50 | 4x16 | None | 73.02 | 90.77 | 10 clips x 3 crops | [ckpt](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb/slowonly_r50_4x16x1_256e_kinetics400_rgb_20200704-a69556c6.pth) | [log](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb/so_4x16.log) | [json](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb/slowonly_r50_4x16_73.02_90.77.log.json) | +| [slowonly_r50_randomresizedcrop_256p_4x16x1_256e_kinetics400_rgb](/configs/recognition/slowonly/data_benchmark/slowonly_r50_randomresizedcrop_256p_4x16x1_256e_kinetics400_rgb.py) | short-side 256 | 8x4 | ResNet50 | 4x16 | None | 72.76 | 90.51 | 10 clips x 3 crops | [ckpt](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_256p_4x16x1_256e_kinetics400_rgb/slowonly_r50_256p_4x16x1_256e_kinetics400_rgb_20200820-bea7701f.pth) | [log](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_256p_4x16x1_256e_kinetics400_rgb/20200817_001411.log) | [json](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_256p_4x16x1_256e_kinetics400_rgb/20200817_001411.log.json) | + +### Kinetics-400 OmniSource Experiments + +| config | resolution | backbone | pretrain | w. 
OmniSource | top1 acc | top5 acc | ckpt | log | json | +| :----------------------------------------------------------: | :------------: | :-------: | :------: | :----------------: | :------: | :------: | :----------------------------------------------------------: | :----------------------------------------------------------: | :----------------------------------------------------------: | +| [slowonly_r50_4x16x1_256e_kinetics400_rgb](/configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb.py) | short-side 320 | ResNet50 | None | :x: | 73.0 | 90.8 | [ckpt](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb/slowonly_r50_4x16x1_256e_kinetics400_rgb_20200704-a69556c6.pth) | [log](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb/so_4x16.log) | [json](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb/slowonly_r50_4x16_73.02_90.77.log.json) | +| x | x | ResNet50 | None | :heavy_check_mark: | 76.8 | 92.5 | [ckpt](https://download.openmmlab.com/mmaction/recognition/slowonly/omni/slowonly_r50_omni_4x16x1_kinetics400_rgb_20200926-51b1f7ea.pth) | x | x | +| [slowonly_r101_8x8x1_196e_kinetics400_rgb](/configs/recognition/slowonly/slowonly_r101_8x8x1_196e_kinetics400_rgb.py) | x | ResNet101 | None | :x: | 76.5 | 92.7 | [ckpt](https://download.openmmlab.com/mmaction/recognition/slowonly/omni/slowonly_r101_without_omni_8x8x1_kinetics400_rgb_20200926-0c730aef.pth) | x | x | +| x | x | ResNet101 | None | :heavy_check_mark: | 80.4 | 94.4 | [ckpt](https://download.openmmlab.com/mmaction/recognition/slowonly/omni/slowonly_r101_omni_8x8x1_kinetics400_rgb_20200926-b5dbb701.pth) | x | x | + +### Kinetics-600 + +| config | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | ckpt | log | json | +| :----------------------------------------------------------- | :------------: | :--: | :------: | :------: | :------: | :------: | :----------------------------------------------------------: | :----------------------------------------------------------: | :----------------------------------------------------------: | +| [slowonly_r50_video_8x8x1_256e_kinetics600_rgb](/configs/recognition/slowonly/slowonly_r50_video_8x8x1_256e_kinetics600_rgb.py) | short-side 256 | 8x4 | ResNet50 | None | 77.5 | 93.7 | [ckpt](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_video_8x8x1_256e_kinetics600_rgb/slowonly_r50_video_8x8x1_256e_kinetics600_rgb_20201015-81e5153e.pth) | [log](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_video_8x8x1_256e_kinetics600_rgb/slowonly_r50_video_8x8x1_256e_kinetics600_rgb_20201015.log) | [json](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_video_8x8x1_256e_kinetics600_rgb/slowonly_r50_video_8x8x1_256e_kinetics600_rgb_20201015.json) | + +### Kinetics-700 + +| config | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | ckpt | log | json | +| :----------------------------------------------------------- | :------------: | :--: | :------: | :------: | :------: | :------: | :----------------------------------------------------------: | :----------------------------------------------------------: | :----------------------------------------------------------: | +| [slowonly_r50_video_8x8x1_256e_kinetics700_rgb](/configs/recognition/slowonly/slowonly_r50_video_8x8x1_256e_kinetics700_rgb.py) | short-side 256 | 8x4 | ResNet50 | None | 65.0 | 
86.1 | [ckpt](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_video_8x8x1_256e_kinetics700_rgb/slowonly_r50_video_8x8x1_256e_kinetics700_rgb_20201015-9250f662.pth) | [log](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_video_8x8x1_256e_kinetics700_rgb/slowonly_r50_video_8x8x1_256e_kinetics700_rgb_20201015.log) | [json](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_r50_video_8x8x1_256e_kinetics700_rgb/slowonly_r50_video_8x8x1_256e_kinetics700_rgb_20201015.json) | + +### GYM99 + +| config | resolution | gpus | backbone | pretrain | top1 acc | mean class acc | ckpt | log | json | +| :----------------------------------------------------------- | :------------: | :--: | :------: | :------: | :------: | :------------: | :----------------------------------------------------------: | :----------------------------------------------------------: | :----------------------------------------------------------: | +| [slowonly_imagenet_pretrained_r50_4x16x1_120e_gym99_rgb](/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_4x16x1_120e_gym99_rgb.py) | short-side 256 | 8x2 | ResNet50 | ImageNet | 79.3 | 70.2 | [ckpt](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_imagenet_pretrained_r50_4x16x1_120e_gym99_rgb/slowonly_imagenet_pretrained_r50_4x16x1_120e_gym99_rgb_20201111-a9c34b54.pth) | [log](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_imagenet_pretrained_r50_4x16x1_120e_gym99_rgb/slowonly_imagenet_pretrained_r50_4x16x1_120e_gym99_rgb_20201111.log) | [json](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_imagenet_pretrained_r50_4x16x1_120e_gym99_rgb/slowonly_imagenet_pretrained_r50_4x16x1_120e_gym99_rgb_20201111.json) | +| [slowonly_kinetics_pretrained_r50_4x16x1_120e_gym99_flow](/configs/recognition/slowonly/slowonly_kinetics_pretrained_r50_4x16x1_120e_gym99_flow.py) | short-side 256 | 8x2 | ResNet50 | Kinetics | 80.3 | 71.0 | [ckpt](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_kinetics_pretrained_r50_4x16x1_120e_gym99_flow/slowonly_kinetics_pretrained_r50_4x16x1_120e_gym99_flow_20201111-66ecdb3c.pth) | [log](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_kinetics_pretrained_r50_4x16x1_120e_gym99_flow/slowonly_kinetics_pretrained_r50_4x16x1_120e_gym99_flow_20201111.log) | [json](https://download.openmmlab.com/mmaction/recognition/slowonly/slowonly_kinetics_pretrained_r50_4x16x1_120e_gym99_flow/slowonly_kinetics_pretrained_r50_4x16x1_120e_gym99_flow_20201111.json) | +| 1: 1 Fusion | | | | | 83.7 | 74.8 | | | | + +Notes: + +1. The **gpus** column indicates the number of GPUs we used to obtain the checkpoint. Note that the provided configs are intended for 8 GPUs by default. +According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you should scale the learning rate in proportion to the total batch size if you train with a different number of GPUs or videos per GPU, +e.g., lr=0.01 for 4 GPUs * 2 videos/GPU and lr=0.08 for 16 GPUs * 4 videos/GPU (see the short sketch below). +2. The **inference_time** is measured with this [benchmark script](/tools/analysis/benchmark.py), using the frame sampling strategy of the test setting; it covers only the model inference time, +excluding the IO and pre-processing time. For each setting, we use 1 GPU with a batch size (videos per GPU) of 1 to measure the inference time. + +For more details on data preparation, you can refer to Kinetics400 in [Data Preparation](/docs/data_preparation.md). 
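+
+Below is a minimal, illustrative sketch of the Linear Scaling Rule referenced in note 1. It is not part of this repository's tooling; the helper `scale_lr` and its argument names are invented for illustration only.
+
+```python
+def scale_lr(base_lr, base_gpus, base_videos_per_gpu, gpus, videos_per_gpu):
+    """Scale the learning rate in proportion to the total batch size."""
+    base_batch = base_gpus * base_videos_per_gpu
+    new_batch = gpus * videos_per_gpu
+    return base_lr * new_batch / base_batch
+
+# Reproduces the example from note 1: lr=0.01 at 4 GPUs * 2 videos/GPU
+# scales to lr=0.08 at 16 GPUs * 4 videos/GPU.
+print(scale_lr(0.01, 4, 2, 16, 4))  # 0.08
+```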
+ +## Train +You can use the following command to train a model. +```shell +python tools/train.py ${CONFIG_FILE} [optional arguments] +``` + +Example: train SlowOnly model on Kinetics-400 dataset in a deterministic option with periodic validation. +```shell +python tools/train.py configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb.py \ + --work-dir work_dirs/slowonly_r50_4x16x1_256e_kinetics400_rgb \ + --validate --seed 0 --deterministic +``` + +For more details, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting). + +## Test +You can use the following command to test a model. +```shell +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments] +``` + +Example: test SlowOnly model on Kinetics-400 dataset and dump the result to a json file. +```shell +python tools/test.py configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb.py \ + checkpoints/SOME_CHECKPOINT.pth --eval top_k_accuracy mean_class_accuracy \ + --out result.json --average-clips=prob +``` + +For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset). diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/data_benchmark/slowonly_r50_randomresizedcrop_256p_4x16x1_256e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/data_benchmark/slowonly_r50_randomresizedcrop_256p_4x16x1_256e_kinetics400_rgb.py new file mode 100644 index 0000000..f4564bd --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/data_benchmark/slowonly_r50_randomresizedcrop_256p_4x16x1_256e_kinetics400_rgb.py @@ -0,0 +1,115 @@ +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained=None, + lateral=False, + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + cls_head=dict( + type='I3DHead', + in_channels=2048, + num_classes=400, + spatial_type='avg', + dropout_ratio=0.5)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train_256p' +data_root_val = 'data/kinetics400/rawframes_val_256p' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes_256p.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes_256p.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes_256p.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=4, + frame_interval=16, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + 
dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=4, + frame_interval=16, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=16, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.6, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='CosineAnnealing', min_lr=0) +total_epochs = 256 +checkpoint_config = dict(interval=4) +workflow = [('train', 1)] +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = ('./work_dirs/slowonly_r50_randomresizedcrop_256p_4x16x1' + '_256e_kinetics400_rgb') +load_from = None +resume_from = None +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/data_benchmark/slowonly_r50_randomresizedcrop_320p_4x16x1_256e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/data_benchmark/slowonly_r50_randomresizedcrop_320p_4x16x1_256e_kinetics400_rgb.py new file mode 100644 index 0000000..24b12dc --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/data_benchmark/slowonly_r50_randomresizedcrop_320p_4x16x1_256e_kinetics400_rgb.py @@ -0,0 +1,115 @@ +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained=None, + lateral=False, + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + cls_head=dict( + type='I3DHead', + in_channels=2048, + num_classes=400, + spatial_type='avg', + dropout_ratio=0.5)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train_320p' +data_root_val = 'data/kinetics400/rawframes_val_320p' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes_320p.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes_320p.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes_320p.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], 
meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=4, + frame_interval=16, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=4, + frame_interval=16, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=16, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.6, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='CosineAnnealing', min_lr=0) +total_epochs = 256 +checkpoint_config = dict(interval=4) +workflow = [('train', 1)] +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = ('./work_dirs/slowonly_r50_randomresizedcrop_320p_4x16x1' + '_256e_kinetics400_rgb') +load_from = None +resume_from = None +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/data_benchmark/slowonly_r50_randomresizedcrop_340x256_4x16x1_256e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/data_benchmark/slowonly_r50_randomresizedcrop_340x256_4x16x1_256e_kinetics400_rgb.py new file mode 100644 index 0000000..97033fa --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/data_benchmark/slowonly_r50_randomresizedcrop_340x256_4x16x1_256e_kinetics400_rgb.py @@ -0,0 +1,115 @@ +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained=None, + lateral=False, + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + cls_head=dict( + type='I3DHead', + in_channels=2048, + num_classes=400, + spatial_type='avg', + dropout_ratio=0.5)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 
57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=4, + frame_interval=16, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=4, + frame_interval=16, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=16, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.6, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='CosineAnnealing', min_lr=0) +total_epochs = 256 +checkpoint_config = dict(interval=4) +workflow = [('train', 1)] +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = ('slowonly_r50_randomresizedcrop_320p_4x16x1' + '_256e_kinetics400_rgb') +load_from = None +resume_from = None +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_4x16x1_120e_gym99_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_4x16x1_120e_gym99_rgb.py new file mode 100644 index 0000000..2cc9bf9 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_4x16x1_120e_gym99_rgb.py @@ -0,0 +1,114 @@ +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained='torchvision://resnet50', + lateral=False, + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + cls_head=dict( + type='I3DHead', + in_channels=2048, + num_classes=400, + spatial_type='avg', + dropout_ratio=0.5)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 
'RawframeDataset' +data_root = 'data/gym/subaction_frames' +data_root_val = 'data/gym/subaction_frames' +ann_file_train = 'data/gym/annotations/gym99_train_frame.txt' +ann_file_val = 'data/gym/annotations/gym99_val_frame.txt' +ann_file_test = 'data/gym/annotations/gym99_val_frame.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=4, + frame_interval=16, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=4, + frame_interval=16, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=24, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.03, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[90, 110]) +total_epochs = 120 +checkpoint_config = dict(interval=1) +workflow = [('train', 1)] +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/slowonly_imagenet_pretrained_r50_4x16x1_120e_gym99_rgb' +load_from = None +resume_from = None +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_4x16x1_150e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_4x16x1_150e_kinetics400_rgb.py new file mode 100644 index 0000000..1409082 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_4x16x1_150e_kinetics400_rgb.py @@ -0,0 +1,120 @@ +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + 
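+        # 'torchvision://resnet50' loads ImageNet-pretrained 2D ResNet-50 weights
+        # from torchvision, which the ResNet3dSlowOnly backbone inflates into its
+        # 3D kernels (hence the "imagenet_pretrained" config name).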
pretrained='torchvision://resnet50', + lateral=False, + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + cls_head=dict( + type='I3DHead', + in_channels=2048, + num_classes=400, + spatial_type='avg', + dropout_ratio=0.5)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=4, + frame_interval=16, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=4, + frame_interval=16, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + step=[90, 130], + warmup='linear', + warmup_by_epoch=True, + warmup_iters=10) +total_epochs = 150 +checkpoint_config = dict(interval=4) +workflow = [('train', 1)] +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = ('./work_dirs/slowonly_imagenet_pretrained_r50_4x16x1_150e' + '_kinetics400_rgb') +load_from = None +resume_from = None +find_unused_parameters = False diff --git 
a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_8x8x1_150e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_8x8x1_150e_kinetics400_rgb.py new file mode 100644 index 0000000..1a2e208 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_8x8x1_150e_kinetics400_rgb.py @@ -0,0 +1,120 @@ +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained='torchvision://resnet50', + lateral=False, + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + cls_head=dict( + type='I3DHead', + in_channels=2048, + num_classes=400, + spatial_type='avg', + dropout_ratio=0.5)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + step=[90, 130], + warmup='linear', + warmup_by_epoch=True, + warmup_iters=10) +total_epochs = 150 +checkpoint_config = 
dict(interval=4) +workflow = [('train', 1)] +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = ('./work_dirs/slowonly_imagenet_pretrained_r50_8x8x1_150e' + '_kinetics400_rgb') +load_from = None +resume_from = None +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_kinetics_pretrained_r50_4x16x1_120e_gym99_flow.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_kinetics_pretrained_r50_4x16x1_120e_gym99_flow.py new file mode 100644 index 0000000..61213d5 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_kinetics_pretrained_r50_4x16x1_120e_gym99_flow.py @@ -0,0 +1,124 @@ +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained=None, + lateral=False, + in_channels=2, + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + with_pool2=False, + norm_eval=False), + cls_head=dict( + type='I3DHead', + in_channels=2048, + num_classes=400, + spatial_type='avg', + dropout_ratio=0.5)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'RawframeDataset' +data_root = 'data/gym/subaction_frames' +data_root_val = 'data/gym/subaction_frames' +ann_file_train = 'data/gym/annotations/gym99_train_frame.txt' +ann_file_val = 'data/gym/annotations/gym99_val_frame.txt' +ann_file_test = 'data/gym/annotations/gym99_val_frame.txt' +img_norm_cfg = dict(mean=[128, 128], std=[128, 128]) +train_pipeline = [ + dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=4, + frame_interval=16, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=4, + frame_interval=16, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=24, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + modality='Flow', + filename_tmpl='{}_{:05d}.jpg', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + modality='Flow', + filename_tmpl='{}_{:05d}.jpg', + 
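+        # With modality='Flow', '{}' in filename_tmpl is filled with the flow
+        # direction prefix ('x' / 'y'), matching the backbone's in_channels=2.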
pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + modality='Flow', + filename_tmpl='{}_{:05d}.jpg', + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.03, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[90, 110]) +total_epochs = 120 +checkpoint_config = dict(interval=1) +workflow = [('train', 1)] +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = ('./work_dirs/' + 'slowonly_kinetics_pretrained_r50_4x16x1_120e_gym99_flow') +load_from = ('https://download.openmmlab.com/mmaction/recognition/slowonly/' + 'slowonly_r50_4x16x1_256e_kinetics400_flow/' + 'slowonly_r50_4x16x1_256e_kinetics400_flow_20200704-decb8568.pth') +resume_from = None +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r101_8x8x1_196e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r101_8x8x1_196e_kinetics400_rgb.py new file mode 100644 index 0000000..8b5ccbc --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r101_8x8x1_196e_kinetics400_rgb.py @@ -0,0 +1,118 @@ +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=101, + pretrained=None, + lateral=False, + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + cls_head=dict( + type='I3DHead', + in_channels=2048, + num_classes=400, + spatial_type='avg', + dropout_ratio=0.5)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode'), + 
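+    # Testing protocol: 10 temporally sampled clips x 3 spatial crops (ThreeCrop
+    # below) = 30 views per video, averaged with test_cfg average_clips='prob'.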
dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.1, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_ratio=0.1, + warmup_by_epoch=True, + warmup_iters=34) +total_epochs = 196 +checkpoint_config = dict(interval=4) +workflow = [('train', 1)] +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, hooks=[ + dict(type='TextLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/slowonly_r101_8x8x1_196e_kinetics400_rgb' +load_from = None +resume_from = None +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_flow.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_flow.py new file mode 100644 index 0000000..78ae06d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_flow.py @@ -0,0 +1,126 @@ +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained='torchvision://resnet50', + lateral=False, + in_channels=2, + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + with_pool2=False, + norm_eval=False), + cls_head=dict( + type='I3DHead', + in_channels=2048, + num_classes=400, + spatial_type='avg', + dropout_ratio=0.5)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics_flow_train_list.txt' +ann_file_val = 'data/kinetics400/kinetics_flow_val_list.txt' +ann_file_test = 'data/kinetics400/kinetics_flow_val_list.txt' +img_norm_cfg = dict(mean=[128, 128], std=[128, 128]) +train_pipeline = [ + dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=4, + frame_interval=16, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + 
dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=4, + frame_interval=16, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=24, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + modality='Flow', + filename_tmpl='{}_{:05d}.jpg', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + modality='Flow', + filename_tmpl='{}_{:05d}.jpg', + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + modality='Flow', + filename_tmpl='{}_{:05d}.jpg', + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.06, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_by_epoch=True, + warmup_iters=34) +total_epochs = 256 +checkpoint_config = dict(interval=4) +workflow = [('train', 1)] +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/slowonly_r50_4x16x1_256e_kinetics400_flow' +load_from = None +resume_from = None +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb.py new file mode 100644 index 0000000..6125689 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb.py @@ -0,0 +1,114 @@ +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained=None, + lateral=False, + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + cls_head=dict( + type='I3DHead', + in_channels=2048, + num_classes=400, + spatial_type='avg', + dropout_ratio=0.5)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 
224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=4, + frame_interval=16, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=4, + frame_interval=16, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.1, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='CosineAnnealing', min_lr=0) +total_epochs = 256 +checkpoint_config = dict(interval=4) +workflow = [('train', 1)] +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/slowonly_r50_4x16x1_256e_kinetics400_rgb' +load_from = None +resume_from = None +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_flow.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_flow.py new file mode 100644 index 0000000..d3f106e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_flow.py @@ -0,0 +1,126 @@ +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained='torchvision://resnet50', + lateral=False, + in_channels=2, + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + with_pool2=False, + norm_eval=False), + cls_head=dict( + type='I3DHead', + in_channels=2048, + num_classes=400, + spatial_type='avg', + dropout_ratio=0.5)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics_flow_train_list.txt' +ann_file_val = 'data/kinetics400/kinetics_flow_val_list.txt' +ann_file_test = 
'data/kinetics400/kinetics_flow_val_list.txt' +img_norm_cfg = dict(mean=[128, 128], std=[128, 128]) +train_pipeline = [ + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=12, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + modality='Flow', + filename_tmpl='{}_{:05d}.jpg', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + modality='Flow', + filename_tmpl='{}_{:05d}.jpg', + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + modality='Flow', + filename_tmpl='{}_{:05d}.jpg', + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.06, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_by_epoch=True, + warmup_iters=34) +total_epochs = 196 +checkpoint_config = dict(interval=4) +workflow = [('train', 1)] +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/slowonly_r50_8x8x1_256e_kinetics400_flow' +load_from = None +resume_from = None +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_rgb.py new file mode 100644 index 0000000..65cd20d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_rgb.py @@ -0,0 +1,114 @@ +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained=None, + lateral=False, + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + 
norm_eval=False), + cls_head=dict( + type='I3DHead', + in_channels=2048, + num_classes=400, + spatial_type='avg', + dropout_ratio=0.5)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.1, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='CosineAnnealing', min_lr=0) +total_epochs = 256 +checkpoint_config = dict(interval=4) +workflow = [('train', 1)] +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/slowonly_r50_8x8x1_256e_kinetics400_rgb' +load_from = None +resume_from = None +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_video_4x16x1_256e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_video_4x16x1_256e_kinetics400_rgb.py new file mode 100644 index 0000000..97c15e0 --- /dev/null +++ 
b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_video_4x16x1_256e_kinetics400_rgb.py @@ -0,0 +1,117 @@ +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained=None, + lateral=False, + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + cls_head=dict( + type='I3DHead', + in_channels=2048, + num_classes=400, + spatial_type='avg', + dropout_ratio=0.5)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'VideoDataset' +data_root = 'data/kinetics400/videos_train' +data_root_val = 'data/kinetics400/videos_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=4, + frame_interval=16, + num_clips=1, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=4, + frame_interval=16, + num_clips=10, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=24, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.3, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='CosineAnnealing', min_lr=0) +total_epochs = 256 +checkpoint_config = dict(interval=4) +workflow = [('train', 1)] +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = 
'./work_dirs/slowonly_r50_video_4x16x1_256e_kinetics400_rgb' +load_from = None +resume_from = None +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_video_8x8x1_256e_kinetics600_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_video_8x8x1_256e_kinetics600_rgb.py new file mode 100644 index 0000000..e91549b --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_video_8x8x1_256e_kinetics600_rgb.py @@ -0,0 +1,114 @@ +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained=None, + lateral=False, + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + cls_head=dict( + type='I3DHead', + in_channels=2048, + num_classes=600, + spatial_type='avg', + dropout_ratio=0.5)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'VideoDataset' +data_root = 'data/kinetics600/videos_train' +data_root_val = 'data/kinetics600/videos_val' +ann_file_train = 'data/kinetics600/kinetics600_train_list_videos.txt' +ann_file_val = 'data/kinetics600/kinetics600_val_list_videos.txt' +ann_file_test = 'data/kinetics600/kinetics600_val_list_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='DecordDecode'), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='DecordDecode'), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='DecordDecode'), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=12, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.15, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='CosineAnnealing', min_lr=0) +total_epochs = 256 +checkpoint_config = dict(interval=4) +workflow = [('train', 1)] 
+evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/slowonly_r50_video_8x8x1_256e_kinetics600_rgb' +load_from = None +resume_from = None +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_video_8x8x1_256e_kinetics700_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_video_8x8x1_256e_kinetics700_rgb.py new file mode 100644 index 0000000..de7661b --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_video_8x8x1_256e_kinetics700_rgb.py @@ -0,0 +1,114 @@ +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained=None, + lateral=False, + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + cls_head=dict( + type='I3DHead', + in_channels=2048, + num_classes=700, + spatial_type='avg', + dropout_ratio=0.5)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'VideoDataset' +data_root = 'data/kinetics700/videos_train' +data_root_val = 'data/kinetics700/videos_val' +ann_file_train = 'data/kinetics700/kinetics700_train_list_videos.txt' +ann_file_val = 'data/kinetics700/kinetics700_val_list_videos.txt' +ann_file_test = 'data/kinetics700/kinetics700_val_list_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='DecordDecode'), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='DecordDecode'), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='DecordDecode'), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=12, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.15, momentum=0.9, + 
weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='CosineAnnealing', min_lr=0) +total_epochs = 256 +checkpoint_config = dict(interval=4) +workflow = [('train', 1)] +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/slowonly_r50_video_8x8x1_256e_kinetics700_rgb' +load_from = None +resume_from = None +find_unused_parameters = False diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_video_inference_4x16x1_256e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_video_inference_4x16x1_256e_kinetics400_rgb.py new file mode 100644 index 0000000..e2669a8 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_video_inference_4x16x1_256e_kinetics400_rgb.py @@ -0,0 +1,47 @@ +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained=None, + lateral=False, + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + cls_head=dict( + type='I3DHead', + in_channels=2048, + num_classes=400, + spatial_type='avg', + dropout_ratio=0.5)) + +test_cfg = dict(average_clips='prob') +dataset_type = 'VideoDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=4, + frame_interval=16, + num_clips=10, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=1, + workers_per_gpu=2, + test=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + pipeline=test_pipeline)) diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tin/README.md b/Downstream/Open-Set-Action-Recognition/configs/recognition/tin/README.md new file mode 100644 index 0000000..cca26b9 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tin/README.md @@ -0,0 +1,75 @@ +# TIN + +## Introduction +``` +@article{shao2020temporal, + title={Temporal Interlacing Network}, + author={Hao Shao and Shengju Qian and Yu Liu}, + year={2020}, + journal={AAAI}, +} +``` + +## Model Zoo + +### Something-Something V1 + +|config | resolution | gpus | backbone| pretrain | top1 acc| top5 acc | reference top1 acc | reference top5 acc | gpu_mem(M) | ckpt | log| json| +|:--|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:| +|[tin_r50_1x1x8_40e_sthv1_rgb](/configs/recognition/tin/tin_r50_1x1x8_40e_sthv1_rgb.py)|height 100|8x4| ResNet50 | ImageNet | 44.25 | 73.94 | 44.04 | 72.72 | 6181 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tin/tin_r50_1x1x8_40e_sthv1_rgb/tin_r50_1x1x8_40e_sthv1_rgb_20200729-4a33db86.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tin/tin_r50_1x1x8_40e_sthv1_rgb/20200729_034132.log) | 
[json](https://download.openmmlab.com/mmaction/recognition/tin/tin_r50_1x1x8_40e_sthv1_rgb/20200729_034132.log.json) |
+
+### Something-Something V2
+
+|config | resolution | gpus | backbone| pretrain | top1 acc| top5 acc | reference top1 acc | reference top5 acc | gpu_mem(M) | ckpt | log| json|
+|:--|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|
+|[tin_r50_1x1x8_40e_sthv2_rgb](/configs/recognition/tin/tin_r50_1x1x8_40e_sthv2_rgb.py)|height 240|8x4| ResNet50 | ImageNet | 56.70 | 83.62 | 56.48 | 83.45 | 6185 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tin/tin_r50_1x1x8_40e_sthv2_rgb/tin_r50_1x1x8_40e_sthv2_rgb_20200912-b27a7337.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tin/tin_r50_1x1x8_40e_sthv2_rgb/20200912_225451.log) | [json](https://download.openmmlab.com/mmaction/recognition/tin/tin_r50_1x1x8_40e_sthv2_rgb/20200912_225451.log.json) |
+
+### Kinetics-400
+
+|config | resolution | gpus | backbone| pretrain | top1 acc| top5 acc | gpu_mem(M) | ckpt | log| json|
+|:--|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|
+|[tin_tsm_finetune_r50_1x1x8_50e_kinetics400_rgb](/configs/recognition/tin/tin_tsm_finetune_r50_1x1x8_50e_kinetics400_rgb.py)|short-side 256|8x4| ResNet50 | TSM-Kinetics400 | 70.89 | 89.89 | 6187 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tin/tin_tsm_finetune_r50_1x1x8_50e_kinetics400_rgb/tin_tsm_finetune_r50_1x1x8_50e_kinetics400_rgb_20200810-4a146a70.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tin/tin_tsm_finetune_r50_1x1x8_50e_kinetics400_rgb/20200809_142447.log) | [json](https://download.openmmlab.com/mmaction/recognition/tin/tin_tsm_finetune_r50_1x1x8_50e_kinetics400_rgb/20200809_142447.log.json) |
+
+Here, `finetune` indicates that we take the [TSM model](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_1x1x8_50e_kinetics400_rgb/tsm_r50_1x1x8_50e_kinetics400_rgb_20200607-af7fb746.pth) trained on Kinetics-400 and fine-tune the TIN model on Kinetics-400 from it.
+
+Notes:
+1. The **reference top1/top5 acc** values are obtained by training the [original repo #1aacd0c](https://github.com/deepcs233/TIN/tree/1aacd0c4c30d5e1d334bf023e55b855b59f158db) with the [AverageMeter issue](https://github.com/deepcs233/TIN/issues/4) fixed, since that issue leads to incorrect performance.
+2. The **gpus** column indicates the number of GPUs used to obtain the checkpoint. Note that the provided configs assume 8 GPUs by default.
+According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you should scale the learning rate proportionally to the total batch size when using a different number of GPUs or videos per GPU,
+e.g., lr=0.01 for 4 GPUs * 2 videos/GPU and lr=0.08 for 16 GPUs * 4 videos/GPU.
+3. The **inference_time** is measured with this [benchmark script](/tools/analysis/benchmark.py), using the frame-sampling strategy of the test setting and counting only model inference time, excluding IO and pre-processing. For each setting, we use 1 GPU and a batch size (videos per GPU) of 1.
+4. The values in the columns named "reference" are the results obtained by training with the original repo, using the same model settings.
+
+For more details on data preparation, you can refer to Kinetics400, Something-Something V1 and Something-Something V2 in [Data Preparation](/docs/data_preparation.md).
+
+## Train
+You can use `tools/train.py` to train a model.
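+As noted above, the configs assume 8 GPUs; when launching with a different world size, scale `optimizer.lr` in the config before training. Below is a minimal sketch of that arithmetic only; the 4-GPU setup is an illustrative assumption, and the base values are the ones from `tin_r50_1x1x8_40e_sthv1_rgb.py`.
+
+```python
+# Linear Scaling Rule: lr is proportional to the total batch size (GPUs * videos per GPU).
+base_lr, base_gpus, base_videos_per_gpu = 0.02, 8, 6  # defaults in tin_r50_1x1x8_40e_sthv1_rgb.py
+gpus, videos_per_gpu = 4, 6                           # hypothetical training setup
+scaled_lr = base_lr * (gpus * videos_per_gpu) / (base_gpus * base_videos_per_gpu)
+print(scaled_lr)  # 0.01 -> set optimizer.lr to this value in your copy of the config
+```
+
+The generic command is: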
+```shell +python tools/train.py ${CONFIG_FILE} [optional arguments] +``` + +Example: train TIN model on Something-Something V1 dataset in a deterministic option with periodic validation. +```shell +python tools/train.py configs/recognition/tin/tin_r50_1x1x8_40e_sthv1_rgb.py \ + --work-dir work_dirs/tin_r50_1x1x8_40e_sthv1_rgb \ + --validate --seed 0 --deterministic +``` + +For more details, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting). + +## Test +You can use the following command to test a model. +```shell +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments] +``` + +Example: test TIN model on Something-Something V1 dataset and dump the result to a json file. +```shell +python tools/test.py configs/recognition/tin/tin_r50_1x1x8_40e_sthv1_rgb.py \ + checkpoints/SOME_CHECKPOINT.pth --eval top_k_accuracy mean_class_accuracy \ + --out result.json +``` + +For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset). diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tin/tin_r50_1x1x8_40e_sthv1_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tin/tin_r50_1x1x8_40e_sthv1_rgb.py new file mode 100644 index 0000000..2d40649 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tin/tin_r50_1x1x8_40e_sthv1_rgb.py @@ -0,0 +1,131 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNetTIN', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + shift_div=4), + cls_head=dict( + type='TSMHead', + num_classes=174, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.8, + init_std=0.001, + is_shift=False)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/sth-v1/rawframes_train/' +data_root_val = 'data/sth-v1/rawframes_val/' +ann_file_train = 'data/sth-v1/sth-v1_train_list.txt' +ann_file_val = 'data/sth-v1/sth-v1_val_list.txt' +ann_file_test = 'data/sth-v1/sth-v1_val_list.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', 
**img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=6, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + filename_tmpl='{:05}.jpg', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + filename_tmpl='{:05}.jpg', + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + filename_tmpl='{:05}.jpg', + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', + constructor='TSMOptimizerConstructor', + paramwise_cfg=dict(fc_lr5=True), + lr=0.02, # this lr is used for 8 gpus + momentum=0.9, + weight_decay=0.0005) +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + min_lr_ratio=0.5, + warmup='linear', + warmup_ratio=0.1, + warmup_by_epoch=True, + warmup_iters=1) +total_epochs = 40 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tin_r50_1x1x8_40e_sthv1_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tin/tin_r50_1x1x8_40e_sthv2_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tin/tin_r50_1x1x8_40e_sthv2_rgb.py new file mode 100644 index 0000000..1ace88a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tin/tin_r50_1x1x8_40e_sthv2_rgb.py @@ -0,0 +1,131 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNetTIN', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + shift_div=4), + cls_head=dict( + type='TSMHead', + num_classes=174, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.8, + init_std=0.001, + is_shift=False)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/sth-v2/rawframes_train/' +data_root_val = 'data/sth-v2/rawframes_val/' +ann_file_train = 'data/sth-v2/sth-v2_train_list.txt' +ann_file_val = 'data/sth-v2/sth-v2_val_list.txt' +ann_file_test = 'data/sth-v2/sth-v2_val_list.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + 
dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=6, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + filename_tmpl='{:05}.jpg', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + filename_tmpl='{:05}.jpg', + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + filename_tmpl='{:05}.jpg', + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', + constructor='TSMOptimizerConstructor', + paramwise_cfg=dict(fc_lr5=True), + lr=0.02, # this lr is used for 8 gpus + momentum=0.9, + weight_decay=0.0005) +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + by_epoch=False, + warmup='linear', + warmup_iters=1, + warmup_by_epoch=True, + min_lr=0) +total_epochs = 40 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tin_r50_1x1x8_40e_sthv2_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tin/tin_tsm_finetune_r50_1x1x8_50e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tin/tin_tsm_finetune_r50_1x1x8_50e_kinetics400_rgb.py new file mode 100644 index 0000000..0ad3c7b --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tin/tin_tsm_finetune_r50_1x1x8_50e_kinetics400_rgb.py @@ -0,0 +1,124 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNetTIN', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + shift_div=4), + cls_head=dict( + type='TSMHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001, + is_shift=True)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', 
scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=6, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', + constructor='TSMOptimizerConstructor', + paramwise_cfg=dict(fc_lr5=True), + lr=0.01, # this lr is for 8 gpus + momentum=0.9, + weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tin_tsm_finetune_r50_1x1x8_50e_kinetics400_rgb/' +# load_from = 'modelzoo/tsm_r50_1x1x8_50e_kinetics400_rgb_20200607-af7fb746.pth' # noqa: E501 +load_from = 'https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_1x1x8_50e_kinetics400_rgb/tsm_r50_1x1x8_50e_kinetics400_rgb_20200607-af7fb746.pth' # noqa: E501 +resume_from = None diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/README.md b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/README.md new file mode 100644 index 0000000..b40ae97 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/README.md @@ -0,0 +1,59 @@ +# TPN + +## Introduction +``` +@inproceedings{yang2020tpn, + title={Temporal Pyramid Network for Action Recognition}, + author={Yang, Ceyuan and Xu, Yinghao and Shi, Jianping and Dai, Bo and Zhou, Bolei}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year={2020}, +} +``` + +## Model Zoo + +### Kinetics-400 + +|config | resolution | gpus | backbone | pretrain | top1 acc| top5 acc | reference top1 acc | reference top5 
acc | inference_time(video/s) | gpu_mem(M)| ckpt | log| json|
+|:--|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|
+|[tpn_slowonly_r50_8x8x1_150e_kinetics_rgb](/configs/recognition/tpn/tpn_slowonly_r50_8x8x1_150e_kinetics_rgb.py)|short-side 320|8x4| ResNet50 | ImageNet | 73.10 | 91.03 | x | x | x | 6916 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tpn/tpn_slowonly_r50_8x8x1_150e_kinetics_rgb/tpn_slowonly_r50_8x8x1_150e_kinetics_rgb_20200910-b796d7a0.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tpn/tpn_slowonly_r50_8x8x1_150e_kinetics_rgb/20200910_134330.log) | [json](https://download.openmmlab.com/mmaction/recognition/tpn/tpn_slowonly_r50_8x8x1_150e_kinetics_rgb/20200910_134330.log.json) |
+|[tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb](/configs/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb.py)|short-side 320|8x4| ResNet50 | ImageNet | 76.20 | 92.44 | [75.49](https://github.com/decisionforce/TPN/blob/master/MODELZOO.md) | [92.05](https://github.com/decisionforce/TPN/blob/master/MODELZOO.md) | x | 6916 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb_20200923-52629684.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb/20200923_151919.log) | [json](https://download.openmmlab.com/mmaction/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb/20200923_151919.log.json) |
+
+Notes:
+1. The **gpus** column indicates the number of GPUs used to obtain the checkpoint. Note that the provided configs assume 8 GPUs by default. According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you should scale the learning rate proportionally to the total batch size when using a different number of GPUs or videos per GPU, e.g., lr=0.01 for 4 GPUs * 2 videos/GPU and lr=0.08 for 16 GPUs * 4 videos/GPU.
+2. The **inference_time** is measured with this [benchmark script](/tools/analysis/benchmark.py), using the frame-sampling strategy of the test setting and counting only model inference time, excluding IO and pre-processing. For each setting, we use 1 GPU and a batch size (videos per GPU) of 1.
+3. The values in the columns named "reference" are the results obtained by testing the checkpoint released by the original repo with its code, using the same dataset as ours.
+
+For more details on data preparation, you can refer to Kinetics400, Something-Something V1 and Something-Something V2 in [Data Preparation](/docs/data_preparation.md).
+
+## Train
+You can use the following command to train a model.
+```shell
+python tools/train.py ${CONFIG_FILE} [optional arguments]
+```
+
+Example: train the TPN model on the Kinetics-400 dataset, optionally with periodic validation and deterministic behavior.
+```shell
+python tools/train.py configs/recognition/tpn/tpn_slowonly_r50_8x8x1_150e_kinetics_rgb.py \
+    --work-dir work_dirs/tpn_slowonly_r50_8x8x1_150e_kinetics_rgb [--validate --seed 0 --deterministic]
+```
+
+For more details, you can refer to the **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
+
+## Test
+You can use `tools/test.py` to test a model.
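+The configs in this directory all report `top_k_accuracy` and `mean_class_accuracy` during evaluation. As a reference for what the former measures, here is a standalone NumPy sketch (illustrative only, not the implementation used by this codebase; the toy scores and labels are made up):
+
+```python
+import numpy as np
+
+def top_k_accuracy(scores: np.ndarray, labels: np.ndarray, k: int = 1) -> float:
+    """Fraction of samples whose true label is among the k highest-scoring classes.
+
+    scores: (num_samples, num_classes) clip-averaged class scores.
+    labels: (num_samples,) ground-truth class indices.
+    """
+    topk = np.argsort(scores, axis=1)[:, -k:]        # indices of the k best classes per sample
+    hits = (topk == labels[:, None]).any(axis=1)     # does the true label appear among them?
+    return float(hits.mean())
+
+# toy check: 3 samples, 4 classes
+scores = np.array([[0.1, 0.5, 0.3, 0.1],
+                   [0.7, 0.1, 0.1, 0.1],
+                   [0.2, 0.2, 0.5, 0.1]])
+labels = np.array([2, 0, 1])
+print(top_k_accuracy(scores, labels, k=1))  # 0.333...
+print(top_k_accuracy(scores, labels, k=2))  # 1.0
+```
+
+The generic command is: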
+```shell +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments] +``` + +Example: test TPN model on Kinetics-400 dataset and dump the result to a json file. +```shell +python tools/test.py configs/recognition/tpn/tpn_slowonly_r50_8x8x1_150e_kinetics_rgb.py \ + checkpoints/SOME_CHECKPOINT.pth --eval top_k_accuracy mean_class_accuracy \ + --out result.json --average-clips prob +``` + +For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset). diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/finetune_ucf101_tpn_slowonly_rpl.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/finetune_ucf101_tpn_slowonly_rpl.py new file mode 100644 index 0000000..30499aa --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/finetune_ucf101_tpn_slowonly_rpl.py @@ -0,0 +1,141 @@ +# model settings +model = dict( + type='Recognizer3DRPL', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained='torchvision://resnet50', + lateral=False, + out_indices=(2, 3), + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + neck=dict( + type='TPN', + in_channels=(1024, 2048), + out_channels=1024, + spatial_modulation_cfg=dict( + in_channels=(1024, 2048), out_channels=2048), + temporal_modulation_cfg=dict(downsample_scales=(8, 8)), + upsample_cfg=dict(scale_factor=(1, 1, 1)), + downsample_cfg=dict(downsample_scale=(1, 1, 1)), + level_fusion_cfg=dict( + in_channels=(1024, 1024), + mid_channels=(1024, 1024), + out_channels=2048, + downsample_scales=((1, 1, 1), (1, 1, 1))), + aux_head_cfg=dict(out_channels=101, loss_weight=0.5)), + cls_head=dict( + type='TPNRPLHead', + loss_cls=dict(type='RPLoss', + temperature=1, + weight_pl=0.1), + num_classes=101, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.01)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'VideoDataset' +data_root = 'data/ucf101/videos' +data_root_val = 'data/ucf101/videos' +ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt' +ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt' +ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='OpenCVDecode'), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='ColorJitter', color_space_aug=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='ColorJitter', color_space_aug=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', 
num_threads=1), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + start_index=0, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.002, momentum=0.9, weight_decay=0.0001, + nesterov=True) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=10) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_rpl' # noqa: E501 +load_from = 'https://download.openmmlab.com/mmaction/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb_20200923-52629684.pth' +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/inference_tpn_slowonly_bnn.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/inference_tpn_slowonly_bnn.py new file mode 100644 index 0000000..b1fb8ac --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/inference_tpn_slowonly_bnn.py @@ -0,0 +1,64 @@ +# model settings +model = dict( + type='Recognizer3DBNN', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained='torchvision://resnet50', + lateral=False, + out_indices=(2, 3), + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + neck=dict( + type='TPN', + in_channels=(1024, 2048), + out_channels=1024, + spatial_modulation_cfg=dict( + in_channels=(1024, 2048), out_channels=2048), + temporal_modulation_cfg=dict(downsample_scales=(8, 8)), + upsample_cfg=dict(scale_factor=(1, 1, 1)), + downsample_cfg=dict(downsample_scale=(1, 1, 1)), + level_fusion_cfg=dict( + in_channels=(1024, 1024), + mid_channels=(1024, 1024), + out_channels=2048, + downsample_scales=((1, 1, 1), (1, 1, 1)))), + cls_head=dict( + type='TPNBNNHead', + num_classes=101, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0, # for bnn, dropout is not necessary + init_std=0.01)) +test_cfg = dict(average_clips='prob', npass=10) +dataset_type = 'VideoDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='OpenCVDecode'), + 
dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=2, + workers_per_gpu=2, + test=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + pipeline=test_pipeline)) diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/inference_tpn_slowonly_dnn.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/inference_tpn_slowonly_dnn.py new file mode 100644 index 0000000..9ba1602 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/inference_tpn_slowonly_dnn.py @@ -0,0 +1,65 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained='torchvision://resnet50', + lateral=False, + out_indices=(2, 3), + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + neck=dict( + type='TPN', + in_channels=(1024, 2048), + out_channels=1024, + spatial_modulation_cfg=dict( + in_channels=(1024, 2048), out_channels=2048), + temporal_modulation_cfg=dict(downsample_scales=(8, 8)), + upsample_cfg=dict(scale_factor=(1, 1, 1)), + downsample_cfg=dict(downsample_scale=(1, 1, 1)), + level_fusion_cfg=dict( + in_channels=(1024, 1024), + mid_channels=(1024, 1024), + out_channels=2048, + downsample_scales=((1, 1, 1), (1, 1, 1)))), + cls_head=dict( + type='TPNHead', + num_classes=101, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.01)) +evidence='exp' # only used for EDL +test_cfg = dict(average_clips='prob') +dataset_type = 'VideoDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=2, + workers_per_gpu=2, + test=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + pipeline=test_pipeline)) diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/inference_tpn_slowonly_enn.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/inference_tpn_slowonly_enn.py new file mode 100644 index 0000000..29e2c18 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/inference_tpn_slowonly_enn.py @@ -0,0 +1,65 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained='torchvision://resnet50', + lateral=False, + out_indices=(2, 3), + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + neck=dict( + type='TPN', + in_channels=(1024, 2048), + out_channels=1024, + spatial_modulation_cfg=dict( + in_channels=(1024, 2048), out_channels=2048), + temporal_modulation_cfg=dict(downsample_scales=(8, 8)), + upsample_cfg=dict(scale_factor=(1, 1, 1)), + downsample_cfg=dict(downsample_scale=(1, 1, 1)), + 
level_fusion_cfg=dict( + in_channels=(1024, 1024), + mid_channels=(1024, 1024), + out_channels=2048, + downsample_scales=((1, 1, 1), (1, 1, 1)))), + cls_head=dict( + type='TPNHead', + num_classes=101, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.01)) +evidence='exp' # only used for EDL +test_cfg = dict(average_clips='score') +dataset_type = 'VideoDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=2, + workers_per_gpu=2, + test=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + pipeline=test_pipeline)) diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/inference_tpn_slowonly_rpl.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/inference_tpn_slowonly_rpl.py new file mode 100644 index 0000000..d488b45 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/inference_tpn_slowonly_rpl.py @@ -0,0 +1,68 @@ +# model settings +model = dict( + type='Recognizer3DRPL', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained='torchvision://resnet50', + lateral=False, + out_indices=(2, 3), + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + neck=dict( + type='TPN', + in_channels=(1024, 2048), + out_channels=1024, + spatial_modulation_cfg=dict( + in_channels=(1024, 2048), out_channels=2048), + temporal_modulation_cfg=dict(downsample_scales=(8, 8)), + upsample_cfg=dict(scale_factor=(1, 1, 1)), + downsample_cfg=dict(downsample_scale=(1, 1, 1)), + level_fusion_cfg=dict( + in_channels=(1024, 1024), + mid_channels=(1024, 1024), + out_channels=2048, + downsample_scales=((1, 1, 1), (1, 1, 1)))), + cls_head=dict( + type='TPNRPLHead', + loss_cls=dict(type='RPLoss', + temperature=1, + weight_pl=0.1), + num_classes=101, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.01)) +evidence='exp' # only used for EDL +test_cfg = dict(average_clips='prob') +dataset_type = 'VideoDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=2, + workers_per_gpu=2, + test=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + pipeline=test_pipeline)) diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb.py 
b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb.py new file mode 100644 index 0000000..6ba644e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb.py @@ -0,0 +1,132 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained='torchvision://resnet50', + lateral=False, + out_indices=(2, 3), + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + neck=dict( + type='TPN', + in_channels=(1024, 2048), + out_channels=1024, + spatial_modulation_cfg=dict( + in_channels=(1024, 2048), out_channels=2048), + temporal_modulation_cfg=dict(downsample_scales=(8, 8)), + upsample_cfg=dict(scale_factor=(1, 1, 1)), + downsample_cfg=dict(downsample_scale=(1, 1, 1)), + level_fusion_cfg=dict( + in_channels=(1024, 1024), + mid_channels=(1024, 1024), + out_channels=2048, + downsample_scales=((1, 1, 1), (1, 1, 1))), + aux_head_cfg=dict(out_channels=400, loss_weight=0.5)), + cls_head=dict( + type='TPNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.01)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='FrameSelector'), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='ColorJitter', color_space_aug=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='ColorJitter', color_space_aug=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=8, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + 
type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001, + nesterov=True) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[75, 125]) +total_epochs = 150 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics400_rgb' # noqa: E501 +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_bnn_r50_8x8x1_150e_kinetics_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_bnn_r50_8x8x1_150e_kinetics_rgb.py new file mode 100644 index 0000000..75a9e55 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_bnn_r50_8x8x1_150e_kinetics_rgb.py @@ -0,0 +1,139 @@ +# model settings +model = dict( + type='Recognizer3DBNN', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained='torchvision://resnet50', + lateral=False, + out_indices=(2, 3), + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + neck=dict( + type='TPN', + in_channels=(1024, 2048), + out_channels=1024, + spatial_modulation_cfg=dict( + in_channels=(1024, 2048), out_channels=2048), + temporal_modulation_cfg=dict(downsample_scales=(8, 8)), + upsample_cfg=dict(scale_factor=(1, 1, 1)), + downsample_cfg=dict(downsample_scale=(1, 1, 1)), + level_fusion_cfg=dict( + in_channels=(1024, 1024), + mid_channels=(1024, 1024), + out_channels=2048, + downsample_scales=((1, 1, 1), (1, 1, 1))), + aux_head_cfg=dict(out_channels=101, loss_weight=0.5)), + cls_head=dict( + type='TPNBNNHead', + loss_cls=dict(type='BayesianNNLoss'), + num_classes=101, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0, # for bnn, dropout is not necessary + init_std=0.01)) +train_cfg = dict(loss_weight=1e-6, npass=2) +test_cfg = dict(average_clips='prob', npass=10) +dataset_type = 'VideoDataset' +data_root = 'data/ucf101/videos' +data_root_val = 'data/ucf101/videos' +ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt' +ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt' +ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='OpenCVDecode'), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='ColorJitter', color_space_aug=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, 
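+        # validation samples a single 8-frame clip at stride 8 per video; the test
+        # pipeline further below switches to 10 clips with ThreeCrop for denser coverage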
+ test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='ColorJitter', color_space_aug=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + start_index=0, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.002, momentum=0.9, weight_decay=0.0001, + nesterov=True) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=10) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_bnn' # noqa: E501 +load_from = 'https://download.openmmlab.com/mmaction/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb_20200923-52629684.pth' +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_celoss_r50_8x8x1_150e_kinetics_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_celoss_r50_8x8x1_150e_kinetics_rgb.py new file mode 100644 index 0000000..f04ea0d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_celoss_r50_8x8x1_150e_kinetics_rgb.py @@ -0,0 +1,138 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained='torchvision://resnet50', + lateral=False, + out_indices=(2, 3), + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + neck=dict( + type='TPN', + in_channels=(1024, 2048), + out_channels=1024, + spatial_modulation_cfg=dict( + in_channels=(1024, 2048), out_channels=2048), + temporal_modulation_cfg=dict(downsample_scales=(8, 8)), + upsample_cfg=dict(scale_factor=(1, 1, 1)), + downsample_cfg=dict(downsample_scale=(1, 1, 1)), + level_fusion_cfg=dict( + in_channels=(1024, 1024), + mid_channels=(1024, 1024), + out_channels=2048, + downsample_scales=((1, 1, 1), (1, 1, 1))), + aux_head_cfg=dict(out_channels=101, loss_weight=0.5)), + cls_head=dict( + type='TPNHead', + 
num_classes=101, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.01)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'VideoDataset' +data_root = 'data/ucf101/videos' +data_root_val = 'data/ucf101/videos' +ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt' +ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt' +ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='OpenCVDecode'), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='ColorJitter', color_space_aug=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='ColorJitter', color_space_aug=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + start_index=0, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.002, momentum=0.9, weight_decay=0.0001, + nesterov=True) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=10) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_celoss' # noqa: E501 +load_from = 'https://download.openmmlab.com/mmaction/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb_20200923-52629684.pth' +resume_from = None +workflow = [('train', 1)] diff --git 
a/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_avuc_r50_8x8x1_150e_kinetics_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_avuc_r50_8x8x1_150e_kinetics_rgb.py new file mode 100644 index 0000000..cdf439e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_avuc_r50_8x8x1_150e_kinetics_rgb.py @@ -0,0 +1,146 @@ +# model settings +evidence_loss = dict(type='EvidenceLoss', + num_classes=101, + evidence='exp', + loss_type='log', + with_avuloss=True, + annealing_method='exp') +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained='torchvision://resnet50', + lateral=False, + out_indices=(2, 3), + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + neck=dict( + type='TPN', + in_channels=(1024, 2048), + out_channels=1024, + spatial_modulation_cfg=dict( + in_channels=(1024, 2048), out_channels=2048), + temporal_modulation_cfg=dict(downsample_scales=(8, 8)), + upsample_cfg=dict(scale_factor=(1, 1, 1)), + downsample_cfg=dict(downsample_scale=(1, 1, 1)), + level_fusion_cfg=dict( + in_channels=(1024, 1024), + mid_channels=(1024, 1024), + out_channels=2048, + downsample_scales=((1, 1, 1), (1, 1, 1))), + aux_head_cfg=dict(out_channels=101, loss_weight=0.5)), + cls_head=dict( + type='TPNHead', + loss_cls=evidence_loss, + num_classes=101, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.01)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'VideoDataset' +data_root = 'data/ucf101/videos' +data_root_val = 'data/ucf101/videos' +ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt' +ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt' +ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='OpenCVDecode'), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='ColorJitter', color_space_aug=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='ColorJitter', color_space_aug=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + 
dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + start_index=0, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.002, momentum=0.9, weight_decay=0.0001, + nesterov=True) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=10) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +annealing_runner = True +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_avuc' # noqa: E501 +load_from = 'https://download.openmmlab.com/mmaction/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb_20200923-52629684.pth' +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_nokl_avuc_debias_r50_8x8x1_150e_kinetics_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_nokl_avuc_debias_r50_8x8x1_150e_kinetics_rgb.py new file mode 100644 index 0000000..71f2e01 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_nokl_avuc_debias_r50_8x8x1_150e_kinetics_rgb.py @@ -0,0 +1,155 @@ +# model settings +evidence_loss = dict(type='EvidenceLoss', + num_classes=101, + evidence='exp', + loss_type='log', + with_kldiv=False, + with_avuloss=True, + annealing_method='exp') +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained='torchvision://resnet50', + lateral=False, + out_indices=(2, 3), + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + neck=dict( + type='TPN', + in_channels=(1024, 2048), + out_channels=1024, + spatial_modulation_cfg=dict( + in_channels=(1024, 2048), out_channels=2048), + temporal_modulation_cfg=dict(downsample_scales=(8, 8)), + upsample_cfg=dict(scale_factor=(1, 1, 1)), + downsample_cfg=dict(downsample_scale=(1, 1, 1)), + level_fusion_cfg=dict( + in_channels=(1024, 1024), + mid_channels=(1024, 1024), + out_channels=2048, + downsample_scales=((1, 1, 1), (1, 1, 1))), + aux_head_cfg=dict(out_channels=101, loss_weight=0.5)), + cls_head=dict( + type='TPNHead', + loss_cls=evidence_loss, + num_classes=101, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.01), + debias_head=dict( + type='DebiasHead', + loss_cls=evidence_loss, # actually not used! 
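+        # loss_cls above is a required field but, per the note, is not used by this head;
+        # loss_factor presumably scales the auxiliary debiasing loss computed on the
+        # 1024-d intermediate features (in_channels below)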
+ loss_factor=0.1, + num_classes=101, + in_channels=1024, + dropout_ratio=0.5, + init_std=0.01)) +train_cfg = None +test_cfg = dict(average_clips='evidence', evidence_type='exp') +dataset_type = 'VideoDataset' +data_root = 'data/ucf101/videos' +data_root_val = 'data/ucf101/videos' +ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt' +ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt' +ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='OpenCVDecode'), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='ColorJitter', color_space_aug=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='ColorJitter', color_space_aug=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + start_index=0, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001, + nesterov=True) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=10) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +annealing_runner = True +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_avuc' # noqa: E501 +load_from = 'https://download.openmmlab.com/mmaction/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb_20200923-52629684.pth' +resume_from = None +workflow = [('train', 1)] diff --git 
a/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_nokl_avuc_r50_8x8x1_150e_kinetics_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_nokl_avuc_r50_8x8x1_150e_kinetics_rgb.py new file mode 100644 index 0000000..dd53169 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_nokl_avuc_r50_8x8x1_150e_kinetics_rgb.py @@ -0,0 +1,147 @@ +# model settings +evidence_loss = dict(type='EvidenceLoss', + num_classes=101, + evidence='exp', + loss_type='log', + with_kldiv=False, + with_avuloss=True, + annealing_method='exp') +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained='torchvision://resnet50', + lateral=False, + out_indices=(2, 3), + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + neck=dict( + type='TPN', + in_channels=(1024, 2048), + out_channels=1024, + spatial_modulation_cfg=dict( + in_channels=(1024, 2048), out_channels=2048), + temporal_modulation_cfg=dict(downsample_scales=(8, 8)), + upsample_cfg=dict(scale_factor=(1, 1, 1)), + downsample_cfg=dict(downsample_scale=(1, 1, 1)), + level_fusion_cfg=dict( + in_channels=(1024, 1024), + mid_channels=(1024, 1024), + out_channels=2048, + downsample_scales=((1, 1, 1), (1, 1, 1))), + aux_head_cfg=dict(out_channels=101, loss_weight=0.5)), + cls_head=dict( + type='TPNHead', + loss_cls=evidence_loss, + num_classes=101, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.01)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'VideoDataset' +data_root = 'data/ucf101/videos' +data_root_val = 'data/ucf101/videos' +ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt' +ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt' +ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='OpenCVDecode'), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='ColorJitter', color_space_aug=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='ColorJitter', color_space_aug=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', 
keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + start_index=0, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001, + nesterov=True) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=10) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +annealing_runner = True +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_avuc' # noqa: E501 +load_from = 'https://download.openmmlab.com/mmaction/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb_20200923-52629684.pth' +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_nokl_avuc_rebias_r50_8x8x1_150e_kinetics_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_nokl_avuc_rebias_r50_8x8x1_150e_kinetics_rgb.py new file mode 100644 index 0000000..9ece651 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_nokl_avuc_rebias_r50_8x8x1_150e_kinetics_rgb.py @@ -0,0 +1,149 @@ +# model settings +evidence_loss = dict(type='EvidenceLoss', + num_classes=101, + evidence='exp', + loss_type='log', + with_kldiv=False, + with_avuloss=True, + annealing_method='exp') +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained='torchvision://resnet50', + lateral=False, + out_indices=(2, 3), + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + neck=dict( + type='TPN', + in_channels=(1024, 2048), + out_channels=1024, + spatial_modulation_cfg=dict( + in_channels=(1024, 2048), out_channels=2048), + temporal_modulation_cfg=dict(downsample_scales=(8, 8)), + upsample_cfg=dict(scale_factor=(1, 1, 1)), + downsample_cfg=dict(downsample_scale=(1, 1, 1)), + level_fusion_cfg=dict( + in_channels=(1024, 1024), + mid_channels=(1024, 1024), + out_channels=2048, + downsample_scales=((1, 1, 1), (1, 1, 1))), + rebias_head_cfg=dict(out_channels=101, + loss_weight=0.5, + loss_rebias=dict(type='RebiasLoss', lambda_g=1.0, criteria='hsic'))), + cls_head=dict( + type='TPNHead', + loss_cls=evidence_loss, + num_classes=101, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.01)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'VideoDataset' +data_root = 'data/ucf101/videos' +data_root_val = 'data/ucf101/videos' +ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt' +ann_file_val = 
'data/ucf101/ucf101_val_split_1_videos.txt' +ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='OpenCVDecode'), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='ColorJitter', color_space_aug=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='ColorJitter', color_space_aug=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + start_index=0, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001, + nesterov=True) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=10) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +annealing_runner = True +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_nokl_avuc_rebias' # noqa: E501 +load_from = 'https://download.openmmlab.com/mmaction/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb_20200923-52629684.pth' +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_nokl_davuc_debias_r50_8x8x1_150e_kinetics_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_nokl_davuc_debias_r50_8x8x1_150e_kinetics_rgb.py new file mode 100644 index 0000000..48b0b4e --- /dev/null +++ 
b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_nokl_davuc_debias_r50_8x8x1_150e_kinetics_rgb.py @@ -0,0 +1,156 @@ +# model settings +evidence_loss = dict(type='EvidenceLoss', + num_classes=101, + evidence='exp', + loss_type='log', + with_kldiv=False, + with_avuloss=True, + disentangle=True, + annealing_method='exp') +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained='torchvision://resnet50', + lateral=False, + out_indices=(2, 3), + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + neck=dict( + type='TPN', + in_channels=(1024, 2048), + out_channels=1024, + spatial_modulation_cfg=dict( + in_channels=(1024, 2048), out_channels=2048), + temporal_modulation_cfg=dict(downsample_scales=(8, 8)), + upsample_cfg=dict(scale_factor=(1, 1, 1)), + downsample_cfg=dict(downsample_scale=(1, 1, 1)), + level_fusion_cfg=dict( + in_channels=(1024, 1024), + mid_channels=(1024, 1024), + out_channels=2048, + downsample_scales=((1, 1, 1), (1, 1, 1))), + aux_head_cfg=dict(out_channels=101, loss_weight=0.5)), + cls_head=dict( + type='TPNHead', + loss_cls=evidence_loss, + num_classes=101, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.01), + debias_head=dict( + type='DebiasHead', + loss_cls=evidence_loss, # actually not used! + loss_factor=0.1, + num_classes=101, + in_channels=1024, + dropout_ratio=0.5, + init_std=0.01)) +train_cfg = None +test_cfg = dict(average_clips='evidence', evidence_type='exp') +dataset_type = 'VideoDataset' +data_root = 'data/ucf101/videos' +data_root_val = 'data/ucf101/videos' +ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt' +ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt' +ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='OpenCVDecode'), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='ColorJitter', color_space_aug=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='ColorJitter', color_space_aug=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = 
dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + start_index=0, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001, + nesterov=True) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=10) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +annealing_runner = True +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_avuc' # noqa: E501 +load_from = 'https://download.openmmlab.com/mmaction/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb_20200923-52629684.pth' +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_nokl_r50_8x8x1_150e_kinetics_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_nokl_r50_8x8x1_150e_kinetics_rgb.py new file mode 100644 index 0000000..9e66ad1 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_nokl_r50_8x8x1_150e_kinetics_rgb.py @@ -0,0 +1,147 @@ +# model settings +evidence_loss = dict(type='EvidenceLoss', + num_classes=101, + evidence='exp', + loss_type='log', + with_kldiv=False, + with_avuloss=False, + annealing_method='exp') +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained='torchvision://resnet50', + lateral=False, + out_indices=(2, 3), + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + neck=dict( + type='TPN', + in_channels=(1024, 2048), + out_channels=1024, + spatial_modulation_cfg=dict( + in_channels=(1024, 2048), out_channels=2048), + temporal_modulation_cfg=dict(downsample_scales=(8, 8)), + upsample_cfg=dict(scale_factor=(1, 1, 1)), + downsample_cfg=dict(downsample_scale=(1, 1, 1)), + level_fusion_cfg=dict( + in_channels=(1024, 1024), + mid_channels=(1024, 1024), + out_channels=2048, + downsample_scales=((1, 1, 1), (1, 1, 1))), + aux_head_cfg=dict(out_channels=101, loss_weight=0.5)), + cls_head=dict( + type='TPNHead', + loss_cls=evidence_loss, + num_classes=101, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.01)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'VideoDataset' +data_root = 'data/ucf101/videos' +data_root_val = 'data/ucf101/videos' +ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt' +ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt' +ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + 
dict(type='OpenCVInit', num_threads=1), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='OpenCVDecode'), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='ColorJitter', color_space_aug=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='ColorJitter', color_space_aug=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + start_index=0, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001, + nesterov=True) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=10) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +annealing_runner = True +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_avuc' # noqa: E501 +load_from = 'https://download.openmmlab.com/mmaction/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb_20200923-52629684.pth' +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_r50_8x8x1_150e_kinetics_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_r50_8x8x1_150e_kinetics_rgb.py new file mode 100644 index 0000000..810e4d4 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_r50_8x8x1_150e_kinetics_rgb.py @@ -0,0 +1,145 @@ +# model settings +evidence_loss = dict(type='EvidenceLoss', + num_classes=101, + evidence='exp', + loss_type='log', + annealing_method='exp') +model = dict( + 
type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained='torchvision://resnet50', + lateral=False, + out_indices=(2, 3), + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + neck=dict( + type='TPN', + in_channels=(1024, 2048), + out_channels=1024, + spatial_modulation_cfg=dict( + in_channels=(1024, 2048), out_channels=2048), + temporal_modulation_cfg=dict(downsample_scales=(8, 8)), + upsample_cfg=dict(scale_factor=(1, 1, 1)), + downsample_cfg=dict(downsample_scale=(1, 1, 1)), + level_fusion_cfg=dict( + in_channels=(1024, 1024), + mid_channels=(1024, 1024), + out_channels=2048, + downsample_scales=((1, 1, 1), (1, 1, 1))), + aux_head_cfg=dict(out_channels=101, loss_weight=0.5)), + cls_head=dict( + type='TPNHead', + loss_cls=evidence_loss, + num_classes=101, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.01)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'VideoDataset' +data_root = 'data/ucf101/videos' +data_root_val = 'data/ucf101/videos' +ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt' +ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt' +ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='OpenCVDecode'), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='ColorJitter', color_space_aug=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='ColorJitter', color_space_aug=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + start_index=0, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.002, momentum=0.9, weight_decay=0.0001, + nesterov=True) # this lr is used for 8 gpus 
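+# schedule summary: gradients are clipped to an L2 norm of 40, and the learning rate is
+# decayed in steps at epochs 20 and 40 over 50 total epochs, with evaluation every 5
+# epochs and checkpoints every 10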
+optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=10) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +annealing_runner = True +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss' # noqa: E501 +load_from = 'https://download.openmmlab.com/mmaction/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb_20200923-52629684.pth' +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_r50_8x8x1_150e_kinetics_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_r50_8x8x1_150e_kinetics_rgb.py new file mode 100644 index 0000000..6d66522 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_r50_8x8x1_150e_kinetics_rgb.py @@ -0,0 +1,132 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='ResNet3dSlowOnly', + depth=50, + pretrained=None, + lateral=False, + out_indices=(2, 3), + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + norm_eval=False), + neck=dict( + type='TPN', + in_channels=(1024, 2048), + out_channels=1024, + spatial_modulation_cfg=dict( + in_channels=(1024, 2048), out_channels=2048), + temporal_modulation_cfg=dict(downsample_scales=(8, 8)), + upsample_cfg=dict(scale_factor=(1, 1, 1)), + downsample_cfg=dict(downsample_scale=(1, 1, 1)), + level_fusion_cfg=dict( + in_channels=(1024, 1024), + mid_channels=(1024, 1024), + out_channels=2048, + downsample_scales=((1, 1, 1), (1, 1, 1))), + aux_head_cfg=dict(out_channels=400, loss_weight=0.5)), + cls_head=dict( + type='TPNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.01)) +train_cfg = None +test_cfg = dict(average_clips='prob') +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='FrameSelector'), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='ColorJitter', color_space_aug=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='ColorJitter', color_space_aug=True), + dict(type='Normalize', 
**img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=8, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001, + nesterov=True) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[75, 125]) +total_epochs = 150 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tpn_slowonly_r50_8x8x1_150e_kinetics400_rgb' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_tsm_r50_1x1x8_150e_sthv1_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_tsm_r50_1x1x8_150e_sthv1_rgb.py new file mode 100644 index 0000000..3abf995 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_tsm_r50_1x1x8_150e_sthv1_rgb.py @@ -0,0 +1,128 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet50', + depth=50, + out_indices=(2, 3), + norm_eval=False, + shift_div=8), + neck=dict( + type='TPN', + in_channels=(1024, 2048), + out_channels=1024, + spatial_modulation_cfg=dict( + in_channels=(1024, 2048), out_channels=2048), + temporal_modulation_cfg=dict(downsample_scales=(8, 8)), + upsample_cfg=dict(scale_factor=(1, 1, 1)), + downsample_cfg=dict(downsample_scale=(1, 1, 1)), + level_fusion_cfg=dict( + in_channels=(1024, 1024), + mid_channels=(1024, 1024), + out_channels=2048, + downsample_scales=((1, 1, 1), (1, 1, 1))), + aux_head_cfg=dict(out_channels=174, loss_weight=0.5)), + cls_head=dict( + type='TPNHead', + num_classes=174, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.01)) +train_cfg = None +test_cfg = dict(average_clips=None) +dataset_type = 'RawframeDataset' +data_root = 'data/sthv1/rawframes' +data_root_val = 'data/sthv1/rawframes' +ann_file_train = 'data/sthv1/sthv1_train_list_rawframes.txt' +ann_file_val = 'data/sthv1/sthv1_val_list_rawframes.txt' +ann_file_test = 'data/sthv1/sthv1_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, 
num_clips=8), + dict(type='FrameSelector'), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='ColorJitter', color_space_aug=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='ColorJitter', color_space_aug=True), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=16, + test_mode=True), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=8, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005, + nesterov=True) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[75, 125]) +total_epochs = 150 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tpn_tsm_r50_8x8x1_150e_kinetics400_rgb' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/README.md b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/README.md new file mode 100644 index 0000000..4abd5b7 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/README.md @@ -0,0 +1,115 @@ +# TSM + +## Introduction +``` +@inproceedings{lin2019tsm, + title={TSM: Temporal Shift Module for Efficient Video Understanding}, + author={Lin, Ji and Gan, Chuang and Han, Song}, + booktitle={Proceedings of the IEEE International Conference on Computer Vision}, + year={2019} +} + +@article{NonLocal2018, + author = {Xiaolong Wang and Ross Girshick and Abhinav Gupta and Kaiming He}, + title = {Non-local Neural Networks}, + journal = {CVPR}, + year = {2018} +} +``` + +## Model Zoo + +### Kinetics-400 + +|config | resolution | gpus | backbone | pretrain | top1 acc| top5 acc | reference top1 acc | reference top5 acc | inference_time(video/s) | gpu_mem(M)| ckpt | log| json| +|:--|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:| 
+|[tsm_r50_1x1x8_50e_kinetics400_rgb](/configs/recognition/tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.py) |340x256|8| ResNet50| ImageNet |70.24|89.56|[70.36](https://github.com/mit-han-lab/temporal-shift-module/blob/8d53d6fda40bea2f1b37a6095279c4b454d672bd/scripts/train_tsm_kinetics_rgb_8f.sh)|[89.49](https://github.com/mit-han-lab/temporal-shift-module/blob/8d53d6fda40bea2f1b37a6095279c4b454d672bd/scripts/train_tsm_kinetics_rgb_8f.sh)|74.0 (8x1 frames)| 7079 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_1x1x8_50e_kinetics400_rgb/tsm_r50_1x1x8_50e_kinetics400_rgb_20200607-af7fb746.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_1x1x8_50e_kinetics400_rgb/20200607_211800.log)| [json](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_1x1x8_50e_kinetics400_rgb/20200607_211800.log.json)| +|[tsm_r50_1x1x8_50e_kinetics400_rgb](/configs/recognition/tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.py) |short-side 256|8| ResNet50| ImageNet |70.59|89.52|x|x|x|7079|[ckpt](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_256p_1x1x8_50e_kinetics400_rgb/tsm_r50_256p_1x1x8_50e_kinetics400_rgb_20200726-020785e2.pth)|[log](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_256p_1x1x8_50e_kinetics400_rgb/20200725_031623.log)|[json](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_256p_1x1x8_50e_kinetics400_rgb/20200725_031623.log.json)| +|[tsm_r50_video_1x1x8_50e_kinetics400_rgb](/configs/recognition/tsm/tsm_r50_video_1x1x8_50e_kinetics400_rgb.py) |short-side 256|8| ResNet50| ImageNet |70.25|89.66|[70.36](https://github.com/mit-han-lab/temporal-shift-module/blob/8d53d6fda40bea2f1b37a6095279c4b454d672bd/scripts/train_tsm_kinetics_rgb_8f.sh)|[89.49](https://github.com/mit-han-lab/temporal-shift-module/blob/8d53d6fda40bea2f1b37a6095279c4b454d672bd/scripts/train_tsm_kinetics_rgb_8f.sh)|74.0 (8x1 frames)| 7077 | [ckpt]( https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_video_1x1x8_100e_kinetics400_rgb/tsm_r50_video_1x1x8_100e_kinetics400_rgb_20200702-a77f4328.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_video_1x1x8_100e_kinetics400_rgb/tsm_r50_video_2d_1x1x8_50e_kinetics400_rgb.log)| [json](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_video_1x1x8_100e_kinetics400_rgb/tsm_r50_video_2d_1x1x8_50e_kinetics400_rgb.log.json)| +|[tsm_r50_dense_1x1x8_100e_kinetics400_rgb](/configs/recognition/tsm/tsm_r50_dense_1x1x8_100e_kinetics400_rgb.py) |340x256|8x4| ResNet50 | ImageNet|72.9|90.44|[72.22](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd#dense-sample)|[90.37](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd#dense-sample)|11.5 (8x10 frames)| 7079 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_dense_1x1x8_100e_kinetics400_rgb/tsm_r50_dense_1x1x8_100e_kinetics400_rgb_20200626-91a54551.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_dense_1x1x8_100e_kinetics400_rgb/20200626_213415.log)| [json](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_dense_1x1x8_100e_kinetics400_rgb/20200626_213415.log.json)| +|[tsm_r50_dense_1x1x8_100e_kinetics400_rgb](/configs/recognition/tsm/tsm_r50_dense_1x1x8_100e_kinetics400_rgb.py) |short-side 256|8| ResNet50 | 
ImageNet|73.38|91.02|x|x|x|7079|[ckpt](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_dense_256p_1x1x8_100e_kinetics400_rgb/tsm_r50_dense_256p_1x1x8_100e_kinetics400_rgb_20200727-e1e0c785.pth)|[log](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_dense_256p_1x1x8_100e_kinetics400_rgb/20200725_032043.log)|[json](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_dense_256p_1x1x8_100e_kinetics400_rgb/20200725_032043.log.json)| +|[tsm_r50_1x1x16_50e_kinetics400_rgb](/configs/recognition/tsm/tsm_r50_1x1x16_50e_kinetics400_rgb.py) |340x256|8| ResNet50| ImageNet |72.09|90.37|[70.67](https://github.com/mit-han-lab/temporal-shift-module/blob/8d53d6fda40bea2f1b37a6095279c4b454d672bd/scripts/train_tsm_kinetics_rgb_16f.sh)|[89.98](https://github.com/mit-han-lab/temporal-shift-module/blob/8d53d6fda40bea2f1b37a6095279c4b454d672bd/scripts/train_tsm_kinetics_rgb_16f.sh)|47.0 (16x1 frames)| 10404 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_1x1x16_50e_kinetics400_rgb/tsm_r50_340x256_1x1x16_50e_kinetics400_rgb_20201011-2f27f229.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_1x1x16_50e_kinetics400_rgb/20201011_205356.log)| [json](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_1x1x16_50e_kinetics400_rgb/20201011_205356.log.json)| +|[tsm_r50_1x1x16_50e_kinetics400_rgb](/configs/recognition/tsm/tsm_r50_1x1x16_50e_kinetics400_rgb.py) |short-side 256|8x4| ResNet50| ImageNet |71.89|90.73|x|x|x|10398|[ckpt](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_256p_1x1x16_50e_kinetics400_rgb/tsm_r50_256p_1x1x16_50e_kinetics400_rgb_20201010-85645c2a.pth)|[log](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_256p_1x1x16_50e_kinetics400_rgb/20201010_224825.log)|[json](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_256p_1x1x16_50e_kinetics400_rgb/20201010_224825.log.json)| +|[tsm_nl_embedded_gaussian_r50_1x1x8_50e_kinetics400_rgb](/configs/recognition/tsm/tsm_nl_embedded_gaussian_r50_1x1x8_50e_kinetics400_rgb.py)|short-side 320|8x4| ResNet50| ImageNet |72.03|90.25|71.81|90.36|x|8931|[ckpt](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_nl_embedded_gaussian_r50_1x1x8_50e_kinetics400_rgb/tsm_nl_embedded_gaussian_r50_1x1x8_50e_kinetics400_rgb_20200724-f00f1336.pth)|[log](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_nl_embedded_gaussian_r50_1x1x8_50e_kinetics400_rgb/20200724_120023.log)|[json](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_nl_embedded_gaussian_r50_1x1x8_50e_kinetics400_rgb/20200724_120023.log.json)| +|[tsm_nl_gaussian_r50_1x1x8_50e_kinetics400_rgb](/configs/recognition/tsm/tsm_nl_gaussian_r50_1x1x8_50e_kinetics400_rgb.py)|short-side 320|8x4| ResNet50| ImageNet |70.70|89.90|x|x|x|10125|[ckpt](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_nl_gaussian_r50_1x1x8_50e_kinetics400_rgb/tsm_nl_gaussian_r50_1x1x8_50e_kinetics400_rgb_20200816-b93fd297.pth)|[log](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_nl_gaussian_r50_1x1x8_50e_kinetics400_rgb/20200815_210253.log)|[json](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_nl_gaussian_r50_1x1x8_50e_kinetics400_rgb/20200815_210253.log.json)| +|[tsm_nl_dot_product_r50_1x1x8_50e_kinetics400_rgb](/configs/recognition/tsm/tsm_nl_dot_product_r50_1x1x8_50e_kinetics400_rgb.py)|short-side 320|8x4|ResNet50| ImageNet 
|71.60|90.34|x|x|x|8358|[ckpt](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_nl_dot_product_r50_1x1x8_50e_kinetics400_rgb/tsm_nl_dot_product_r50_1x1x8_50e_kinetics400_rgb_20200724-d8ad84d2.pth)|[log](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_nl_dot_product_r50_1x1x8_50e_kinetics400_rgb/20200723_220442.log)|[json](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_nl_dot_product_r50_1x1x8_50e_kinetics400_rgb/20200723_220442.log.json)| + +### Something-Something V1 + +|config | resolution | gpus | backbone| pretrain | top1 acc (efficient/accurate)| top5 acc (efficient/accurate)| reference top1 acc (efficient/accurate)| reference top5 acc (efficient/accurate)| gpu_mem(M) | ckpt | log| json| +|:--|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:| +|[tsm_r50_1x1x8_50e_sthv1_rgb](/configs/recognition/tsm/tsm_r50_1x1x8_50e_sthv1_rgb.py) |height 100|8| ResNet50 | ImageNet|45.46 / 47.21|74.71 / 76.09|[45.50 / 47.33](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd#training)|[74.34 / 76.60](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd#training)| 7077| [ckpt](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_1x1x8_50e_sthv1_rgb/tsm_r50_1x1x8_50e_sthv1_rgb_20200616-3417f361.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_1x1x8_50e_sthv1_rgb/20200616_022852.log)| [json](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_1x1x8_50e_sthv1_rgb/20200616_022852.log.json)| +|[tsm_r50_1x1x16_50e_sthv1_rgb](/configs/recognition/tsm/tsm_r50_1x1x16_50e_sthv1_rgb.py)|height 100|8| ResNet50 | ImageNet|47.62 / 49.28|76.63 / 77.82|[47.05 / 48.61](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd#training)|[76.40 / 77.96](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd#training)|10390|[ckpt](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_1x1x16_50e_sthv1_rgb/tsm_r50_1x1x16_50e_sthv1_rgb_20201010-17fa49f6.pth)|[log](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_1x1x16_50e_sthv1_rgb/20201010_221240.log)|[json](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_1x1x16_50e_sthv1_rgb/20201010_221240.log.json)| +|[tsm_r101_1x1x8_50e_sthv1_rgb](/configs/recognition/tsm/tsm_r101_1x1x8_50e_sthv1_rgb.py)|height 100|8| ResNet50 | ImageNet|45.72 / 48.43|74.67 / 76.72|[46.64 / 48.13](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd#training)|[75.40 / 77.31](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd#training)|9800|[ckpt](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r101_1x1x8_50e_sthv1_rgb/tsm_r101_1x1x8_50e_sthv1_rgb_20201010-43fedf2e.pth)|[log](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r101_1x1x8_50e_sthv1_rgb/20201010_224055.log)|[json](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r101_1x1x8_50e_sthv1_rgb/20201010_224055.log.json)| + +### Something-Something V2 + +|config | resolution | gpus | backbone | pretrain| top1 acc (efficient/accurate)| top5 acc (efficient/accurate)| reference top1 acc (efficient/accurate)| reference top5 acc (efficient/accurate)| gpu_mem(M) | ckpt | log| json| +|:--|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:| 
+|[tsm_r50_1x1x8_50e_sthv2_rgb](/configs/recognition/tsm/tsm_r50_1x1x8_50e_sthv2_rgb.py) |height 240|8| ResNet50| ImageNet |57.86 / 61.12|84.67 / 86.26|[57.98 / 60.69](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd#training)|[84.57 / 86.28](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd#training)| 7069 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_1x1x8_50e_sthv2_rgb/tsm_r50_1x1x8_50e_sthv2_rgb_20200912-033c4ac6.pth)|[log](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_1x1x8_50e_sthv2_rgb/20200912_140737.log)|[json](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_1x1x8_50e_sthv2_rgb/20200912_140737.log.json)| +|[tsm_r50_1x1x16_50e_sthv2_rgb](/configs/recognition/tsm/tsm_r50_1x1x16_50e_sthv2_rgb.py) |height 240|8| ResNet50| ImageNet |59.93 / 62.04|86.10 / 87.35|[58.90 / 60.98](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd#training)|[85.29 / 86.60](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd#training)| 10400| [ckpt](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_1x1x16_50e_sthv2_rgb/tsm_r50_1x1x16_50e_sthv2_rgb_20201010-16469c6f.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_1x1x16_50e_sthv2_rgb/20201010_224215.log)| [json](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_1x1x16_50e_sthv2_rgb/20201010_224215.log.json)| +|[tsm_r101_1x1x8_50e_sthv2_rgb](/configs/recognition/tsm/tsm_r101_1x1x8_50e_sthv2_rgb.py) |height 240|8| ResNet101 | ImageNet|58.59 / 61.51|85.07 / 86.90|[58.89 / 61.36](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd#training)|[85.14 / 87.00](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd#training)| 9784 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r101_1x1x8_50e_sthv2_rgb/tsm_r101_1x1x8_50e_sthv2_rgb_20201010-98cdedb8.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r101_1x1x8_50e_sthv2_rgb/20201010_224100.log)| [json](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r101_1x1x8_50e_sthv2_rgb/20201010_224100.log.json)| + +Notes: +1. The **gpus** indicates the number of gpu we used to get the checkpoint. It is noteworthy that the configs we provide are used for 8 gpus as default. +According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use different GPUs or videos per GPU, +e.g., lr=0.01 for 4 GPUs * 2 video/gpu and lr=0.08 for 16 GPUs * 4 video/gpu. +2. The **inference_time** is got by this [benchmark script](/tools/analysis/benchmark.py), where we use the sampling frames strategy of the test setting and only care about the model inference time, +not including the IO time and pre-processing time. For each setting, we use 1 gpu and set batch size (videos per gpu) to 1 to calculate the inference time. +3. The values in columns named after "reference" are the results got by training on the original repo, using the same model settings. The checkpoints for reference repo can be downloaded [here](https://download.openmmlab.com/mmaction/recognition/tsm/tsm_reference_ckpt.rar). +4. 
There are two test settings for the Something-Something datasets: the efficient setting (center crop * 1 clip) and the accurate setting (three crop * 2 clips), following the [original repo](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd). +We use the efficient setting by default in the provided config files; it can be switched to the accurate setting as follows: +```python +... +test_cfg = dict(average_clips='prob') +... +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=16, # `num_clips = 8` when using 8 segments + twice_sample=True, # set `twice_sample=True` for twice sampling in the accurate setting + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + # dict(type='CenterCrop', crop_size=224), used in the efficient setting + dict(type='ThreeCrop', crop_size=256), # used in the accurate setting + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +``` + +For more details on data preparation, refer to Kinetics400, Something-Something V1 and Something-Something V2 in [Data Preparation](/docs/data_preparation.md). + +## Train +You can use the following command to train a model. +```shell +python tools/train.py ${CONFIG_FILE} [optional arguments] +``` + +Example: train the TSM model on the Kinetics-400 dataset deterministically, with periodic validation. +```shell +python tools/train.py configs/recognition/tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.py \ + --work-dir work_dirs/tsm_r50_1x1x8_100e_kinetics400_rgb \ + --validate --seed 0 --deterministic +``` + +For more details, refer to the **Training setting** part in [getting_started](/docs/getting_started.md#training-setting). + +## Test +You can use the following command to test a model. +```shell +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments] +``` + +Example: test the TSM model on the Kinetics-400 dataset and dump the results to a JSON file. +```shell +python tools/test.py configs/recognition/tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.py \ + checkpoints/SOME_CHECKPOINT.pth --eval top_k_accuracy mean_class_accuracy \ + --out result.json +``` + +For more details, refer to the **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
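As a supplement to the Linear Scaling Rule mentioned in the notes above, the snippet below is a minimal sketch of the arithmetic and is not part of the repository: the helper name `scaled_lr` and its default arguments are illustrative assumptions, anchored only to the example given in the notes (lr=0.01 at 4 GPUs * 2 videos/gpu) and to the UCF-101 finetune configs in this directory (lr=0.001 at 8 GPUs * 8 videos/gpu).

```python
def scaled_lr(gpus, videos_per_gpu, base_lr=0.01, base_batch=8):
    """Sketch of the Linear Scaling Rule: the learning rate is scaled in
    proportion to the total batch size (gpus * videos_per_gpu). The
    defaults mirror the example in the notes above and are assumptions,
    not values read from any config in this repository."""
    total_batch = gpus * videos_per_gpu
    return base_lr * total_batch / base_batch

# Reproduces the example from the notes:
print(scaled_lr(4, 2))   # 0.01 for 4 GPUs * 2 videos/gpu
print(scaled_lr(16, 4))  # 0.08 for 16 GPUs * 4 videos/gpu

# The UCF-101 finetune configs here use lr=0.001 for 8 GPUs * 8 videos/gpu;
# under the same rule, running on 4 GPUs with videos_per_gpu=8 would
# suggest roughly half that learning rate.
print(scaled_lr(4, 8, base_lr=0.001, base_batch=64))  # 0.0005
```

This is only a rule of thumb; the learning rates given in the configs in this PR assume the 8-GPU setup noted in their comments.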
diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/finetune_ucf101_tsm_bnn.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/finetune_ucf101_tsm_bnn.py new file mode 100644 index 0000000..2589b50 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/finetune_ucf101_tsm_bnn.py @@ -0,0 +1,131 @@ +# model settings +model = dict( + type='Recognizer2DBNN', + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + shift_div=8), + cls_head=dict( + type='TSMBNNHead', + num_classes=101, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0, + init_std=0.001, + is_shift=True)) +# model training and testing settings +train_cfg = dict(loss_weight=1e-6, npass=2) +test_cfg = dict(average_clips='prob', npass=10) +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/ucf101/videos' +data_root_val = 'data/ucf101/videos' +ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt' +ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt' +ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + val_dataloader=dict(videos_per_gpu=4), + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + start_index=0, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', + lr=0.001, # this lr is used for 8 gpus + momentum=0.9, + 
weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=10) +evaluation = dict( + interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsm/finetune_ucf101_tsm_bnn' +load_from = 'https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_dense_256p_1x1x8_100e_kinetics400_rgb/tsm_r50_dense_256p_1x1x8_100e_kinetics400_rgb_20200727-e1e0c785.pth' +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/finetune_ucf101_tsm_dnn.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/finetune_ucf101_tsm_dnn.py new file mode 100644 index 0000000..498038e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/finetune_ucf101_tsm_dnn.py @@ -0,0 +1,133 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + shift_div=8), + cls_head=dict( + type='TSMHead', + num_classes=101, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001, + is_shift=True)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/ucf101/videos' +data_root_val = 'data/ucf101/videos' +ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt' +ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt' +ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + 
dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + val_dataloader=dict(videos_per_gpu=4), + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + start_index=0, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', + constructor='TSMOptimizerConstructor', + paramwise_cfg=dict(fc_lr5=True), + lr=0.001, # this lr is used for 8 gpus + momentum=0.9, + weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=10) +evaluation = dict( + interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsm/finetune_ucf101_tsm_dnn' +load_from = 'https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_dense_256p_1x1x8_100e_kinetics400_rgb/tsm_r50_dense_256p_1x1x8_100e_kinetics400_rgb_20200727-e1e0c785.pth' +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/finetune_ucf101_tsm_edlnokl_avuc_debias.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/finetune_ucf101_tsm_edlnokl_avuc_debias.py new file mode 100644 index 0000000..80d368d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/finetune_ucf101_tsm_edlnokl_avuc_debias.py @@ -0,0 +1,150 @@ +# model settings +evidence_loss = dict(type='EvidenceLoss', + num_classes=101, + evidence='exp', + loss_type='log', + with_kldiv=False, + with_avuloss=True, + annealing_method='exp') +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + shift_div=8), + cls_head=dict( + type='TSMHead', + loss_cls=evidence_loss, + num_classes=101, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001, + is_shift=True), + debias_head=dict( + type='DebiasHead', + loss_cls=evidence_loss, # actually not used! 
+ loss_factor=0.1, + num_classes=101, + in_channels=2048, + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='evidence', evidence_type='exp') +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/ucf101/videos' +data_root_val = 'data/ucf101/videos' +ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt' +ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt' +ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + val_dataloader=dict(videos_per_gpu=4), + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + start_index=0, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', + constructor='TSMOptimizerConstructor', + paramwise_cfg=dict(fc_lr5=True), + lr=0.001, # this lr is used for 8 gpus + momentum=0.9, + weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=10) +evaluation = dict( + interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +annealing_runner = True +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsm/finetune_ucf101_tsm_edlnokl_avuc_debias' 
+load_from = 'https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_dense_256p_1x1x8_100e_kinetics400_rgb/tsm_r50_dense_256p_1x1x8_100e_kinetics400_rgb_20200727-e1e0c785.pth' +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/finetune_ucf101_tsm_rpl.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/finetune_ucf101_tsm_rpl.py new file mode 100644 index 0000000..c748e73 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/finetune_ucf101_tsm_rpl.py @@ -0,0 +1,134 @@ +# model settings +model = dict( + type='Recognizer2DRPL', + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + shift_div=8), + cls_head=dict( + type='TSMRPLHead', + loss_cls=dict(type='RPLoss', + temperature=1, + weight_pl=0.1), + num_classes=101, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001, + is_shift=True)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/ucf101/videos' +data_root_val = 'data/ucf101/videos' +ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt' +ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt' +ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + val_dataloader=dict(videos_per_gpu=4), + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + 
pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + start_index=0, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', + lr=0.001, # this lr is used for 8 gpus + momentum=0.9, + weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=10) +evaluation = dict( + interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsm/finetune_ucf101_tsm_rpl' +load_from = 'https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_dense_256p_1x1x8_100e_kinetics400_rgb/tsm_r50_dense_256p_1x1x8_100e_kinetics400_rgb_20200727-e1e0c785.pth' +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/inference_tsm_bnn.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/inference_tsm_bnn.py new file mode 100644 index 0000000..940b7e5 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/inference_tsm_bnn.py @@ -0,0 +1,50 @@ +# model settings +model = dict( + type='Recognizer2DBNN', + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + shift_div=8), + cls_head=dict( + type='TSMBNNHead', + num_classes=101, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0, + init_std=0.001, + is_shift=True)) +# model training and testing settings +test_cfg = dict(average_clips='prob', npass=10) +# dataset settings +dataset_type = 'VideoDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + test=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + start_index=0, + pipeline=test_pipeline)) diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/inference_tsm_dnn.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/inference_tsm_dnn.py new file mode 100644 index 0000000..a392cd4 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/inference_tsm_dnn.py @@ -0,0 +1,50 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + shift_div=8), + cls_head=dict( + type='TSMHead', + num_classes=101, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001, + is_shift=True)) +# model training and testing settings +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'VideoDataset' +img_norm_cfg 
= dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + test=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + start_index=0, + pipeline=test_pipeline)) diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/inference_tsm_enn.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/inference_tsm_enn.py new file mode 100644 index 0000000..7880afb --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/inference_tsm_enn.py @@ -0,0 +1,56 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + shift_div=8), + cls_head=dict( + type='TSMHead', + loss_cls=dict(type='EvidenceLoss', + num_classes=101, + evidence='exp', + loss_type='log', + annealing_method='exp'), + num_classes=101, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001, + is_shift=True)) +# model training and testing settings +evidence='exp' # only used for EDL +test_cfg = dict(average_clips='score') +# dataset settings +dataset_type = 'VideoDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + test=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + start_index=0, + pipeline=test_pipeline)) diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/inference_tsm_rpl.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/inference_tsm_rpl.py new file mode 100644 index 0000000..bdeb701 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/inference_tsm_rpl.py @@ -0,0 +1,53 @@ +# model settings +model = dict( + type='Recognizer2DRPL', + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + shift_div=8), + cls_head=dict( + type='TSMRPLHead', + loss_cls=dict(type='RPLoss', + temperature=1, + weight_pl=0.1), + num_classes=101, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001, + is_shift=True)) +# model training and testing settings +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'VideoDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 
57.375], to_bgr=False) +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + test=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + start_index=0, + pipeline=test_pipeline)) diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/train_kinetics10_tsm_DEAR.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/train_kinetics10_tsm_DEAR.py new file mode 100644 index 0000000..f73bb21 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/train_kinetics10_tsm_DEAR.py @@ -0,0 +1,150 @@ +# model settings +evidence_loss = dict(type='EvidenceLoss', + num_classes=101, + evidence='exp', + loss_type='log', + with_kldiv=False, + with_avuloss=True, + annealing_method='exp') +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + shift_div=8), + cls_head=dict( + type='TSMHead', + loss_cls=evidence_loss, + num_classes=101, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001, + is_shift=True), + debias_head=dict( + type='DebiasHead', + loss_cls=evidence_loss, # actually not used! + loss_factor=0.1, + num_classes=101, + in_channels=2048, + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='evidence', evidence_type='exp') +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/kinetics10/videos_train' +data_root_val = 'data/kinetics10/videos_val' +ann_file_train = 'data/kinetics10/kinetics10_train_list_videos.txt' +ann_file_val = 'data/kinetics10/kinetics10_val_list_videos.txt' +ann_file_test = 'data/kinetics10/kinetics10_val_list_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] 
+test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=4, + workers_per_gpu=4, + val_dataloader=dict(videos_per_gpu=4), + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + start_index=0, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', + constructor='TSMOptimizerConstructor', + paramwise_cfg=dict(fc_lr5=True), + lr=0.001, # this lr is used for 8 gpus + momentum=0.9, + weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=10) +evaluation = dict( + interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +annealing_runner = True +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsm/train_kinetics10_tsm_DEAR' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/train_kinetics10_tsm_DEAR_noDebias.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/train_kinetics10_tsm_DEAR_noDebias.py new file mode 100644 index 0000000..e51ffef --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/train_kinetics10_tsm_DEAR_noDebias.py @@ -0,0 +1,142 @@ +# model settings +evidence_loss = dict(type='EvidenceLoss', + num_classes=101, + evidence='exp', + loss_type='log', + with_kldiv=False, + with_avuloss=True, + annealing_method='exp') +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + shift_div=8), + cls_head=dict( + type='TSMHead', + loss_cls=evidence_loss, + num_classes=101, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001, + is_shift=True)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='evidence', evidence_type='exp') +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/kinetics10/videos_train' +data_root_val = 'data/kinetics10/videos_val' +ann_file_train = 'data/kinetics10/kinetics10_train_list_videos.txt' +ann_file_val = 'data/kinetics10/kinetics10_val_list_videos.txt' +ann_file_test = 'data/kinetics10/kinetics10_val_list_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 
256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='DenseSampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=4, + workers_per_gpu=4, + val_dataloader=dict(videos_per_gpu=4), + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + start_index=0, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', + constructor='TSMOptimizerConstructor', + paramwise_cfg=dict(fc_lr5=True), + lr=0.001, # this lr is used for 8 gpus + momentum=0.9, + weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=10) +evaluation = dict( + interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +annealing_runner = True +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsm/train_kinetics10_tsm_DEAR_noDebias' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_nl_dot_product_r50_1x1x8_50e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_nl_dot_product_r50_1x1x8_50e_kinetics400_rgb.py new file mode 100644 index 0000000..e59c8ec --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_nl_dot_product_r50_1x1x8_50e_kinetics400_rgb.py @@ -0,0 +1,132 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + non_local=((0, 0, 0), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 0, 0)), + non_local_cfg=dict( + sub_sample=True, + use_scale=False, + norm_cfg=dict(type='BN3d', 
requires_grad=True), + mode='dot_product'), + shift_div=8), + cls_head=dict( + type='TSMHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001, + is_shift=True)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', + constructor='TSMOptimizerConstructor', + paramwise_cfg=dict(fc_lr5=True), + lr=0.01, # this lr is used for 8 gpus + momentum=0.9, + weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsm_nl_gaussian_r50_1x1x8_50e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = 
[('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_nl_embedded_gaussian_r50_1x1x8_50e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_nl_embedded_gaussian_r50_1x1x8_50e_kinetics400_rgb.py new file mode 100644 index 0000000..4253371 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_nl_embedded_gaussian_r50_1x1x8_50e_kinetics400_rgb.py @@ -0,0 +1,132 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + non_local=((0, 0, 0), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 0, 0)), + non_local_cfg=dict( + sub_sample=True, + use_scale=False, + norm_cfg=dict(type='BN3d', requires_grad=True), + mode='embedded_gaussian'), + shift_div=8), + cls_head=dict( + type='TSMHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001, + is_shift=True)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + 
pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', + constructor='TSMOptimizerConstructor', + paramwise_cfg=dict(fc_lr5=True), + lr=0.01, # this lr is used for 8 gpus + momentum=0.9, + weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsm_nl_embedded_gaussian_r50_1x1x8_50e_kinetics400_rgb/' # noqa: E501 +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_nl_gaussian_r50_1x1x8_50e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_nl_gaussian_r50_1x1x8_50e_kinetics400_rgb.py new file mode 100644 index 0000000..9a4d25b --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_nl_gaussian_r50_1x1x8_50e_kinetics400_rgb.py @@ -0,0 +1,132 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + non_local=((0, 0, 0), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 0, 0)), + non_local_cfg=dict( + sub_sample=True, + use_scale=False, + norm_cfg=dict(type='BN3d', requires_grad=True), + mode='gaussian'), + shift_div=8), + cls_head=dict( + type='TSMHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001, + is_shift=True)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', 
+ clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', + constructor='TSMOptimizerConstructor', + paramwise_cfg=dict(fc_lr5=True), + lr=0.01, # this lr is used for 8 gpus + momentum=0.9, + weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsm_nl_gaussian_r50_1x1x8_50e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r101_1x1x8_50e_sthv1_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r101_1x1x8_50e_sthv1_rgb.py new file mode 100644 index 0000000..fadb840 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r101_1x1x8_50e_sthv1_rgb.py @@ -0,0 +1,126 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet101', + depth=101, + norm_eval=False, + shift_div=8), + cls_head=dict( + type='TSMHead', + num_classes=174, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001, + is_shift=True)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/sthv1/rawframes' +data_root_val = 'data/sthv1/rawframes' +ann_file_train = 'data/sthv1/sthv1_train_list_rawframes.txt' +ann_file_val = 'data/sthv1/sthv1_val_list_rawframes.txt' +ann_file_test = 'data/sthv1/sthv1_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + 
num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + filename_tmpl='{:05}.jpg', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + filename_tmpl='{:05}.jpg', + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + filename_tmpl='{:05}.jpg', + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', + constructor='TSMOptimizerConstructor', + paramwise_cfg=dict(fc_lr5=True), + lr=0.01, # this lr is used for 8 gpus + momentum=0.9, + weight_decay=0.0005) +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsm_r101_1x1x8_50e_sthv1_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r101_1x1x8_50e_sthv2_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r101_1x1x8_50e_sthv2_rgb.py new file mode 100644 index 0000000..847f290 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r101_1x1x8_50e_sthv2_rgb.py @@ -0,0 +1,123 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet101', + depth=101, + norm_eval=False, + shift_div=8), + cls_head=dict( + type='TSMHead', + num_classes=174, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001, + is_shift=True)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/sthv2/rawframes' +data_root_val = 'data/sthv2/rawframes' +ann_file_train = 'data/sthv2/sthv2_train_list_rawframes.txt' +ann_file_val = 'data/sthv2/sthv2_val_list_rawframes.txt' +ann_file_test = 'data/sthv2/sthv2_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 
0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', + constructor='TSMOptimizerConstructor', + paramwise_cfg=dict(fc_lr5=True), + lr=0.01, # this lr is used for 8 gpus + momentum=0.9, + weight_decay=0.0005) +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsm_r101_1x1x8_50e_sthv2_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_1x1x16_50e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_1x1x16_50e_kinetics400_rgb.py new file mode 100644 index 0000000..be61619 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_1x1x16_50e_kinetics400_rgb.py @@ -0,0 +1,128 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet50', + depth=50, + num_segments=16, + norm_eval=False, + shift_div=8), + cls_head=dict( + type='TSMHead', + num_classes=400, + num_segments=16, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001, + is_shift=True)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 
'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=16), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=16, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=16, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=6, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', + constructor='TSMOptimizerConstructor', + paramwise_cfg=dict(fc_lr5=True), + lr=0.0075, # this lr is used for 8 gpus + momentum=0.9, + weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsm_r50_1x1x16_50e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_1x1x16_50e_sthv1_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_1x1x16_50e_sthv1_rgb.py new file mode 100644 index 0000000..a571b8a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_1x1x16_50e_sthv1_rgb.py @@ -0,0 +1,128 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet50', + depth=50, + num_segments=16, + norm_eval=False, + shift_div=8), + 
cls_head=dict( + type='TSMHead', + num_classes=174, + num_segments=16, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001, + is_shift=True)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/sthv1/rawframes' +data_root_val = 'data/sthv1/rawframes' +ann_file_train = 'data/sthv1/sthv1_train_list_rawframes.txt' +ann_file_val = 'data/sthv1/sthv1_val_list_rawframes.txt' +ann_file_test = 'data/sthv1/sthv1_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=16), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=16, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=16, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=6, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + filename_tmpl='{:05}.jpg', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + filename_tmpl='{:05}.jpg', + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + filename_tmpl='{:05}.jpg', + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', + constructor='TSMOptimizerConstructor', + paramwise_cfg=dict(fc_lr5=True), + lr=0.0075, # this lr is used for 8 gpus + momentum=0.9, + weight_decay=0.0005) +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsm_r50_1x1x16_50e_sthv1_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_1x1x16_50e_sthv2_rgb.py 
b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_1x1x16_50e_sthv2_rgb.py new file mode 100644 index 0000000..e9bae6b --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_1x1x16_50e_sthv2_rgb.py @@ -0,0 +1,125 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet50', + depth=50, + num_segments=16, + norm_eval=False, + shift_div=8), + cls_head=dict( + type='TSMHead', + num_classes=174, + num_segments=16, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001, + is_shift=True)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/sthv2/rawframes' +data_root_val = 'data/sthv2/rawframes' +ann_file_train = 'data/sthv2/sthv2_train_list_rawframes.txt' +ann_file_val = 'data/sthv2/sthv2_val_list_rawframes.txt' +ann_file_test = 'data/sthv2/sthv2_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=16), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=16, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=16, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=6, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', + constructor='TSMOptimizerConstructor', + paramwise_cfg=dict(fc_lr5=True), + lr=0.0075, # this lr is used for 8 gpus + momentum=0.9, + weight_decay=0.0005) +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + 
dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsm_r50_1x1x16_50e_sthv2_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.py new file mode 100644 index 0000000..d737207 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.py @@ -0,0 +1,126 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + shift_div=8), + cls_head=dict( + type='TSMHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001, + is_shift=True)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer 
+optimizer = dict( + type='SGD', + constructor='TSMOptimizerConstructor', + paramwise_cfg=dict(fc_lr5=True), + lr=0.01, # this lr is used for 8 gpus + momentum=0.9, + weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsm_r50_1x1x8_100e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_1x1x8_50e_sthv1_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_1x1x8_50e_sthv1_rgb.py new file mode 100644 index 0000000..5f6f895 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_1x1x8_50e_sthv1_rgb.py @@ -0,0 +1,126 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + shift_div=8), + cls_head=dict( + type='TSMHead', + num_classes=174, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001, + is_shift=True)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/sthv1/rawframes' +data_root_val = 'data/sthv1/rawframes' +ann_file_train = 'data/sthv1/sthv1_train_list_rawframes.txt' +ann_file_val = 'data/sthv1/sthv1_val_list_rawframes.txt' +ann_file_test = 'data/sthv1/sthv1_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + 
type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + filename_tmpl='{:05}.jpg', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + filename_tmpl='{:05}.jpg', + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + filename_tmpl='{:05}.jpg', + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', + constructor='TSMOptimizerConstructor', + paramwise_cfg=dict(fc_lr5=True), + lr=0.01, # this lr is used for 8 gpus + momentum=0.9, + weight_decay=0.0005) +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsm_r50_1x1x8_50e_sthv1_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_1x1x8_50e_sthv2_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_1x1x8_50e_sthv2_rgb.py new file mode 100644 index 0000000..1738648 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_1x1x8_50e_sthv2_rgb.py @@ -0,0 +1,123 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + shift_div=8), + cls_head=dict( + type='TSMHead', + num_classes=174, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001, + is_shift=True)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/sthv2/rawframes' +data_root_val = 'data/sthv2/rawframes' +ann_file_train = 'data/sthv2/sthv2_train_list_rawframes.txt' +ann_file_val = 'data/sthv2/sthv2_val_list_rawframes.txt' +ann_file_test = 'data/sthv2/sthv2_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=16, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + 
frame_interval=1, + num_clips=16, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=6, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', + constructor='TSMOptimizerConstructor', + paramwise_cfg=dict(fc_lr5=True), + lr=0.0075, # this lr is used for 8 gpus + momentum=0.9, + weight_decay=0.0005) +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsm_r50_1x1x8_50e_sthv2_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_dense_1x1x8_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_dense_1x1x8_100e_kinetics400_rgb.py new file mode 100644 index 0000000..f1470ba --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_dense_1x1x8_100e_kinetics400_rgb.py @@ -0,0 +1,127 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + shift_div=8), + cls_head=dict( + type='TSMHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001, + is_shift=True)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + 
dict( + type='DenseSampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='DenseSampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + val_dataloader=dict(videos_per_gpu=4), + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', + constructor='TSMOptimizerConstructor', + paramwise_cfg=dict(fc_lr5=True), + lr=0.02, # this lr is used for 8 gpus + momentum=0.9, + weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsm_r50_dense_1x1x8_100e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_video_1x1x8_50e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_video_1x1x8_50e_kinetics400_rgb.py new file mode 100644 index 0000000..a936169 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_video_1x1x8_50e_kinetics400_rgb.py @@ -0,0 +1,129 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + shift_div=8), + cls_head=dict( + type='TSMHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001, + is_shift=True)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/kinetics400/videos_train' +data_root_val = 'data/kinetics400/videos_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='DecordInit'), + 
dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', + constructor='TSMOptimizerConstructor', + paramwise_cfg=dict(fc_lr5=True), + lr=0.02, # this lr is used for 8 gpus + momentum=0.9, + weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsm_r50_video_2d_1x1x8_50e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_video_inference_1x1x8_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_video_inference_1x1x8_100e_kinetics400_rgb.py new file mode 100644 index 0000000..e09f5f1 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_video_inference_1x1x8_100e_kinetics400_rgb.py @@ -0,0 +1,49 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + shift_div=8), + cls_head=dict( + type='TSMHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + 
dropout_ratio=0.5, + init_std=0.001, + is_shift=True)) +# model training and testing settings +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'VideoDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict(type='DecordInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +data = dict( + videos_per_gpu=1, + workers_per_gpu=2, + test=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + pipeline=test_pipeline)) diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_temporal_pool_r50_1x1x8_50e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_temporal_pool_r50_1x1x8_50e_kinetics400_rgb.py new file mode 100644 index 0000000..5d79eaf --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_temporal_pool_r50_1x1x8_50e_kinetics400_rgb.py @@ -0,0 +1,128 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + temporal_pool=True, + shift_div=8), + cls_head=dict( + type='TSMHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001, + temporal_pool=True, + is_shift=True)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + 
dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', + constructor='TSMOptimizerConstructor', + paramwise_cfg=dict(fc_lr5=True), + lr=0.01, # this lr is used for 8 gpus + momentum=0.9, + weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsm_temporal_pool_r50_1x1x8_100e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/README.md b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/README.md new file mode 100644 index 0000000..d1749f4 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/README.md @@ -0,0 +1,200 @@ +# TSN + +## Introduction +``` +@inproceedings{wang2016temporal, + title={Temporal segment networks: Towards good practices for deep action recognition}, + author={Wang, Limin and Xiong, Yuanjun and Wang, Zhe and Qiao, Yu and Lin, Dahua and Tang, Xiaoou and Van Gool, Luc}, + booktitle={European conference on computer vision}, + pages={20--36}, + year={2016}, + organization={Springer} +} +``` + +## Model Zoo + +### UCF-101 + +|config | gpus | backbone | pretrain | top1 acc| top5 acc | gpu_mem(M) | ckpt | log| json| +|:--|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:| +|[tsn_r50_1x1x3_75e_ucf101_rgb](/configs/recognition/tsn/tsn_r50_1x1x3_75e_ucf101_rgb.py) [1] |8| ResNet50 | ImageNet |83.03|96.78|8332| [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x3_75e_ucf101_rgb/tsn_r50_1x1x3_75e_ucf101_rgb_20201023-d85ab600.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x3_75e_ucf101_rgb/tsn_r50_1x1x3_75e_ucf101_rgb_20201023.log) | [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x3_75e_ucf101_rgb/tsn_r50_1x1x3_75e_ucf101_rgb_20201023.json) | + +[1] We report the performance on UCF-101 split1. 
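The configs referenced in these tables are plain mmaction-style Python files (the same kind added in the diffs above), so they can be inspected or adjusted programmatically before launching a run. The sketch below is illustrative only: it assumes `mmcv` is installed, and the config path, GPU count, and `work_dir` are placeholder choices rather than values prescribed by this repository.

```python
# Minimal sketch: load one of the configs from this PR and adjust a few
# common settings. The path, num_gpus and work_dir are illustrative assumptions.
from mmcv import Config

cfg_path = ('Downstream/Open-Set-Action-Recognition/configs/recognition/'
            'tsn/tsn_r50_1x1x3_75e_ucf101_rgb.py')
cfg = Config.fromfile(cfg_path)

# Inspect the key training settings defined in the file.
print(cfg.model.cls_head.num_classes)            # number of action classes
print(cfg.data.videos_per_gpu, cfg.optimizer.lr)

# The configs above carry the comment "this lr is used for 8 gpus"; a common
# convention is to scale the base lr linearly for a different GPU count.
num_gpus = 4
cfg.optimizer.lr = cfg.optimizer.lr * num_gpus / 8
cfg.work_dir = './work_dirs/my_tsn_experiment'

print(cfg.pretty_text)  # dump the resolved config for a sanity check
```

The same pattern applies to any of the TSM configs added earlier in this PR; only the config path and the settings you choose to override change.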
+### HMDB51 + +|config | gpus | backbone | pretrain | top1 acc| top5 acc | gpu_mem(M) | ckpt | log| json| +|:--|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:| +|[tsn_r50_1x1x8_50e_hmdb51_imagenet_rgb](/configs/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_imagenet_rgb.py)|8| ResNet50 | ImageNet | 48.95| 80.19| 21535| [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_imagenet_rgb/tsn_r50_1x1x8_50e_hmdb51_imagenet_rgb_20201123-ce6c27ed.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_imagenet_rgb/20201025_231108.log) | [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_imagenet_rgb/20201025_231108.log.json) | +|[tsn_r50_1x1x8_50e_hmdb51_kinetics400_rgb](/configs/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_kinetics400_rgb.py) |8| ResNet50 | Kinetics400 | 56.08 | 84.31 | 21535| [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_kinetics400_rgb/tsn_r50_1x1x8_50e_hmdb51_kinetics400_rgb_20201123-7f84701b.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_kinetics400_rgb/20201108_190805.log) | [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_kinetics400_rgb/20201108_190805.log.json) | +|[tsn_r50_1x1x8_50e_hmdb51_mit_rgb](/configs/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_mit_rgb.py) |8| ResNet50 | Moments | 54.25 | 83.86| 21535| [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_mit_rgb/tsn_r50_1x1x8_50e_hmdb51_mit_rgb_20201123-01526d41.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_mit_rgb/20201112_170135.log) | [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_mit_rgb/20201112_170135.log.json) | + + +### Kinetics-400 + +|config | resolution | gpus | backbone|pretrain | top1 acc| top5 acc | reference top1 acc | reference top5 acc | inference_time(video/s) | gpu_mem(M)| ckpt | log| json| +|:--|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:| +|[tsn_r50_1x1x3_100e_kinetics400_rgb](/configs/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py) |340x256|8| ResNet50 | ImageNet|70.60|89.26|x|x|4.3 (25x10 frames)|8344| [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb/20200614_063526.log)| [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb/20200614_063526.log.json)| +|[tsn_r50_1x1x3_100e_kinetics400_rgb](/configs/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py) |short-side 256|8| ResNet50 | ImageNet|70.42|89.03|x|x|x|8343|[ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_256p_1x1x3_100e_kinetics400_rgb/tsn_r50_256p_1x1x3_100e_kinetics400_rgb_20200725-22592236.pth)|[log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_256p_1x1x3_100e_kinetics400_rgb/20200725_031325.log)|[json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_256p_1x1x3_100e_kinetics400_rgb/20200725_031325.log.json)| +|[tsn_r50_dense_1x1x5_50e_kinetics400_rgb](/configs/recognition/tsn/tsn_r50_dense_1x1x5_100e_kinetics400_rgb.py) |340x256|8x3| ResNet50| ImageNet 
|70.18|89.10|[69.15](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd#training)|[88.56](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd#training)|12.7 (8x10 frames)|7028| [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_dense_1x1x5_100e_kinetics400_rgb/tsn_r50_dense_1x1x5_100e_kinetics400_rgb_20200627-a063165f.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_dense_1x1x5_100e_kinetics400_rgb/20200627_105310.log)| [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_dense_1x1x5_100e_kinetics400_rgb/20200627_105310.log.json)| +|[tsn_r50_320p_1x1x3_100e_kinetics400_rgb](/configs/recognition/tsn/tsn_r50_320p_1x1x3_100e_kinetics400_rgb.py) |short-side 320|8x2| ResNet50| ImageNet |70.91|89.51|x|x|10.7 (25x3 frames)| 8344 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x3_100e_kinetics400_rgb/tsn_r50_320p_1x1x3_100e_kinetics400_rgb_20200702-cc665e2a.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x3_100e_kinetics400_rgb/tsn_r50_f3_kinetics400_shortedge_70.9_89.5.log) | [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x3_100e_kinetics400_rgb/tsn_r50_f3_kinetics400_shortedge_70.9_89.5.log.json)| +|[tsn_r50_320p_1x1x3_110e_kinetics400_flow](/configs/recognition/tsn/tsn_r50_320p_1x1x3_110e_kinetics400_flow.py) |short-side 320|8x2| ResNet50 | ImageNet|55.70|79.85|x|x|x| 8471 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x3_110e_kinetics400_flow/tsn_r50_320p_1x1x3_110e_kinetics400_flow_20200705-3036bab6.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x3_110e_kinetics400_flow/tsn_r50_f3_kinetics400_flow_shortedge_55.7_79.9.log) | [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x3_110e_kinetics400_flow/tsn_r50_f3_kinetics400_flow_shortedge_55.7_79.9.log.json)| +|tsn_r50_320p_1x1x3_kinetics400_twostream [1: 1]* |x|x| ResNet50 | ImageNet|72.76|90.52| x | x | x | x | x|x|x| +|[tsn_r50_1x1x8_100e_kinetics400_rgb](/configs/recognition/tsn/tsn_r50_320p_1x1x8_100e_kinetics400_rgb.py)|short-side 256|8| ResNet50| ImageNet |71.80|90.17|x|x|x|8343|[ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_256p_1x1x8_100e_kinetics400_rgb/tsn_r50_256p_1x1x8_100e_kinetics400_rgb_20200817-883baf16.pth)|[log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_256p_1x1x8_100e_kinetics400_rgb/20200815_173413.log)|[json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_256p_1x1x8_100e_kinetics400_rgb/20200815_173413.log.json)| +|[tsn_r50_320p_1x1x8_100e_kinetics400_rgb](/configs/recognition/tsn/tsn_r50_320p_1x1x8_100e_kinetics400_rgb.py) |short-side 320|8x3| ResNet50| ImageNet |72.41|90.55|x|x|11.1 (25x3 frames)| 8344 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x8_100e_kinetics400_rgb/tsn_r50_320p_1x1x8_100e_kinetics400_rgb_20200702-ef80e3d7.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x8_100e_kinetics400_rgb/tsn_r50_f8_kinetics400_shortedge_72.4_90.6.log) | [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x8_100e_kinetics400_rgb/tsn_r50_f8_kinetics400_shortedge_72.4_90.6.log.json)| +|[tsn_r50_320p_1x1x8_110e_kinetics400_flow](/configs/recognition/tsn/tsn_r50_320p_1x1x8_110e_kinetics400_flow.py) |short-side 
320|8x4| ResNet50 | ImageNet|57.76|80.99|x|x|x| 8473 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x8_110e_kinetics400_flow/tsn_r50_320p_1x1x8_110e_kinetics400_flow_20200705-1f39486b.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x8_110e_kinetics400_flow/tsn_r50_f8_kinetics400_flow_shortedge_57.8_81.0.log) | [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x8_110e_kinetics400_flow/tsn_r50_f8_kinetics400_flow_shortedge_57.8_81.0.log.json)| +|tsn_r50_320p_1x1x8_kinetics400_twostream [1: 1]* |x|x| ResNet50| ImageNet |74.64|91.77| x | x | x | x | x|x|x| +|[tsn_r50_video_320p_1x1x3_100e_kinetics400_rgb](/configs/recognition/tsn/tsn_r50_video_320p_1x1x3_100e_kinetics400_rgb.py) |short-side 320|8| ResNet50 | ImageNet |71.11|90.04| x | x | x | 8343 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_video_320p_1x1x3_100e_kinetics400_rgb/tsn_r50_video_320p_1x1x3_100e_kinetics400_rgb_20201014-5ae1ee79.pth) |[log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_video_320p_1x1x3_100e_kinetics400_rgb/tsn_r50_video_320p_1x1x3_100e_kinetics400_rgb_20201014.log)|[json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_video_320p_1x1x3_100e_kinetics400_rgb/tsn_r50_video_320p_1x1x3_100e_kinetics400_rgb_20201014.json)| +|[tsn_r50_dense_1x1x8_100e_kinetics400_rgb](/configs/recognition/tsn/tsn_r50_dense_1x1x8_100e_kinetics400_rgb.py) |340x256|8| ResNet50 | ImageNet|70.77|89.3|[68.75](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd#training)|[88.42](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd#training)|12.2 (8x10 frames)|8344| [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_dense_1x1x8_100e_kinetics400_rgb/tsn_r50_dense_1x1x8_100e_kinetics400_rgb_20200606-e925e6e3.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_dense_1x1x8_100e_kinetics400_rgb/20200606_003901.log)| [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_dense_1x1x8_100e_kinetics400_rgb/20200606_003901.log.json)| +|[tsn_r50_video_1x1x8_100e_kinetics400_rgb](/configs/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics400_rgb.py) |short-side 256|8| ResNet50| ImageNet | 71.79 | 90.25 |x|x|x|21558| [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics400_rgb/tsn_r50_video_1x1x8_100e_kinetics400_rgb_20200702-568cde33.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics400_rgb/tsn_r50_video_2d_1x1x8_100e_kinetics400_rgb.log)| [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics400_rgb/tsn_r50_video_2d_1x1x8_100e_kinetics400_rgb.log.json)| +|[tsn_r50_video_dense_1x1x8_100e_kinetics400_rgb](/configs/recognition/tsn/tsn_r50_video_dense_1x1x8_100e_kinetics400_rgb.py) |short-side 256|8| ResNet50| ImageNet | 70.40 | 89.12 |x|x|x|21553| [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_video_dense_1x1x8_100e_kinetics400_rgb/tsn_r50_video_dense_1x1x8_100e_kinetics400_rgb_20200703-0f19175f.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_video_dense_1x1x8_100e_kinetics400_rgb/tsn_r50_video_2d_1x1x8_dense_100e_kinetics400_rgb.log)| 
[json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_video_dense_1x1x8_100e_kinetics400_rgb/tsn_r50_video_2d_1x1x8_dense_100e_kinetics400_rgb.log.json)| + +Here, We use [1: 1] to indicate that we combine rgb and flow score with coefficients 1: 1 to get the two-stream prediction (without applying softmax). + +### Kinetics-400 Data Benchmark (8-gpus, ResNet50, ImageNet pretrain; 3 segments) + +In data benchmark, we compare: +1. Different data preprocessing methods: (1) Resize video to 340x256, (2) Resize the short edge of video to 320px, (3) Resize the short edge of video to 256px; +2. Different data augmentation methods: (1) MultiScaleCrop, (2) RandomResizedCrop; +3. Different testing protocols: (1) 25 frames x 10 crops, (2) 25 frames x 3 crops. + +| config | resolution | training augmentation | testing protocol | top1 acc | top5 acc | ckpt | log | json | +| :----------------------------------------------------------: | :------------: | :-------------------: | :--------------: | :------: | :------: | :----------------------------------------------------------: | :----------------------------------------------------------: | :----------------------------------------------------------: | +| [tsn_r50_multiscalecrop_340x256_1x1x3_100e_kinetics400_rgb](/configs/recognition/tsn/data_benchmark/tsn_r50_multiscalecrop_340x256_1x1x3_100e_kinetics400_rgb.py) | 340x256 | MultiScaleCrop | 25x10 frames | 70.60 | 89.26 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb/20200614_063526.log) | [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb/20200614_063526.log.json) | +| x | 340x256 | MultiScaleCrop | 25x3 frames | 70.52 | 89.39 | x | x | x | +| [tsn_r50_randomresizedcrop_340x256_1x1x3_100e_kinetics400_rgb](/configs/recognition/tsn/data_benchmark/tsn_r50_randomresizedcrop_340x256_1x1x3_100e_kinetics400_rgb.py) | 340x256 | RandomResizedCrop | 25x10 frames | 70.11 | 89.01 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/data_benchmark/tsn_r50_randomresizedcrop_340x256_1x1x3_100e_kinetics400_rgb/tsn_r50_randomresizedcrop_340x256_1x1x3_100e_kinetics400_rgb_20200725-88cb325a.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/data_benchmark/tsn_r50_randomresizedcrop_340x256_1x1x3_100e_kinetics400_rgb/tsn_r50_randomresizedcrop_340x256_1x1x3_100e_kinetics400_rgb_20200725.log) | [json](https://download.openmmlab.com/mmaction/recognition/tsn/data_benchmark/tsn_r50_randomresizedcrop_340x256_1x1x3_100e_kinetics400_rgb/tsn_r50_randomresizedcrop_340x256_1x1x3_100e_kinetics400_rgb_20200725.json) | +| x | 340x256 | RandomResizedCrop | 25x3 frames | 69.95 | 89.02 | x | x | x | +| [tsn_r50_multiscalecrop_320p_1x1x3_100e_kinetics400_rgb](/configs/recognition/tsn/data_benchmark/tsn_r50_multiscalecrop_320p_1x1x3_100e_kinetics400_rgb.py) | short-side 320 | MultiScaleCrop | 25x10 frames | 70.32 | 89.25 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/data_benchmark/tsn_r50_multiscalecrop_320p_1x1x3_100e_kinetics400_rgb/tsn_r50_multiscalecrop_320p_1x1x3_100e_kinetics400_rgb_20200725-9922802f.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/data_benchmark/tsn_r50_multiscalecrop_320p_1x1x3_100e_kinetics400_rgb/tsn_r50_multiscalecrop_320p_1x1x3_100e_kinetics400_rgb_20200725.log) | 
[json](https://download.openmmlab.com/mmaction/recognition/tsn/data_benchmark/tsn_r50_multiscalecrop_320p_1x1x3_100e_kinetics400_rgb/tsn_r50_multiscalecrop_320p_1x1x3_100e_kinetics400_rgb_20200725.json) | +| x | short-side 320 | MultiScaleCrop | 25x3 frames | 70.54 | 89.39 | x | x | x | +| [tsn_r50_randomresizedcrop_320p_1x1x3_100e_kinetics400_rgb](/configs/recognition/tsn/data_benchmark/tsn_r50_randomresizedcrop_320p_1x1x3_100e_kinetics400_rgb.py) | short-side 320 | RandomResizedCrop | 25x10 frames | 70.44 | 89.23 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x3_100e_kinetics400_rgb/tsn_r50_320p_1x1x3_100e_kinetics400_rgb_20200702-cc665e2a.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x3_100e_kinetics400_rgb/tsn_r50_f3_kinetics400_shortedge_70.9_89.5.log) | [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x3_100e_kinetics400_rgb/tsn_r50_f3_kinetics400_shortedge_70.9_89.5.log.json) | +| x | short-side 320 | RandomResizedCrop | 25x3 frames | 70.91 | 89.51 | x | x | x | +| [tsn_r50_multiscalecrop_256p_1x1x3_100e_kinetics400_rgb](/configs/recognition/tsn/data_benchmark/tsn_r50_multiscalecrop_256p_1x1x3_100e_kinetics400_rgb.py) | short-side 256 | MultiScaleCrop | 25x10 frames | 70.42 | 89.03 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_256p_1x1x3_100e_kinetics400_rgb/tsn_r50_256p_1x1x3_100e_kinetics400_rgb_20200725-22592236.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_256p_1x1x3_100e_kinetics400_rgb/20200725_031325.log) | [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_256p_1x1x3_100e_kinetics400_rgb/20200725_031325.log.json)| +| x | short-side 256 | MultiScaleCrop | 25x3 frames | 70.79 | 89.42 | x | x | x | +| [tsn_r50_randomresizedcrop_256p_1x1x3_100e_kinetics400_rgb](/configs/recognition/tsn/data_benchmark/tsn_r50_randomresizedcrop_256p_1x1x3_100e_kinetics400_rgb.py) | short-side 256 | RandomResizedCrop | 25x10 frames | 69.80 | 89.06 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_256p_randomresize_1x1x3_100e_kinetics400_rgb/tsn_r50_256p_randomresize_1x1x3_100e_kinetics400_rgb_20200817-ae7963ca.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_256p_randomresize_1x1x3_100e_kinetics400_rgb/20200815_172601.log) | [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_256p_randomresize_1x1x3_100e_kinetics400_rgb/20200815_172601.log.json)| +| x | short-side 256 | RandomResizedCrop | 25x3 frames | 70.48 | 89.89 | x | x | x | + +### Kinetics-400 OmniSource Experiments + +| config | resolution | backbone | pretrain | w. 
OmniSource | top1 acc | top5 acc | inference_time(video/s) | gpu_mem(M) | ckpt | log | json | +| :----------------------------------------------------------: | :------------: | :------: | :-------: | :----------------: | :------: | :------: | :---------------------: | :--------: | :----------------------------------------------------------: | :----------------------------------------------------------: | :----------------------------------------------------------: | +| [tsn_r50_1x1x3_100e_kinetics400_rgb](/configs/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py) | 340x256 | ResNet50 | ImageNet | :x: | 70.6 | 89.3 | 4.3 (25x10 frames) | 8344 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb/20200614_063526.log) | [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb/20200614_063526.log.json) | +| x | 340x256 | ResNet50 | ImageNet | :heavy_check_mark: | 73.6 | 91.0 | x | 8344 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/omni/tsn_imagenet_pretrained_r50_omni_1x1x3_kinetics400_rgb_20200926-54192355.pth) | x | x | +| x | short-side 320 | ResNet50 | IG-1B [1] | :x: | 73.1 | 90.4 | x | 8344 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/omni/tsn_1G1B_pretrained_r50_without_omni_1x1x3_kinetics400_rgb_20200926-c133dd49.pth) | x | x | +| x | short-side 320 | ResNet50 | IG-1B [1] | :heavy_check_mark: | 75.7 | 91.9 | x | 8344 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/omni/tsn_1G1B_pretrained_r50_omni_1x1x3_kinetics400_rgb_20200926-2863fed0.pth) | x | x | + +[1] We obtain the pre-trained model from [torch-hub](https://pytorch.org/hub/facebookresearch_semi-supervised-ImageNet1K-models_resnext/), the pretrain model we used is `resnet50_swsl` + +### Kinetics-600 + +| config | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | inference_time(video/s) | gpu_mem(M) | ckpt | log | json | +| :----------------------------------------------------------- | :------------: | :--: | :------: | :------: | :------: | :------: | :---------------------: | :--------: | :----------------------------------------------------------: | :----------------------------------------------------------: | :----------------------------------------------------------: | +| [tsn_r50_video_1x1x8_100e_kinetics600_rgb](/configs/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics600_rgb.py) | short-side 256 | 8x2 | ResNet50 | ImageNet | 74.8 | 92.3 | 11.1 (25x3 frames) | 8344 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics600_rgb/tsn_r50_video_1x1x8_100e_kinetics600_rgb_20201015-4db3c461.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics600_rgb/tsn_r50_video_1x1x8_100e_kinetics600_rgb_20201015.log) | [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics600_rgb/tsn_r50_video_1x1x8_100e_kinetics600_rgb_20201015.json) | + +### Kinetics-700 + +| config | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | inference_time(video/s) | gpu_mem(M) | ckpt | log | json | +| :----------------------------------------------------------- | :------------: | :--: | :------: | :------: | :------: | :------: | :---------------------: | :--------: | 
:----------------------------------------------------------: | :----------------------------------------------------------: | :----------------------------------------------------------: | +| [tsn_r50_video_1x1x8_100e_kinetics700_rgb](/configs/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics700_rgb.py) | short-side 256 | 8x2 | ResNet50 | ImageNet | 61.7 | 83.6 | 11.1 (25x3 frames) | 8344 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics700_rgb/tsn_r50_video_1x1x8_100e_kinetics700_rgb_20201015-e381a6c7.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics700_rgb/tsn_r50_video_1x1x8_100e_kinetics700_rgb_20201015.log) | [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics700_rgb/tsn_r50_video_1x1x8_100e_kinetics700_rgb_20201015.json) | + +### Something-Something V1 + +|config|resolution | gpus| backbone |pretrain| top1 acc| top5 acc | reference top1 acc | reference top5 acc | gpu_mem(M) | ckpt | log| json| +|:--|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:| +|[tsn_r50_1x1x8_50e_sthv1_rgb](/configs/recognition/tsn/tsn_r50_1x1x8_50e_sthv1_rgb.py)|height 100 |8| ResNet50 | ImageNet|18.55 |44.80 |[17.53](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd#training)|[44.29](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd#training)| 10978 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x8_50e_sthv1_rgb/tsn_r50_1x1x8_50e_sthv1_rgb_20200618-061b9195.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x8_50e_sthv1_rgb/tsn_sthv1.log)| [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x8_50e_sthv1_rgb/tsn_r50_f8_sthv1_18.1_45.0.log.json)| +|[tsn_r50_1x1x16_50e_sthv1_rgb](/configs/recognition/tsn/tsn_r50_1x1x16_50e_sthv1_rgb.py)| height 100 |8| ResNet50| ImageNet |15.77 |39.85 |[13.33](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd#training)|[35.58](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd#training)| 5691 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x16_50e_sthv1_rgb/tsn_r50_1x1x16_50e_sthv1_rgb_20200614-7e2fe4f1.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x16_50e_sthv1_rgb/20200614_211932.log)| [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x16_50e_sthv1_rgb/20200614_211932.log.json)| + +### Something-Something V2 + +|config |resolution| gpus| backbone| pretrain | top1 acc| top5 acc | reference top1 acc | reference top5 acc | gpu_mem(M) | ckpt | log| json| +|:--|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:| +|[tsn_r50_1x1x8_50e_sthv2_rgb](/configs/recognition/tsn/tsn_r50_1x1x8_50e_sthv2_rgb.py)|height 240 |8| ResNet50| ImageNet |32.97 |63.62 |[30.56](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd#training)|[58.49](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd#training)| 10966 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x8_50e_sthv2_rgb/tsn_r50_1x1x8_50e_sthv2_rgb_20200915-f3b381a5.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x8_50e_sthv2_rgb/20200915_114139.log)| 
[json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x8_50e_sthv2_rgb/20200915_114139.log.json)| +|[tsn_r50_1x1x16_50e_sthv2_rgb](/configs/recognition/tsn/tsn_r50_1x1x16_50e_sthv2_rgb.py)| height 240 |8|ResNet50| ImageNet |27.21 |55.84 |[21.91](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd#training)|[46.87](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd#training)|8337| [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x16_50e_sthv2_rgb/tsn_r50_1x1x16_50e_sthv2_rgb_20200917-80bc3611.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x16_50e_sthv2_rgb/20200917_105855.log)| [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x16_50e_sthv2_rgb/20200917_105855.log.json)| + +### Moments in Time + +|config |resolution| gpus| backbone | pretrain | top1 acc| top5 acc | gpu_mem(M)| ckpt | log| json| +|:--|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:| +|[tsn_r50_1x1x6_100e_mit_rgb](/configs/recognition/tsn/tsn_r50_1x1x6_100e_mit_rgb.py)|short-side 256 |8x2| ResNet50| ImageNet |26.84|51.6| 8339| [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x6_100e_mit_rgb/tsn_r50_1x1x6_100e_mit_rgb_20200618-d512ab1b.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x6_100e_mit_rgb/tsn_mit.log)| [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x6_100e_mit_rgb/tsn_r50_f6_mit_26.8_51.6.log.json)| + +### Multi-Moments in Time + +|config | resolution|gpus| backbone | pretrain | mAP| gpu_mem(M) | ckpt | log| json| +|:--|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:| +|[tsn_r101_1x1x5_50e_mmit_rgb](/configs/recognition/tsn/tsn_r101_1x1x5_50e_mmit_rgb.py)|short-side 256 |8x2| ResNet101| ImageNet |61.09| 10467 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r101_1x1x5_50e_mmit_rgb/tsn_r101_1x1x5_50e_mmit_rgb_20200618-642f450d.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r101_1x1x5_50e_mmit_rgb/tsn_mmit.log)| [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r101_1x1x5_50e_mmit_rgb/tsn_r101_f6_mmit_61.1.log.json)| + +### ActivityNet v1.3 + +| config | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | gpu_mem(M) | ckpt | log | json | +| :----------------------------------------------------------- | :--------: | :--: | :------: | :---------: | :------: | :------: | :--------: | :----------------------------------------------------------: | :----------------------------------------------------------: | :----------------------------------------------------------: | +| [tsn_r50_320p_1x1x8_50e_activitynet_video_rgb](/configs/recognition/tsn/tsn_r50_320p_1x1x8_50e_activitynet_video_rgb.py) | 340x256 | 8x1 | ResNet50 | Kinetics400 | 73.97 | 93.46 | 5692 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x8_50e_activitynet_video_rgb/tsn_r50_320p_1x1x8_50e_activitynet_video_rgb_20200804-9e15687e.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x8_50e_activitynet_video_rgb/tsn_r50_320p_1x1x8_50e_activitynet_video_rgb_20200804.log) | [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x8_50e_activitynet_video_rgb/tsn_r50_320p_1x1x8_50e_activitynet_video_rgb_20200804.json) | +| 
[tsn_r50_320p_1x1x8_50e_activitynet_clip_rgb](/configs/recognition/tsn/tsn_r50_320p_1x1x8_50e_activitynet_clip_rgb.py) | 340x256 | 8x1 | ResNet50 | Kinetics400 | 76.07 | 94.10 | 5692 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x8_50e_activitynet_clip_rgb/tsn_r50_320p_1x1x8_50e_activitynet_clip_rgb_20200804-f7bcbf34.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x8_50e_activitynet_clip_rgb/tsn_r50_320p_1x1x8_50e_activitynet_clip_rgb_20200804.log) | [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x8_50e_activitynet_clip_rgb/tsn_r50_320p_1x1x8_50e_activitynet_clip_rgb_20200804.json) | +| [tsn_r50_320p_1x1x8_150e_activitynet_video_flow](/configs/recognition/tsn/tsn_r50_320p_1x1x8_150e_activitynet_video_flow.py) | 340x256 | 8x2 | ResNet50 | Kinetics400 | 58.70 | 84.72 | 5780 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x8_150e_activitynet_video_flow/tsn_r50_320p_1x1x8_150e_activitynet_video_flow_20200804-13313f52.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x8_150e_activitynet_video_flow/tsn_r50_320p_1x1x8_150e_activitynet_video_flow_20200804.log) | [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x8_150e_activitynet_video_flow/tsn_r50_320p_1x1x8_150e_activitynet_video_flow_20200804.json) | +| [tsn_r50_320p_1x1x8_150e_activitynet_clip_flow](/configs/recognition/tsn/tsn_r50_320p_1x1x8_150e_activitynet_clip_flow.py) | 340x256 | 8x2 | ResNet50 | Kinetics400 | 59.51 | 82.69 | 5780 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x8_150e_activitynet_clip_flow/tsn_r50_320p_1x1x8_150e_activitynet_clip_flow_20200804-8622cf38.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x8_150e_activitynet_clip_flow/tsn_r50_320p_1x1x8_150e_activitynet_clip_flow_20200804.log) | [json](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_320p_1x1x8_150e_activitynet_clip_flow/tsn_r50_320p_1x1x8_150e_activitynet_clip_flow_20200804.json) | + +### HVU + +| config[1] | tag category | resolution | gpus | backbone | pretrain | mAP | HATNet[2] | HATNet-multi[2] | ckpt | log | json | +| :----------------------------------------------------------: | :----------: | :------------: | :--: | :------: | :------: | :--: | :-------: | :-------------: | :----------------------------------------------------------: | :----------------------------------------------------------: | :----------------------------------------------------------: | +| [tsn_r18_1x1x8_100e_hvu_action_rgb](/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_action_rgb.py) | action | short-side 256 | 8x2 | ResNet18 | ImageNet | 57.5 | 51.8 | 53.5 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/hvu/action/tsn_r18_1x1x8_100e_hvu_action_rgb_20201027-011b282b.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/hvu/action/tsn_r18_1x1x8_100e_hvu_action_rgb_20201027.log) | [json](https://download.openmmlab.com/mmaction/recognition/tsn/hvu/action/tsn_r18_1x1x8_100e_hvu_action_rgb_20201027.json) | +| [tsn_r18_1x1x8_100e_hvu_scene_rgb](/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_scene_rgb.py) | scene | short-side 256 | 8 | ResNet18 | ImageNet | 55.2 | 55.8 | 57.2 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/hvu/scene/tsn_r18_1x1x8_100e_hvu_scene_rgb_20201027-00e5748d.pth) | 
[log](https://download.openmmlab.com/mmaction/recognition/tsn/hvu/scene/tsn_r18_1x1x8_100e_hvu_scene_rgb_20201027.log) | [json](https://download.openmmlab.com/mmaction/recognition/tsn/hvu/scene/tsn_r18_1x1x8_100e_hvu_scene_rgb_20201027.json) | +| [tsn_r18_1x1x8_100e_hvu_object_rgb](/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_object_rgb.py) | object | short-side 256 | 8 | ResNet18 | ImageNet | 45.7 | 34.2 | 35.1 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/hvu/object/tsn_r18_1x1x8_100e_hvu_object_rgb_20201102-24a22f30.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/hvu/object/tsn_r18_1x1x8_100e_hvu_object_rgb_20201027.log) | [json](https://download.openmmlab.com/mmaction/recognition/tsn/hvu/object/tsn_r18_1x1x8_100e_hvu_object_rgb_20201027.json) | +| [tsn_r18_1x1x8_100e_hvu_event_rgb](/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_event_rgb.py) | event | short-side 256 | 8 | ResNet18 | ImageNet | 63.7 | 38.5 | 39.8 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/hvu/event/tsn_r18_1x1x8_100e_hvu_event_rgb_20201027-dea8cd71.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/hvu/event/tsn_r18_1x1x8_100e_hvu_event_rgb_20201027.log) | [json](https://download.openmmlab.com/mmaction/recognition/tsn/hvu/event/tsn_r18_1x1x8_100e_hvu_event_rgb_20201027.json) | +| [tsn_r18_1x1x8_100e_hvu_concept_rgb](/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_concept_rgb.py) | concept | short-side 256 | 8 | ResNet18 | ImageNet | 47.5 | 26.1 | 27.3 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/hvu/concept/tsn_r18_1x1x8_100e_hvu_concept_rgb_20201027-fc1dd8e3.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/hvu/concept/tsn_r18_1x1x8_100e_hvu_concept_rgb_20201027.log) | [json](https://download.openmmlab.com/mmaction/recognition/tsn/hvu/concept/tsn_r18_1x1x8_100e_hvu_concept_rgb_20201027.json) | +| [tsn_r18_1x1x8_100e_hvu_attribute_rgb](/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_attribute_rgb.py) | attribute | short-side 256 | 8 | ResNet18 | ImageNet | 46.1 | 33.6 | 34.9 | [ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/hvu/attribute/tsn_r18_1x1x8_100e_hvu_attribute_rgb_20201027-0b3b49d2.pth) | [log](https://download.openmmlab.com/mmaction/recognition/tsn/hvu/attribute/tsn_r18_1x1x8_100e_hvu_attribute_rgb_20201027.log) | [json](https://download.openmmlab.com/mmaction/recognition/tsn/hvu/attribute/tsn_r18_1x1x8_100e_hvu_attribute_rgb_20201027.json) | +| - | Overall | short-side 256 | - | ResNet18 | ImageNet | 52.6 | 40.0 | 41.3 | - | - | - | + +[1] For simplicity, we train a specific model for each tag category as the baselines for HVU. + +[2] The performance of HATNet and HATNet-multi are from the paper [Large Scale Holistic Video Understanding](https://pages.iai.uni-bonn.de/gall_juergen/download/HVU_eccv20.pdf). The proposed HATNet is a 2 branch Convolution Network (one 2D branch, one 3D branch) and share the same backbone(ResNet18) with us. The inputs of HATNet are 16 or 32 frames long video clips (which is much larger than us), while the input resolution is coarser (112 instead of 224). HATNet is trained on each individual task (each tag category) while HATNet-multi is trained on multiple tasks. Since there is no released codes or models for the HATNet, we just include the performance reported by the original paper. + +Notes: + +1. The **gpus** indicates the number of gpu we used to get the checkpoint. 
Note that the configs provided here assume 8 GPUs by default. + According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the total batch size if you use a different number of GPUs or videos per GPU (see the sketch at the end of this README), + e.g., lr=0.01 for 4 GPUs * 2 video/gpu and lr=0.08 for 16 GPUs * 4 video/gpu. +2. The **inference_time** is measured with this [benchmark script](/tools/analysis/benchmark.py), using the frame-sampling strategy of the test setting and counting only the model inference time, + excluding the IO and pre-processing time. For each setting, we use one GPU with a batch size (videos per GPU) of 1 to calculate the inference time. +3. The values in the "reference" columns are the results obtained by training with the original repository, using the same model settings. + +For more details on data preparation, you can refer to: + +* [preparing_ucf101](/tools/data/ucf101/README.md) +* [preparing_kinetics](/tools/data/kinetics/README.md) +* [preparing_sthv1](/tools/data/sthv1/README.md) +* [preparing_sthv2](/tools/data/sthv2/README.md) +* [preparing_mit](/tools/data/mit/README.md) +* [preparing_mmit](/tools/data/mmit/README.md) +* [preparing_hvu](/tools/data/hvu/README.md) +* [preparing_hmdb51](/tools/data/hmdb51/README.md) + +## Train + +You can use the following command to train a model. +```shell +python tools/train.py ${CONFIG_FILE} [optional arguments] +``` + +Example: train the TSN model on Kinetics-400 with deterministic training and periodic validation. +```shell +python tools/train.py configs/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py \ + --work-dir work_dirs/tsn_r50_1x1x3_100e_kinetics400_rgb \ + --validate --seed 0 --deterministic +``` + +For more details, you can refer to the **Training setting** part in [getting_started](/docs/getting_started.md#training-setting). + +## Test + +You can use the following command to test a model. +```shell +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments] +``` + +Example: test the TSN model on Kinetics-400 and dump the results to a JSON file. +```shell +python tools/test.py configs/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py \ + checkpoints/SOME_CHECKPOINT.pth --eval top_k_accuracy mean_class_accuracy \ + --out result.json +``` + +For more details, you can refer to the **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
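+
+As a rough illustration of the linear scaling rule mentioned in the notes above, the sketch below rescales the learning rate from a reference setup to a new GPU/batch configuration. The reference values (8 GPUs, lr=0.01, 32 videos per GPU) mirror the comments in the data-benchmark configs of this folder; the exact `videos_per_gpu` should be read from your own config.
+
+```python
+def scaled_lr(base_lr, base_gpus, base_videos_per_gpu, gpus, videos_per_gpu):
+    """Scale the learning rate linearly with the total batch size."""
+    base_batch = base_gpus * base_videos_per_gpu
+    new_batch = gpus * videos_per_gpu
+    return base_lr * new_batch / base_batch
+
+# Reference: 8 GPUs x 32 videos/GPU trained with lr=0.01 (as in the data-benchmark configs).
+print(scaled_lr(0.01, 8, 32, 4, 32))   # 0.005 when halving the number of GPUs
+print(scaled_lr(0.01, 8, 32, 16, 32))  # 0.02 when doubling the number of GPUs
+```
+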
diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_multiscalecrop_256p_1x1x3_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_multiscalecrop_256p_1x1x3_100e_kinetics400_rgb.py new file mode 100644 index 0000000..90cd4c6 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_multiscalecrop_256p_1x1x3_100e_kinetics400_rgb.py @@ -0,0 +1,116 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train_256p' +data_root_val = 'data/kinetics400/rawframes_val_256p' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes_256p.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes_256p.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes_256p.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=3, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='TenCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=32, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', 
step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, hooks=[ + dict(type='TextLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = ('./work_dirs/tsn_r50_multiscalecrop_256p_1x1x3' + '_100e_kinetics400_rgb/') +load_from = None +resume_from = None +workflow = [('train', 5)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_multiscalecrop_320p_1x1x3_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_multiscalecrop_320p_1x1x3_100e_kinetics400_rgb.py new file mode 100644 index 0000000..caacec2 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_multiscalecrop_320p_1x1x3_100e_kinetics400_rgb.py @@ -0,0 +1,116 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train_320p' +data_root_val = 'data/kinetics400/rawframes_val_320p' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes_320p.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes_320p.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes_320p.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=3, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='TenCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=32, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + 
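+ # frame folders listed in ann_file_train are resolved relative to data_prefix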
pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, hooks=[ + dict(type='TextLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = ('./work_dirs/tsn_r50_multiscalecrop_320p_1x1x3' + '_100e_kinetics400_rgb/') +load_from = None +resume_from = None +workflow = [('train', 5)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_multiscalecrop_340x256_1x1x3_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_multiscalecrop_340x256_1x1x3_100e_kinetics400_rgb.py new file mode 100644 index 0000000..ef56b33 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_multiscalecrop_340x256_1x1x3_100e_kinetics400_rgb.py @@ -0,0 +1,120 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=3, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode'), + 
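+ # test-time sampling: 25 uniformly spaced frames, each scored over the ten crops produced below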
dict(type='Resize', scale=(-1, 256)), + dict(type='TenCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=32, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = ('./work_dirs/tsn_r50_multiscalecrop_340x256_1x1x3' + '_100e_kinetics400_rgb/') +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_randomresizedcrop_256p_1x1x3_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_randomresizedcrop_256p_1x1x3_100e_kinetics400_rgb.py new file mode 100644 index 0000000..d25e9f0 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_randomresizedcrop_256p_1x1x3_100e_kinetics400_rgb.py @@ -0,0 +1,115 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train_256p' +data_root_val = 'data/kinetics400/rawframes_val_256p' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes_256p.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes_256p.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes_256p.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=3, + test_mode=True), + dict(type='RawFrameDecode'), + 
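+ # validation uses a single center crop per frame and no flipping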
dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=32, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = ('./work_dirs/tsn_r50_randomresizedcrop_256p_1x1x3' + '_100e_kinetics400_rgb/') +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_randomresizedcrop_320p_1x1x3_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_randomresizedcrop_320p_1x1x3_100e_kinetics400_rgb.py new file mode 100644 index 0000000..f1e9009 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_randomresizedcrop_320p_1x1x3_100e_kinetics400_rgb.py @@ -0,0 +1,115 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train_320p' +data_root_val = 'data/kinetics400/rawframes_val_320p' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes_320p.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes_320p.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes_320p.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + 
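+ # short edge resized to 256 above; a random resized crop then provides the training augmentation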
dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=3, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=32, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = ('./work_dirs/tsn_r50_randomresizedcrop_320p_1x1x3' + '_100e_kinetics400_rgb/') +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_randomresizedcrop_340x256_1x1x3_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_randomresizedcrop_340x256_1x1x3_100e_kinetics400_rgb.py new file mode 100644 index 0000000..e2497ea --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_randomresizedcrop_340x256_1x1x3_100e_kinetics400_rgb.py @@ -0,0 +1,111 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 
'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=3, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='TenCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=32, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, hooks=[ + dict(type='TextLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = ('./work_dirs/tsn_r50_randomresizedcrop_340x256_1x1x3' + '_100e_kinetics400_rgb') +load_from = None +resume_from = None +workflow = [('train', 5)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_256p_1x1x25_10crop_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_256p_1x1x25_10crop_100e_kinetics400_rgb.py new file mode 100644 index 0000000..56da32e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_256p_1x1x25_10crop_100e_kinetics400_rgb.py @@ -0,0 +1,49 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + 
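+ # spatial features are average-pooled; per-segment scores are fused by the consensus below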
consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root_val = 'data/kinetics400/rawframes_val_256p' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes_256p.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='TenCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + workers_per_gpu=4, + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +dist_params = dict(backend='nccl') diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_256p_1x1x25_3crop_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_256p_1x1x25_3crop_100e_kinetics400_rgb.py new file mode 100644 index 0000000..3822444 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_256p_1x1x25_3crop_100e_kinetics400_rgb.py @@ -0,0 +1,49 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root_val = 'data/kinetics400/rawframes_val_256p' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes_256p.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + workers_per_gpu=4, + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +dist_params = dict(backend='nccl') diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_320p_1x1x25_10crop_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_320p_1x1x25_10crop_100e_kinetics400_rgb.py new file mode 100644 index 0000000..8c1b928 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_320p_1x1x25_10crop_100e_kinetics400_rgb.py @@ -0,0 +1,49 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + 
type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root_val = 'data/kinetics400/rawframes_val_320p' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes_320p.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='TenCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + workers_per_gpu=4, + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +dist_params = dict(backend='nccl') diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_320p_1x1x25_3crop_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_320p_1x1x25_3crop_100e_kinetics400_rgb.py new file mode 100644 index 0000000..77c5d87 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_320p_1x1x25_3crop_100e_kinetics400_rgb.py @@ -0,0 +1,49 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root_val = 'data/kinetics400/rawframes_val_320p' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes_320p.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + workers_per_gpu=4, + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +dist_params = dict(backend='nccl') diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_340x256_1x1x25_10crop_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_340x256_1x1x25_10crop_100e_kinetics400_rgb.py new file mode 100644 index 0000000..a6c2819 --- /dev/null +++ 
b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_340x256_1x1x25_10crop_100e_kinetics400_rgb.py @@ -0,0 +1,49 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='TenCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + workers_per_gpu=4, + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +dist_params = dict(backend='nccl') diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_340x256_1x1x25_3crop_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_340x256_1x1x25_3crop_100e_kinetics400_rgb.py new file mode 100644 index 0000000..5f600f2 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_340x256_1x1x25_3crop_100e_kinetics400_rgb.py @@ -0,0 +1,49 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + workers_per_gpu=4, + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +dist_params = dict(backend='nccl') diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_action_rgb.py 
b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_action_rgb.py new file mode 100644 index 0000000..09463f2 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_action_rgb.py @@ -0,0 +1,126 @@ +# model settings +category_nums = dict( + action=739, attribute=117, concept=291, event=69, object=1678, scene=248) +target_cate = 'action' + +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet18', + depth=18, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=category_nums[target_cate], + in_channels=512, + spatial_type='avg', + multi_class=True, + consensus=dict(type='AvgConsensus', dim=1), + loss_cls=dict(type='BCELossWithLogits', loss_weight=333.), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/hvu/rawframes_train' +data_root_val = 'data/hvu/rawframes_val' +ann_file_train = f'data/hvu/hvu_{target_cate}_train.json' +ann_file_val = f'data/hvu/hvu_{target_cate}_val.json' +ann_file_test = f'data/hvu/hvu_{target_cate}_val.json' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=32, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline, + multi_class=True, + num_classes=category_nums[target_cate], + filename_tmpl='img_{:05d}.jpg'), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline, + multi_class=True, + num_classes=category_nums[target_cate], + filename_tmpl='img_{:05d}.jpg'), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline, + multi_class=True, + num_classes=category_nums[target_cate], + filename_tmpl='img_{:05d}.jpg')) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + 
weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=1) +evaluation = dict(interval=2, metrics=['mean_average_precision']) +log_config = dict( + interval=20, hooks=[ + dict(type='TextLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = f'./work_dirs/tsn_r18_1x1x8_100e_hvu_{target_cate}_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_attribute_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_attribute_rgb.py new file mode 100644 index 0000000..d6135ba --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_attribute_rgb.py @@ -0,0 +1,126 @@ +# model settings +category_nums = dict( + action=739, attribute=117, concept=291, event=69, object=1678, scene=248) +target_cate = 'attribute' + +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet18', + depth=18, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=category_nums[target_cate], + in_channels=512, + spatial_type='avg', + multi_class=True, + consensus=dict(type='AvgConsensus', dim=1), + loss_cls=dict(type='BCELossWithLogits', loss_weight=333.), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/hvu/rawframes_train' +data_root_val = 'data/hvu/rawframes_val' +ann_file_train = f'data/hvu/hvu_{target_cate}_train.json' +ann_file_val = f'data/hvu/hvu_{target_cate}_val.json' +ann_file_test = f'data/hvu/hvu_{target_cate}_val.json' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=32, + 
workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline, + multi_class=True, + num_classes=category_nums[target_cate], + filename_tmpl='img_{:05d}.jpg'), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline, + multi_class=True, + num_classes=category_nums[target_cate], + filename_tmpl='img_{:05d}.jpg'), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline, + multi_class=True, + num_classes=category_nums[target_cate], + filename_tmpl='img_{:05d}.jpg')) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=1) +evaluation = dict(interval=2, metrics=['mean_average_precision']) +log_config = dict( + interval=20, hooks=[ + dict(type='TextLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = f'./work_dirs/tsn_r18_1x1x8_100e_hvu_{target_cate}_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_concept_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_concept_rgb.py new file mode 100644 index 0000000..fe90f5c --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_concept_rgb.py @@ -0,0 +1,126 @@ +# model settings +category_nums = dict( + action=739, attribute=117, concept=291, event=69, object=1678, scene=248) +target_cate = 'concept' + +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet18', + depth=18, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=category_nums[target_cate], + in_channels=512, + spatial_type='avg', + multi_class=True, + consensus=dict(type='AvgConsensus', dim=1), + loss_cls=dict(type='BCELossWithLogits', loss_weight=333.), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/hvu/rawframes_train' +data_root_val = 'data/hvu/rawframes_val' +ann_file_train = f'data/hvu/hvu_{target_cate}_train.json' +ann_file_val = f'data/hvu/hvu_{target_cate}_val.json' +ann_file_test = f'data/hvu/hvu_{target_cate}_val.json' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Flip',
flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=32, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline, + multi_class=True, + num_classes=category_nums[target_cate], + filename_tmpl='img_{:05d}.jpg'), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline, + multi_class=True, + num_classes=category_nums[target_cate], + filename_tmpl='img_{:05d}.jpg'), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline, + multi_class=True, + num_classes=category_nums[target_cate], + filename_tmpl='img_{:05d}.jpg')) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=1) +evaluation = dict(interval=2, metrics=['mean_average_precision']) +log_config = dict( + interval=20, hooks=[ + dict(type='TextLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = f'./work_dirs/tsn_r18_1x1x8_100e_hvu_{target_cate}_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_event_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_event_rgb.py new file mode 100644 index 0000000..d4b011b --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_event_rgb.py @@ -0,0 +1,126 @@ +# model settings +category_nums = dict( + action=739, attribute=117, concept=291, event=69, object=1678, scene=248) +target_cate = 'event' + +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet18', + depth=18, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=category_nums[target_cate], + in_channels=512, + spatial_type='avg', + multi_class=True, + consensus=dict(type='AvgConsensus', dim=1), + loss_cls=dict(type='BCELossWithLogits', loss_weight=333.), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/hvu/rawframes_train' +data_root_val = 'data/hvu/rawframes_val' +ann_file_train = f'data/hvu/hvu_{target_cate}_train.json' +ann_file_val = f'data/hvu/hvu_{target_cate}_val.json' +ann_file_test = f'data/hvu/hvu_{target_cate}_val.json' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', 
clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=32, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline, + multi_class=True, + num_classes=category_nums[target_cate], + filename_tmpl='img_{:05d}.jpg'), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline, + multi_class=True, + num_classes=category_nums[target_cate], + filename_tmpl='img_{:05d}.jpg'), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline, + multi_class=True, + num_classes=category_nums[target_cate], + filename_tmpl='img_{:05d}.jpg')) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=1) +evaluation = dict(interval=2, metrics=['mean_average_precision']) +log_config = dict( + interval=20, hooks=[ + dict(type='TextLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = f'./work_dirs/tsn_r18_1x1x8_100e_hvu_{target_cate}_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_object_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_object_rgb.py new file mode 100644 index 0000000..fe90f5c --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_object_rgb.py @@ -0,0 +1,126 @@ +# model settings +category_nums = dict( + action=739, attribute=117, concept=291, event=69, object=1678, scene=248) +target_cate = 'object' + +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet18', + depth=18, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=category_nums[target_cate], + in_channels=512, + spatial_type='avg', + 
multi_class=True, + consensus=dict(type='AvgConsensus', dim=1), + loss_cls=dict(type='BCELossWithLogits', loss_weight=333.), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/hvu/rawframes_train' +data_root_val = 'data/hvu/rawframes_val' +ann_file_train = f'data/hvu/hvu_{target_cate}_train.json' +ann_file_val = f'data/hvu/hvu_{target_cate}_val.json' +ann_file_test = f'data/hvu/hvu_{target_cate}_val.json' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=32, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline, + multi_class=True, + num_classes=category_nums[target_cate], + filename_tmpl='img_{:05d}.jpg'), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline, + multi_class=True, + num_classes=category_nums[target_cate], + filename_tmpl='img_{:05d}.jpg'), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline, + multi_class=True, + num_classes=category_nums[target_cate], + filename_tmpl='img_{:05d}.jpg')) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=1) +evaluation = dict(interval=2, metrics=['mean_average_precision']) +log_config = dict( + interval=20, hooks=[ + dict(type='TextLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = f'./work_dirs/tsn_r18_1x1x8_100e_hvu_{target_cate}_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_scene_rgb.py 
b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_scene_rgb.py new file mode 100644 index 0000000..7026649 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_scene_rgb.py @@ -0,0 +1,126 @@ +# model settings +category_nums = dict( + action=739, attribute=117, concept=291, event=69, object=1678, scene=248) +target_cate = 'scene' + +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet18', + depth=18, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=category_nums[target_cate], + in_channels=512, + spatial_type='avg', + multi_class=True, + consensus=dict(type='AvgConsensus', dim=1), + loss_cls=dict(type='BCELossWithLogits', loss_weight=333.), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/hvu/rawframes_train' +data_root_val = 'data/hvu/rawframes_val' +ann_file_train = f'data/hvu/hvu_{target_cate}_train.json' +ann_file_val = f'data/hvu/hvu_{target_cate}_val.json' +ann_file_test = f'data/hvu/hvu_{target_cate}_val.json' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=32, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline, + multi_class=True, + num_classes=category_nums[target_cate], + filename_tmpl='img_{:05d}.jpg'), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline, + multi_class=True, + num_classes=category_nums[target_cate], + filename_tmpl='img_{:05d}.jpg'), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline, + multi_class=True, + num_classes=category_nums[target_cate], + filename_tmpl='img_{:05d}.jpg')) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + 
weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=1) +evaluation = dict(interval=2, metrics=['mean_average_precision']) +log_config = dict( + interval=20, hooks=[ + dict(type='TextLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = f'./work_dirs/tsn_r18_1x1x8_100e_hvu_{target_cate}_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_fp16_r50_1x1x3_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_fp16_r50_1x1x3_100e_kinetics400_rgb.py new file mode 100644 index 0000000..75e180a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_fp16_r50_1x1x3_100e_kinetics400_rgb.py @@ -0,0 +1,121 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=3, + test_mode=True), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='TenCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=32, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + 
data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# fp16 settings +fp16 = dict() +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_fp16_r50_1x1x3_100e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r101_1x1x5_50e_mmit_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r101_1x1x5_50e_mmit_rgb.py new file mode 100644 index 0000000..a71f0e2 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r101_1x1x5_50e_mmit_rgb.py @@ -0,0 +1,134 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet101', + depth=101, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=313, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + loss_cls=dict(type='BCELossWithLogits', loss_weight=160.0), + dropout_ratio=0.5, + init_std=0.01, + multi_class=True, + label_smooth_eps=0)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/mmit/rawframes/training' +data_root_val = '/data/mmit/rawframes/validation/' +ann_file_train = 'data/mmit/mmit_train_list_rawframes.txt' +ann_file_val = 'data/mmit/mmit_val_list_rawframes.txt' +ann_file_test = 'data/mmit/mmit_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=5), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=5, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=5, + test_mode=True), + 
dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='MultiGroupCrop', crop_size=256, groups=1), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +data = dict( + videos_per_gpu=16, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline, + multi_class=True, + num_classes=313), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline, + multi_class=True, + num_classes=313), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline, + multi_class=True, + num_classes=313)) +# optimizer +optimizer = dict( + type='SGD', + constructor='TSMOptimizerConstructor', + paramwise_cfg=dict(fc_lr5=True), + lr=0.01, # this lr is used for 8 gpus + momentum=0.9, + weight_decay=0.0001, +) +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=5) +evaluation = dict(interval=5, metrics=['mmit_mean_average_precision']) +# yapf:disable +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_r101_1x1x5_50e_mmit_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x16_50e_sthv1_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x16_50e_sthv1_rgb.py new file mode 100644 index 0000000..f885928 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x16_50e_sthv1_rgb.py @@ -0,0 +1,122 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=True), + cls_head=dict( + type='TSNHead', + num_classes=174, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/sthv1/rawframes' +data_root_val = 'data/sthv1/rawframes' +ann_file_train = 'data/sthv1/sthv1_train_list_rawframes.txt' +ann_file_val = 'data/sthv1/sthv1_val_list_rawframes.txt' +ann_file_test = 'data/sthv1/sthv1_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=16), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 
'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + filename_tmpl='{:05}.jpg', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + filename_tmpl='{:05}.jpg', + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + filename_tmpl='{:05}.jpg', + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + weight_decay=0.0005) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_r50_1x1x16_50e_sthv1_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x16_50e_sthv2_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x16_50e_sthv2_rgb.py new file mode 100644 index 0000000..e0eecd1 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x16_50e_sthv2_rgb.py @@ -0,0 +1,107 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=174, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/sthv2/rawframes' +data_root_val = 'data/sthv2/rawframes' +ann_file_train = 'data/sthv2/sthv2_train_list_rawframes.txt' +ann_file_val = 'data/sthv2/sthv2_val_list_rawframes.txt' +ann_file_test = 'data/sthv2/sthv2_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=16), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + 
random_crop=False, + max_wh_scale_gap=1), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=16), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=16), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='TenCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.005, momentum=0.9, + weight_decay=0.0005) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_r50_1x1x16_50e_sthv2_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py new file mode 100644 index 0000000..a25d5db --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py @@ -0,0 +1,119 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) 
+train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=3, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='TenCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=32, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_r50_1x1x3_100e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x3_75e_ucf101_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x3_75e_ucf101_rgb.py new file mode 100644 index 0000000..e012139 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x3_75e_ucf101_rgb.py @@ -0,0 +1,115 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=101, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.001)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/ucf101/rawframes/' 
+data_root_val = 'data/ucf101/rawframes/' +split = 1 # official train/test splits. valid numbers: 1, 2, 3 +ann_file_train = f'data/ucf101/ucf101_train_split_{split}_rawframes.txt' +ann_file_val = f'data/ucf101/ucf101_val_split_{split}_rawframes.txt' +ann_file_test = f'data/ucf101/ucf101_val_split_{split}_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=3, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=32, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.00128, momentum=0.9, + weight_decay=0.0005) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[]) +total_epochs = 75 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = f'./work_dirs/tsn_r50_1x1x3_75e_ucf101_split_{split}_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x6_100e_mit_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x6_100e_mit_rgb.py new file mode 100644 index 0000000..1ef66f4 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x6_100e_mit_rgb.py @@ -0,0 +1,117 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + 
num_classes=339, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/mit/rawframes/training' +data_root_val = 'data/mit/rawframes/validation/' +ann_file_train = 'data/mit/mit_train_list_rawframes.txt' +ann_file_val = 'data/mit/mit_val_list_rawframes.txt' +ann_file_test = 'data/mit/mit_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=6), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.8), + random_crop=False, + max_wh_scale_gap=0), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=6, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=6, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=16, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.005, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=5) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_r50_1x1x6_100e_mit_rgb' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_imagenet_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_imagenet_rgb.py new file mode 100644 index 0000000..6263652 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_imagenet_rgb.py @@ -0,0 +1,107 @@ +model = dict( +
type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=51, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +train_cfg = None +test_cfg = dict(average_clips=None) +dataset_type = 'RawframeDataset' +data_root = 'data/hmdb51/rawframes' +data_root_val = 'data/hmdb51/rawframes' +ann_file_train = 'data/hmdb51/hmdb51_train_split_1_rawframes.txt' +ann_file_val = 'data/hmdb51/hmdb51_val_split_1_rawframes.txt' +ann_file_test = 'data/hmdb51/hmdb51_val_split_1_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=32, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=test_pipeline)) +optimizer = dict(type='SGD', lr=0.025, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=1, metrics=['top_k_accuracy', 'mean_class_accuracy'], topk=(1, 5)) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_r50_1x1x8_50e_hmdb51_imagenet_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] +gpu_ids = range(0, 1) diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_kinetics400_rgb.py new file mode 100644 index 0000000..1364145 --- /dev/null +++ 
b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_kinetics400_rgb.py @@ -0,0 +1,107 @@ +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=51, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +train_cfg = None +test_cfg = dict(average_clips=None) +dataset_type = 'RawframeDataset' +data_root = 'data/hmdb51/rawframes' +data_root_val = 'data/hmdb51/rawframes' +ann_file_train = 'data/hmdb51/hmdb51_train_split_1_rawframes.txt' +ann_file_val = 'data/hmdb51/hmdb51_val_split_1_rawframes.txt' +ann_file_test = 'data/hmdb51/hmdb51_val_split_1_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=32, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=test_pipeline)) +optimizer = dict(type='SGD', lr=0.025, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=1, metrics=['top_k_accuracy', 'mean_class_accuracy'], topk=(1, 5)) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_r50_1x1x8_50e_hmdb51_kinetics400_rgb/' +load_from = 'https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_256p_1x1x8_100e_kinetics400_rgb/tsn_r50_256p_1x1x8_100e_kinetics400_rgb_20200817-883baf16.pth' # noqa: E501 +resume_from = None +workflow = [('train', 1)] 
+gpu_ids = range(0, 1) diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_mit_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_mit_rgb.py new file mode 100644 index 0000000..8c9fe3f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_mit_rgb.py @@ -0,0 +1,105 @@ +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=51, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +train_cfg = None +test_cfg = dict(average_clips=None) +dataset_type = 'RawframeDataset' +data_root = 'data/hmdb51/rawframes' +data_root_val = 'data/hmdb51/rawframes' +ann_file_train = 'data/hmdb51/hmdb51_train_split_1_rawframes.txt' +ann_file_val = 'data/hmdb51/hmdb51_val_split_1_rawframes.txt' +ann_file_test = 'data/hmdb51/hmdb51_val_split_1_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=test_pipeline)) +optimizer = dict(type='SGD', lr=0.025, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=1, metrics=['top_k_accuracy', 'mean_class_accuracy'], topk=(1, 5)) +log_config = dict( + interval=5, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook') + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_r50_1x1x8_50e_hmdb51_mit_rgb/' 
+load_from = 'https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x6_100e_mit_rgb/tsn_r50_1x1x6_100e_mit_rgb_20200618-d512ab1b.pth' # noqa: E501 +resume_from = None +workflow = [('train', 1)] +gpu_ids = range(0, 1) diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x8_50e_sthv1_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x8_50e_sthv1_rgb.py new file mode 100644 index 0000000..5a4b5e3 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x8_50e_sthv1_rgb.py @@ -0,0 +1,123 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=True), + cls_head=dict( + type='TSNHead', + num_classes=174, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/sthv1/rawframes' +data_root_val = 'data/sthv1/rawframes' +ann_file_train = 'data/sthv1/sthv1_train_list_rawframes.txt' +ann_file_val = 'data/sthv1/sthv1_val_list_rawframes.txt' +ann_file_test = 'data/sthv1/sthv1_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='TenCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=16, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + filename_tmpl='{:05}.jpg', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + filename_tmpl='{:05}.jpg', + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + filename_tmpl='{:05}.jpg', + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.02, momentum=0.9, 
+ weight_decay=0.0005) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_r50_1x1x8_50e_sthv1_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x8_50e_sthv2_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x8_50e_sthv2_rgb.py new file mode 100644 index 0000000..1e938b6 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x8_50e_sthv2_rgb.py @@ -0,0 +1,109 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=174, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/sthv2/rawframes' +data_root_val = 'data/sthv2/rawframes' +ann_file_train = 'data/sthv2/sthv2_train_list_rawframes.txt' +ann_file_val = 'data/sthv2/sthv2_val_list_rawframes.txt' +ann_file_test = 'data/sthv2/sthv2_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='TenCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=16, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + 
data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.02, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_r50_1x1x8_50e_sthv2_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x3_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x3_100e_kinetics400_rgb.py new file mode 100644 index 0000000..aa6d60f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x3_100e_kinetics400_rgb.py @@ -0,0 +1,114 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train_320p' +data_root_val = 'data/kinetics400/rawframes_val_320p' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes_320p.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes_320p.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes_320p.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=3, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + 
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=32, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_r50_320p_1x1x3_100e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x3_110e_kinetics400_flow.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x3_110e_kinetics400_flow.py new file mode 100644 index 0000000..f040bad --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x3_110e_kinetics400_flow.py @@ -0,0 +1,120 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train_320p' +data_root_val = 'data/kinetics400/rawframes_val_320p' +ann_file_train = 'data/kinetics400/kinetics_flow_train_list.txt' +ann_file_val = 'data/kinetics400/kinetics_flow_val_list.txt' +ann_file_test = 'data/kinetics400/kinetics_flow_val_list.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=5, frame_interval=1, num_clips=3), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW_Flow'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=5, + frame_interval=1, + num_clips=3, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW_Flow'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + 
clip_len=5, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='TenCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW_Flow'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=32, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + filename_tmpl='{}_{:05d}.jpg', + modality='Flow', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + filename_tmpl='{}_{:05d}.jpg', + modality='Flow', + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + filename_tmpl='{}_{:05d}.jpg', + modality='Flow', + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.005, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[70, 100]) +total_epochs = 110 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_r50_320p_1x1x3_110e_kinetics400_flow/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_100e_kinetics400_rgb.py new file mode 100644 index 0000000..22b1e64 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_100e_kinetics400_rgb.py @@ -0,0 +1,114 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train_320p' +data_root_val = 'data/kinetics400/rawframes_val_320p' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes_320p.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes_320p.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes_320p.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ 
+ dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=12, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.00375, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_r50_320p_1x1x8_100e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_110e_kinetics400_flow.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_110e_kinetics400_flow.py new file mode 100644 index 0000000..7108130 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_110e_kinetics400_flow.py @@ -0,0 +1,120 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train_320p' +data_root_val = 'data/kinetics400/rawframes_val_320p' +ann_file_train = 'data/kinetics400/kinetics_flow_train_list.txt' +ann_file_val = 'data/kinetics400/kinetics_flow_val_list.txt' +ann_file_test = 'data/kinetics400/kinetics_flow_val_list.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=5, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + 
dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW_Flow'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=5, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW_Flow'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=5, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='TenCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW_Flow'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=12, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + filename_tmpl='{}_{:05d}.jpg', + modality='Flow', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + filename_tmpl='{}_{:05d}.jpg', + modality='Flow', + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + filename_tmpl='{}_{:05d}.jpg', + modality='Flow', + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.001875, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[70, 100]) +total_epochs = 110 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_r50_320p_1x1x8_110e_kinetics400_flow/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_150e_activitynet_clip_flow.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_150e_activitynet_clip_flow.py new file mode 100644 index 0000000..645202e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_150e_activitynet_clip_flow.py @@ -0,0 +1,125 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='modelzoo/tsn_r50_320p_1x1x8_110e_kinetics400_flow.pth', + depth=50, + in_channels=10, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=200, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.8, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/ActivityNet/flow' +data_root_val = 
'data/ActivityNet/flow' +ann_file_train = 'data/ActivityNet/anet_train_clip.txt' +ann_file_val = 'data/ActivityNet/anet_val_clip.txt' +ann_file_test = 'data/ActivityNet/anet_val_clip.txt' +img_norm_cfg = dict(mean=[128, 128], std=[128, 128], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=5, frame_interval=1, num_clips=8), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW_Flow'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=5, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW_Flow'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=5, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='TenCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW_Flow'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + filename_tmpl='flow_{}_{:05d}.jpg', + with_offset=True, + modality='Flow', + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + filename_tmpl='flow_{}_{:05d}.jpg', + with_offset=True, + modality='Flow', + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + filename_tmpl='flow_{}_{:05d}.jpg', + with_offset=True, + modality='Flow', + start_index=0, + pipeline=test_pipeline)) +# optimizer +optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001) +# this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[60, 120]) +total_epochs = 150 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_r50_320p_1x1x8_150e_activitynet_clip_flow/' +load_from = None +resume_from = None +workflow = [('train', 5)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_150e_activitynet_video_flow.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_150e_activitynet_video_flow.py new file mode 100644 index 0000000..2911a12 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_150e_activitynet_video_flow.py @@ -0,0 +1,125 @@ +# model settings 
+model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='modelzoo/tsn_r50_320p_1x1x8_110e_kinetics400_flow.pth', + depth=50, + in_channels=10, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=200, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.8, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/ActivityNet/flow' +data_root_val = 'data/ActivityNet/flow' +ann_file_train = 'data/ActivityNet/anet_train_video.txt' +ann_file_val = 'data/ActivityNet/anet_val_video.txt' +ann_file_test = 'data/ActivityNet/anet_val_clip.txt' +img_norm_cfg = dict(mean=[128, 128], std=[128, 128], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=5, frame_interval=1, num_clips=8), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW_Flow'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=5, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW_Flow'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=5, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='TenCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW_Flow'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + filename_tmpl='flow_{}_{:05d}.jpg', + with_offset=True, + modality='Flow', + start_index=0, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + filename_tmpl='flow_{}_{:05d}.jpg', + with_offset=True, + modality='Flow', + start_index=0, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + filename_tmpl='flow_{}_{:05d}.jpg', + with_offset=True, + modality='Flow', + start_index=0, + pipeline=test_pipeline)) +# optimizer +optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001) +# this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[60, 120]) +total_epochs = 150 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = 
'./work_dirs/tsn_r50_320p_1x1x8_150e_activitynet_video_flow/' +load_from = None +resume_from = None +workflow = [('train', 5)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_50e_activitynet_clip_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_50e_activitynet_clip_rgb.py new file mode 100644 index 0000000..f97d70e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_50e_activitynet_clip_rgb.py @@ -0,0 +1,122 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='modelzoo/tsn_r50_320p_1x1x8_100e_kinetics400_rgb.pth', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=200, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.8, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/ActivityNet/rgb' +data_root_val = 'data/ActivityNet/rgb' +ann_file_train = 'data/ActivityNet/anet_train_clip.txt' +ann_file_val = 'data/ActivityNet/anet_val_clip.txt' +ann_file_test = 'data/ActivityNet/anet_val_clip.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline, + with_offset=True, + start_index=0, + filename_tmpl='image_{:05d}.jpg'), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline, + with_offset=True, + start_index=0, + filename_tmpl='image_{:05d}.jpg'), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline, + with_offset=True, + start_index=0, + filename_tmpl='image_{:05d}.jpg')) +# optimizer +optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001) +# this lr is used 
for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_r50_320p_1x1x8_50e_activitynet_clip_rgb/' +load_from = None +resume_from = None +workflow = [('train', 5)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_50e_activitynet_video_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_50e_activitynet_video_rgb.py new file mode 100644 index 0000000..c2c016e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_50e_activitynet_video_rgb.py @@ -0,0 +1,122 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='modelzoo/tsn_r50_320p_1x1x8_100e_kinetics400_rgb.pth', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=200, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.8, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/ActivityNet/rgb' +data_root_val = 'data/ActivityNet/rgb' +ann_file_train = 'data/ActivityNet/anet_train_video.txt' +ann_file_val = 'data/ActivityNet/anet_val_video.txt' +ann_file_test = 'data/ActivityNet/anet_val_clip.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline, + with_offset=True, + start_index=0, + 
filename_tmpl='image_{:05d}.jpg'), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline, + with_offset=True, + start_index=0, + filename_tmpl='image_{:05d}.jpg'), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline, + with_offset=True, + start_index=0, + filename_tmpl='image_{:05d}.jpg')) +# optimizer +optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001) +# this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[20, 40]) +total_epochs = 50 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_r50_320p_1x1x8_50e_activitynet_video_rgb/' +load_from = None +resume_from = None +workflow = [('train', 5)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_dense_1x1x5_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_dense_1x1x5_100e_kinetics400_rgb.py new file mode 100644 index 0000000..580c126 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_dense_1x1x5_100e_kinetics400_rgb.py @@ -0,0 +1,121 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.5, + init_std=0.001)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=5), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='DenseSampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] 
+test_pipeline = [ + dict( + type='DenseSampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=16, + workers_per_gpu=4, + val_dataloader=dict(videos_per_gpu=4), + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.03, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_r50_dense_1x1x5_100e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_dense_1x1x8_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_dense_1x1x8_100e_kinetics400_rgb.py new file mode 100644 index 0000000..349e423 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_dense_1x1x8_100e_kinetics400_rgb.py @@ -0,0 +1,120 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', 
keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='DenseSampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='DenseSampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=12, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.005, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_r50_dense_1x1x8_100e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_inference_1x1x3_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_inference_1x1x3_100e_kinetics400_rgb.py new file mode 100644 index 0000000..6941d2a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_inference_1x1x3_100e_kinetics400_rgb.py @@ -0,0 +1,46 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'RawframeDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + 
videos_per_gpu=1, + workers_per_gpu=2, + test=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + pipeline=test_pipeline)) diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics400_rgb.py new file mode 100644 index 0000000..43305ed --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics400_rgb.py @@ -0,0 +1,121 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/kinetics400/videos_train' +data_root_val = 'data/kinetics400/videos_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='DecordDecode'), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=32, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = 
dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_r50_video_1x1x8_100e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics600_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics600_rgb.py new file mode 100644 index 0000000..4bd634f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics600_rgb.py @@ -0,0 +1,116 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=600, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/kinetics600/videos_train' +data_root_val = 'data/kinetics600/videos_val' +ann_file_train = 'data/kinetics600/kinetics600_train_list_videos.txt' +ann_file_val = 'data/kinetics600/kinetics600_val_list_videos.txt' +ann_file_test = 'data/kinetics600/kinetics600_val_list_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='DecordDecode'), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=12, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + 
ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.00375, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_r50_1x1x3_100e_kinetics600_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics700_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics700_rgb.py new file mode 100644 index 0000000..9155224 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics700_rgb.py @@ -0,0 +1,116 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=700, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/kinetics700/videos_train' +data_root_val = 'data/kinetics700/videos_val' +ann_file_train = 'data/kinetics700/kinetics700_train_list_videos.txt' +ann_file_val = 'data/kinetics700/kinetics700_val_list_videos.txt' +ann_file_test = 'data/kinetics700/kinetics700_val_list_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='DecordDecode'), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', 
input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=12, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.00375, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_r50_1x1x3_100e_kinetics700_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_320p_1x1x3_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_320p_1x1x3_100e_kinetics400_rgb.py new file mode 100644 index 0000000..476c2d5 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_320p_1x1x3_100e_kinetics400_rgb.py @@ -0,0 +1,116 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/kinetics400/videos_train' +data_root_val = 'data/kinetics400/videos_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3), + dict(type='DecordDecode'), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=3, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ 
+ dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='TenCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=32, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_r50_video_1x1x3_100e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_dense_1x1x8_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_dense_1x1x8_100e_kinetics400_rgb.py new file mode 100644 index 0000000..5870693 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_dense_1x1x8_100e_kinetics400_rgb.py @@ -0,0 +1,122 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips=None) +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/kinetics400/videos_train' +data_root_val = 'data/kinetics400/videos_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='DecordInit'), + dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='DecordDecode'), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict(type='DecordInit'), + dict( + 
type='DenseSampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='DenseSampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=32, + workers_per_gpu=4, + val_dataloader=dict(videos_per_gpu=1), + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.01, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 100 +checkpoint_config = dict(interval=1) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_r50_video_dense_1x1x8_100e_kinetics400_rgb/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_inference_1x1x3_100e_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_inference_1x1x3_100e_kinetics400_rgb.py new file mode 100644 index 0000000..b22ca57 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_inference_1x1x3_100e_kinetics400_rgb.py @@ -0,0 +1,46 @@ +# model settings +model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01)) +# model training and testing settings +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'VideoDataset' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +test_pipeline = [ + dict(type='OpenCVInit', num_threads=1), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='OpenCVDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='TenCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs'], meta_keys=[]), + dict(type='ToTensor', 
keys=['imgs'])
+]
+data = dict(
+    videos_per_gpu=1,
+    workers_per_gpu=2,
+    test=dict(
+        type=dataset_type,
+        ann_file=None,
+        data_prefix=None,
+        pipeline=test_pipeline))
diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/x3d/README.md b/Downstream/Open-Set-Action-Recognition/configs/recognition/x3d/README.md
new file mode 100644
index 0000000..a3a0942
--- /dev/null
+++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/x3d/README.md
@@ -0,0 +1,45 @@
+# X3D
+
+## Introduction
+```
+@misc{feichtenhofer2020x3d,
+    title={X3D: Expanding Architectures for Efficient Video Recognition},
+    author={Christoph Feichtenhofer},
+    year={2020},
+    eprint={2004.04730},
+    archivePrefix={arXiv},
+    primaryClass={cs.CV}
+}
+```
+
+## Model Zoo
+
+### Kinetics-400
+
+|config | resolution | backbone | top1 10-view | top1 30-view | reference top1 10-view | reference top1 30-view | ckpt |
+|:--|:--:|:--:|:--:|:--:|:--:|:--:|:--:|
+|[x3d_s_13x6x1_facebook_kinetics400_rgb](/configs/recognition/x3d/x3d_s_13x6x1_facebook_kinetics400_rgb.py)|short-side 320| X3D_S | 72.7 | 73.2 | 73.1 [[SlowFast](https://github.com/facebookresearch/SlowFast/blob/master/MODEL_ZOO.md)] | 73.5 [[SlowFast](https://github.com/facebookresearch/SlowFast/blob/master/MODEL_ZOO.md)] | [ckpt](https://download.openmmlab.com/mmaction/recognition/x3d/facebook/x3d_s_facebook_13x6x1_kinetics400_rgb_20201027-623825a0.pth)[1] |
+|[x3d_m_16x5x1_facebook_kinetics400_rgb](/configs/recognition/x3d/x3d_m_16x5x1_facebook_kinetics400_rgb.py)|short-side 320| X3D_M | 75.0 | 75.6 | 75.1 [[SlowFast](https://github.com/facebookresearch/SlowFast/blob/master/MODEL_ZOO.md)] | 76.2 [[SlowFast](https://github.com/facebookresearch/SlowFast/blob/master/MODEL_ZOO.md)] | [ckpt](https://download.openmmlab.com/mmaction/recognition/x3d/facebook/x3d_m_facebook_16x5x1_kinetics400_rgb_20201027-3f42382a.pth)[1] |
+
+[1] The models are ported from the [SlowFast](https://github.com/facebookresearch/SlowFast/) repo and tested on our data. Currently, we only support testing of X3D models; training will be available soon.
+
+Notes:
+
+1. The values in the "reference" columns are the results obtained by testing the checkpoints released in the original repo, with its own code, on the same dataset as ours.
+
+For more details on data preparation, you can refer to Kinetics400 in [Data Preparation](/docs/data_preparation.md).
+
+## Test
+You can use the following command to test a model.
+```shell
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+```
+
+Example: test the X3D model on the Kinetics-400 dataset and dump the result to a json file.
+```shell
+python tools/test.py configs/recognition/x3d/x3d_s_13x6x1_facebook_kinetics400_rgb.py \
+    checkpoints/SOME_CHECKPOINT.pth --eval top_k_accuracy mean_class_accuracy \
+    --out result.json --average-clips prob
+```
+
+For more details, you can refer to the **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
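+
+If you want to sanity-check the dumped scores without rerunning `tools/test.py`, a small offline script is enough. The sketch below is a minimal example under two assumptions: the dumped file holds one class-score vector per video, in the same order as the annotation list, and each annotation line follows the `frame_dir num_frames label` convention of rawframe lists. The paths are placeholders; if your version cannot serialize scores to JSON, dump to a `.pkl` file and load it with `pickle` instead.
+
+```python
+# Minimal sketch: recompute top-1 accuracy from dumped per-video scores.
+# Assumes `result.json` is a list of class-score vectors aligned with the
+# annotation list, whose lines look like "frame_dir num_frames label".
+import json
+
+import numpy as np
+
+
+def load_labels(ann_file):
+    labels = []
+    with open(ann_file) as f:
+        for line in f:
+            parts = line.strip().split()
+            if parts:
+                labels.append(int(parts[-1]))  # label is the last field
+    return np.array(labels)
+
+
+def top1_accuracy(result_file, ann_file):
+    with open(result_file) as f:
+        scores = np.array(json.load(f))  # shape: (num_videos, num_classes)
+    labels = load_labels(ann_file)
+    assert len(scores) == len(labels), 'results and annotations are misaligned'
+    return float((scores.argmax(axis=1) == labels).mean())
+
+
+if __name__ == '__main__':
+    acc = top1_accuracy('result.json',
+                        'data/kinetics400/kinetics400_val_list_rawframes.txt')
+    print(f'top-1 accuracy: {acc:.4f}')
+```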
diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/x3d/x3d_m_16x5x1_facebook_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/x3d/x3d_m_16x5x1_facebook_kinetics400_rgb.py new file mode 100644 index 0000000..ebc06c7 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/x3d/x3d_m_16x5x1_facebook_kinetics400_rgb.py @@ -0,0 +1,45 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict(type='X3D', gamma_w=1, gamma_b=2.25, gamma_d=2.2), + cls_head=dict( + type='X3DHead', + in_channels=432, + num_classes=400, + spatial_type='avg', + dropout_ratio=0.5, + fc1_bias=False)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'RawframeDataset' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[114.75, 114.75, 114.75], std=[57.38, 57.38, 57.38], to_bgr=False) +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=16, + frame_interval=5, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=1, + workers_per_gpu=2, + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) + +dist_params = dict(backend='nccl') diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition/x3d/x3d_s_13x6x1_facebook_kinetics400_rgb.py b/Downstream/Open-Set-Action-Recognition/configs/recognition/x3d/x3d_s_13x6x1_facebook_kinetics400_rgb.py new file mode 100644 index 0000000..e378316 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition/x3d/x3d_s_13x6x1_facebook_kinetics400_rgb.py @@ -0,0 +1,45 @@ +# model settings +model = dict( + type='Recognizer3D', + backbone=dict(type='X3D', gamma_w=1, gamma_b=2.25, gamma_d=2.2), + cls_head=dict( + type='X3DHead', + in_channels=432, + num_classes=400, + spatial_type='avg', + dropout_ratio=0.5, + fc1_bias=False)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'RawframeDataset' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +img_norm_cfg = dict( + mean=[114.75, 114.75, 114.75], std=[57.38, 57.38, 57.38], to_bgr=False) +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=13, + frame_interval=6, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 192)), + dict(type='CenterCrop', crop_size=192), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +data = dict( + videos_per_gpu=1, + workers_per_gpu=2, + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) + +dist_params = dict(backend='nccl') diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition_audio/audioonly/audioonly_r50_64x1x1_100e_kinetics400_audio_feature.py 
b/Downstream/Open-Set-Action-Recognition/configs/recognition_audio/audioonly/audioonly_r50_64x1x1_100e_kinetics400_audio_feature.py new file mode 100644 index 0000000..a223876 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition_audio/audioonly/audioonly_r50_64x1x1_100e_kinetics400_audio_feature.py @@ -0,0 +1,102 @@ +# model settings +model = dict( + type='AudioRecognizer', + backbone=dict( + type='ResNetAudio', + depth=50, + pretrained=None, + in_channels=1, + norm_eval=False), + cls_head=dict( + type='AudioTSNHead', + num_classes=400, + in_channels=1024, + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'AudioFeatureDataset' +data_root = 'data/kinetics400/audio_feature_train' +data_root_val = 'data/kinetics400/audio_feature_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_audio_feature.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_audio_feature.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_audio_feature.txt' +train_pipeline = [ + dict(type='LoadAudioFeature'), + dict(type='SampleFrames', clip_len=64, frame_interval=1, num_clips=1), + dict(type='AudioFeatureSelector'), + dict(type='FormatAudioShape', input_format='NCTF'), + dict(type='Collect', keys=['audios', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['audios']) +] +val_pipeline = [ + dict(type='LoadAudioFeature'), + dict( + type='SampleFrames', + clip_len=64, + frame_interval=1, + num_clips=1, + test_mode=True), + dict(type='AudioFeatureSelector'), + dict(type='FormatAudioShape', input_format='NCTF'), + dict(type='Collect', keys=['audios', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['audios']) +] +test_pipeline = [ + dict(type='LoadAudioFeature'), + dict( + type='SampleFrames', + clip_len=64, + frame_interval=1, + num_clips=10, + test_mode=True), + dict(type='AudioFeatureSelector'), + dict(type='FormatAudioShape', input_format='NCTF'), + dict(type='Collect', keys=['audios', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['audios']) +] +data = dict( + videos_per_gpu=160, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=2.0, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='CosineAnnealing', min_lr=0) +total_epochs = 100 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=1, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = ('./work_dirs/' + + 'audioonly_r50_64x1x1_100e_kinetics400_audio_feature/') +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition_audio/resnet/README.md b/Downstream/Open-Set-Action-Recognition/configs/recognition_audio/resnet/README.md new file mode 100644 index 0000000..90d03aa --- /dev/null +++ 
b/Downstream/Open-Set-Action-Recognition/configs/recognition_audio/resnet/README.md @@ -0,0 +1,73 @@ +# ResNet for Audio + +## Introduction +``` +@article{xiao2020audiovisual, + title={Audiovisual SlowFast Networks for Video Recognition}, + author={Xiao, Fanyi and Lee, Yong Jae and Grauman, Kristen and Malik, Jitendra and Feichtenhofer, Christoph}, + journal={arXiv preprint arXiv:2001.08740}, + year={2020} +} +``` + +## Model Zoo + +### Kinetics-400 + +|config | n_fft | gpus | backbone |pretrain| top1 acc/delta| top5 acc/delta | inference_time(video/s) | gpu_mem(M)| ckpt | log| json| +|:--|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:| +|[tsn_r18_64x1x1_100e_kinetics400_audio_feature](/configs/recognition_audio/resnet/tsn_r18_64x1x1_100e_kinetics400_audio_feature.py)|1024|8| ResNet18 | None |19.7|35.75|x|1897|[ckpt](https://download.openmmlab.com/mmaction/recognition/audio_recognition/tsn_r18_64x1x1_100e_kinetics400_audio_feature/tsn_r18_64x1x1_100e_kinetics400_audio_feature_20201012-bf34df6c.pth)|[log](https://download.openmmlab.com/mmaction/recognition/audio_recognition/tsn_r18_64x1x1_100e_kinetics400_audio_feature/20201010_144630.log)|[json](https://download.openmmlab.com/mmaction/recognition/audio_recognition/tsn_r18_64x1x1_100e_kinetics400_audio_feature/20201010_144630.log.json)| +|[tsn_r18_64x1x1_100e_kinetics400_audio_feature](/configs/recognition_audio/resnet/tsn_r18_64x1x1_100e_kinetics400_audio_feature.py) + [tsn_r50_video_320p_1x1x3_100e_kinetics400_rgb](/configs/recognition/tsn/tsn_r50_video_320p_1x1x3_100e_kinetics400_rgb.py)|1024|8| ResNet(18+50) | None |71.50(+0.39)|90.18(+0.14)|x|x|x|x|x| + +Notes: + +1. The **gpus** indicates the number of gpus we used to get the checkpoint. It is noteworthy that the configs we provide are used for 8 gpus as default. +According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use different GPUs or videos per GPU, +e.g., lr=0.01 for 4 GPUs * 2 video/gpu and lr=0.08 for 16 GPUs * 4 video/gpu. +2. The **inference_time** is got by this [benchmark script](/tools/analysis/benchmark.py), where we use the sampling frames strategy of the test setting and only care about the model inference time, +not including the IO time and pre-processing time. For each setting, we use 1 gpu and set batch size (videos per gpu) to 1 to calculate the inference time. +3. The values in columns named after "reference" are the results got by training on the original repo, using the same model settings. + +For more details on data preparation, you can refer to Kinetics400 in [Data Preparation](/docs/data_preparation.md). + +## Train + +You can use the following command to train a model. +```shell +python tools/train.py ${CONFIG_FILE} [optional arguments] +``` + +Example: train ResNet model on Kinetics-400 audio dataset in a deterministic option with periodic validation. +```shell +python tools/train.py configs/audio_recognition/tsn_r50_64x1x1_100e_kinetics400_audio_feature.py \ + --work-dir work_dirs/tsn_r50_64x1x1_100e_kinetics400_audio_feature \ + --validate --seed 0 --deterministic +``` + +For more details, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting). + +## Test + +You can use the following command to test a model. +```shell +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments] +``` + +Example: test ResNet model on Kinetics-400 audio dataset and dump the result to a json file. 
+```shell +python tools/test.py configs/audio_recognition/tsn_r50_64x1x1_100e_kinetics400_audio_feature.py \ + checkpoints/SOME_CHECKPOINT.pth --eval top_k_accuracy mean_class_accuracy \ + --out result.json +``` + +For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset). + +## Fusion +For multi-modality fusion, you can use the simple [script](/tools/analysis/report_accuracy.py), the standard usage is: + +```shell +python tools/analysis/report_accuracy.py --scores ${AUDIO_RESULT_PKL} ${VISUAL_RESULT_PKL} --datalist data/kinetics400/kinetics400_val_list_rawframes.txt --coefficient 1 1 +``` + ++ AUDIO_RESULT_PKL: The saved output file of `tools/test.py` by the argument `--out`. ++ VISUAL_RESULT_PKL: The saved output file of `tools/test.py` by the argument `--out`. diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition_audio/resnet/tsn_r18_64x1x1_100e_kinetics400_audio_feature.py b/Downstream/Open-Set-Action-Recognition/configs/recognition_audio/resnet/tsn_r18_64x1x1_100e_kinetics400_audio_feature.py new file mode 100644 index 0000000..4c85dbc --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition_audio/resnet/tsn_r18_64x1x1_100e_kinetics400_audio_feature.py @@ -0,0 +1,96 @@ +# model settings +model = dict( + type='AudioRecognizer', + backbone=dict(type='ResNet', depth=18, in_channels=1, norm_eval=False), + cls_head=dict( + type='AudioTSNHead', + num_classes=400, + in_channels=512, + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'AudioFeatureDataset' +data_root = 'data/kinetics400/audio_feature_train' +data_root_val = 'data/kinetics400/audio_feature_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_audio_feature.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_audio_feature.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_audio_feature.txt' +train_pipeline = [ + dict(type='LoadAudioFeature'), + dict(type='SampleFrames', clip_len=64, frame_interval=1, num_clips=1), + dict(type='AudioFeatureSelector'), + dict(type='FormatAudioShape', input_format='NCTF'), + dict(type='Collect', keys=['audios', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['audios']) +] +val_pipeline = [ + dict(type='LoadAudioFeature'), + dict( + type='SampleFrames', + clip_len=64, + frame_interval=1, + num_clips=1, + test_mode=True), + dict(type='AudioFeatureSelector'), + dict(type='FormatAudioShape', input_format='NCTF'), + dict(type='Collect', keys=['audios', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['audios']) +] +test_pipeline = [ + dict(type='LoadAudioFeature'), + dict( + type='SampleFrames', + clip_len=64, + frame_interval=1, + num_clips=1, + test_mode=True), + dict(type='AudioFeatureSelector'), + dict(type='FormatAudioShape', input_format='NCTF'), + dict(type='Collect', keys=['audios', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['audios']) +] +data = dict( + videos_per_gpu=320, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.1, momentum=0.9, + weight_decay=0.0001) # 
this lr is used for 8 gpus +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='CosineAnnealing', min_lr=0) +total_epochs = 100 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_r18_64x1x1_100e_kinetics400_audio_feature/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/configs/recognition_audio/resnet/tsn_r50_64x1x1_100e_kinetics400_audio.py b/Downstream/Open-Set-Action-Recognition/configs/recognition_audio/resnet/tsn_r50_64x1x1_100e_kinetics400_audio.py new file mode 100644 index 0000000..4086e42 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/configs/recognition_audio/resnet/tsn_r50_64x1x1_100e_kinetics400_audio.py @@ -0,0 +1,102 @@ +# model settings +model = dict( + type='AudioRecognizer', + backbone=dict(type='ResNet', depth=50, in_channels=1, norm_eval=False), + cls_head=dict( + type='AudioTSNHead', + num_classes=400, + in_channels=2048, + dropout_ratio=0.5, + init_std=0.01)) +# model training and testing settings +train_cfg = None +test_cfg = dict(average_clips='prob') +# dataset settings +dataset_type = 'AudioDataset' +data_root = 'data/kinetics400/audios' +data_root_val = 'data/kinetics400/audios' +ann_file_train = 'data/kinetics400/kinetics400_train_list_audio.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_audio.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_audio.txt' +train_pipeline = [ + dict(type='AudioDecodeInit'), + dict(type='SampleFrames', clip_len=64, frame_interval=1, num_clips=1), + dict(type='AudioDecode'), + dict(type='AudioAmplify', ratio=1.5), + dict(type='MelLogSpectrogram'), + dict(type='FormatAudioShape', input_format='NCTF'), + dict(type='Collect', keys=['audios', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['audios']) +] +val_pipeline = [ + dict(type='AudioDecodeInit'), + dict( + type='SampleFrames', + clip_len=64, + frame_interval=1, + num_clips=1, + test_mode=True), + dict(type='AudioDecode'), + dict(type='AudioAmplify', ratio=1.5), + dict(type='MelLogSpectrogram'), + dict(type='FormatAudioShape', input_format='NCTF'), + dict(type='Collect', keys=['audios', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['audios']) +] +test_pipeline = [ + dict(type='AudioDecodeInit'), + dict( + type='SampleFrames', + clip_len=64, + frame_interval=1, + num_clips=1, + test_mode=True), + dict(type='AudioDecodeInit'), + dict(type='AudioAmplify', ratio=1.5), + dict(type='MelLogSpectrogram'), + dict(type='FormatAudioShape', input_format='NCTF'), + dict(type='Collect', keys=['audios', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['audios']) +] +data = dict( + videos_per_gpu=320, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) +# optimizer +optimizer = dict( + type='SGD', lr=0.1, momentum=0.9, + weight_decay=0.0001) # this lr is used for 8 gpus +optimizer_config = 
dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='CosineAnnealing', min_lr=0) +total_epochs = 100 +checkpoint_config = dict(interval=5) +evaluation = dict( + interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy']) +log_config = dict( + interval=20, + hooks=[ + dict(type='TextLoggerHook'), + # dict(type='TensorboardLoggerHook'), + ]) +# runtime settings +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = './work_dirs/tsn_r50_64x1x1_100e_kinetics400_audio/' +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/Downstream/Open-Set-Action-Recognition/docker/Dockerfile b/Downstream/Open-Set-Action-Recognition/docker/Dockerfile new file mode 100644 index 0000000..f746272 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/docker/Dockerfile @@ -0,0 +1,25 @@ +ARG PYTORCH="1.6.0" +ARG CUDA="10.1" +ARG CUDNN="7" + +FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel + +ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" +ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" +ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" + +RUN apt-get update && apt-get install -y git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 ffmpeg \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +# Install mmcv-full +RUN pip install mmcv-full==latest+torch1.6.0+cu101 -f https://download.openmmlab.com/mmcv/dist/index.html + +# Install MMAction2 +RUN conda clean --all +RUN git clone https://github.com/open-mmlab/mmaction2.git /mmaction2 +WORKDIR /mmaction2 +RUN mkdir -p /mmaction2/data +ENV FORCE_CUDA="1" +RUN pip install cython --no-cache-dir +RUN pip install --no-cache-dir -e . diff --git a/Downstream/Open-Set-Action-Recognition/docs/Makefile b/Downstream/Open-Set-Action-Recognition/docs/Makefile new file mode 100644 index 0000000..d4bb2cb --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/Downstream/Open-Set-Action-Recognition/docs/api.rst b/Downstream/Open-Set-Action-Recognition/docs/api.rst new file mode 100644 index 0000000..364003b --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/docs/api.rst @@ -0,0 +1,104 @@ +API Reference +============= + +mmaction.apis +------------- +.. automodule:: mmaction.apis + :members: + +mmaction.core +------------- + +optimizer +^^^^^^^^^ +.. automodule:: mmaction.core.optimizer + :members: + +evaluation +^^^^^^^^^^ +.. automodule:: mmaction.core.evaluation + :members: + +lr +^^ +.. automodule:: mmaction.core.lr + :members: + +mmaction.localization +--------------------- + +localization +^^^^^^^^^^^^ +.. automodule:: mmaction.localization + :members: + +mmaction.models +--------------- + +models +^^^^^^ +.. automodule:: mmaction.models + :members: + +recognizers +^^^^^^^^^^^ +.. automodule:: mmaction.models.recognizers + :members: + +localizers +^^^^^^^^^^ +.. 
automodule:: mmaction.models.localizers
+    :members:
+
+common
+^^^^^^
+.. automodule:: mmaction.models.common
+    :members:
+
+backbones
+^^^^^^^^^
+.. automodule:: mmaction.models.backbones
+    :members:
+
+heads
+^^^^^
+.. automodule:: mmaction.models.heads
+    :members:
+
+necks
+^^^^^
+.. automodule:: mmaction.models.necks
+    :members:
+
+losses
+^^^^^^
+.. automodule:: mmaction.models.losses
+    :members:
+
+mmaction.datasets
+-----------------
+
+datasets
+^^^^^^^^
+.. automodule:: mmaction.datasets
+    :members:
+
+pipelines
+^^^^^^^^^
+.. automodule:: mmaction.datasets.pipelines
+    :members:
+
+samplers
+^^^^^^^^
+.. automodule:: mmaction.datasets.samplers
+    :members:
+
+mmaction.utils
+--------------
+.. automodule:: mmaction.utils
+    :members:
+
+mmaction.localization
+---------------------
+.. automodule:: mmaction.localization
+    :members:
diff --git a/Downstream/Open-Set-Action-Recognition/docs/benchmark.md b/Downstream/Open-Set-Action-Recognition/docs/benchmark.md
new file mode 100644
index 0000000..5eb81ec
--- /dev/null
+++ b/Downstream/Open-Set-Action-Recognition/docs/benchmark.md
@@ -0,0 +1,137 @@
+# Benchmark
+
+We compare our results with some popular frameworks and official releases in terms of speed.
+
+## Settings
+### Hardware
+
+- 8 NVIDIA Tesla V100 (32G) GPUs
+- Intel(R) Xeon(R) Gold 6146 CPU @ 3.20GHz
+
+### Software Environment
+
+- Python 3.7
+- PyTorch 1.4
+- CUDA 10.1
+- CUDNN 7.6.03
+- NCCL 2.4.08
+
+### Metrics
+The time we measured is the average training time per iteration, including data processing and model training.
+The training speed is measured in s/iter; the lower, the better. Note that we skip the first 50 iteration times, as they may contain the device warmup time (a minimal log-parsing sketch for recomputing this average is given below).
+
+### Comparison Rules
+
+Here we compare our MMAction2 repo with other video understanding toolboxes under the same data and model settings,
+measured by the training time per iteration. Specifically, we use
+- commit id [7f3490d](https://github.com/open-mmlab/mmaction/tree/7f3490d3db6a67fe7b87bfef238b757403b670e3)(1/5/2020) of MMAction
+- commit id [8d53d6f](https://github.com/mit-han-lab/temporal-shift-module/tree/8d53d6fda40bea2f1b37a6095279c4b454d672bd)(5/5/2020) of Temporal-Shift-Module
+- commit id [8299c98](https://github.com/facebookresearch/SlowFast/tree/8299c9862f83a067fa7114ce98120ae1568a83ec)(7/7/2020) of PySlowFast
+- commit id [f13707f](https://github.com/wzmsltw/BSN-boundary-sensitive-network/tree/f13707fbc362486e93178c39f9c4d398afe2cb2f)(12/12/2018) of BSN (boundary sensitive network)
+- commit id [45d0514](https://github.com/JJBOY/BMN-Boundary-Matching-Network/tree/45d05146822b85ca672b65f3d030509583d0135a)(17/10/2019) of BMN (boundary matching network)
+
+To ensure a fair comparison, all experiments were conducted on the same hardware and with the same dataset. The rawframe dataset we used is generated by the [data preparation tools](/tools/data/kinetics/README.md); the video dataset we used is a special resized video cache called '256p dense-encoded video', which decodes faster and is generated by the script [here](/tools/data/resize_video.py). As shown in the table below, a significant improvement over normal 256p videos can be observed, especially when the sampling is sparse (like [TSN](/configs/recognition/tsn/tsn_r50_video_320p_1x1x3_100e_kinetics400_rgb.py)).
+
+For each model setting, we kept the same data preprocessing methods to ensure the same feature input.
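+
+For reference, the reported per-iteration times can be re-derived from the released text logs. The sketch below is a minimal example, assuming each training iteration prints a `time: <seconds>` field (as MMAction2's text logger does); for PySlowFast logs, swap the pattern to match the `time_diff` field. It skips the first 50 iterations to exclude warmup, mirroring the metric defined above.
+
+```python
+# Minimal sketch: average per-iteration training time from a text log.
+# Assumes a "time: <seconds>" field is printed once per training iteration;
+# change the pattern to r'"time_diff": ([0-9.]+)' for PySlowFast logs.
+import re
+import sys
+
+
+def average_iter_time(log_path, pattern=r'\btime: ([0-9.]+)', skip=50):
+    times = []
+    with open(log_path) as f:
+        for line in f:
+            match = re.search(pattern, line)
+            if match:
+                times.append(float(match.group(1)))
+    times = times[skip:]  # drop warmup iterations
+    if not times:
+        raise ValueError('no iteration times found after skipping warmup')
+    return sum(times) / len(times)
+
+
+if __name__ == '__main__':
+    print(f'{average_iter_time(sys.argv[1]):.3f} s/iter')
+```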
+In addition, we also used Memcached, a distributed cached system, to load the data for the same IO time except for fair comparisons with Pyslowfast which uses raw videos directly from disk by default. + +We provide the training log based on which we calculate the average iter time, with the actual setting logged inside, feel free to verify it and fire an issue if something does not make sense. + +## Main Results +### Recognizers + +| Model |input| io backend | batch size x gpus | MMAction2 (s/iter) | GPU mem(GB) | MMAction (s/iter)| GPU mem(GB) | Temporal-Shift-Module (s/iter) | GPU mem(GB) | PySlowFast (s/iter)| GPU mem(GB) | +| :--- | :---------------:|:---------------:| :---------------:| :---------------: | :--------------------: | :----------------------------: | :-----------------: |:-----------------: |:-----------------: |:-----------------: |:-----------------: | +| [TSN](/configs/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py)| 256p rawframes |Memcached| 32x8|**[0.32](https://download.openmmlab.com/mmaction/benchmark/recognition/mmaction2/tsn_256p_rawframes_memcahed_32x8.zip)** | 8.1 |[0.38](https://download.openmmlab.com/mmaction/benchmark/recognition/mmaction/tsn_256p_rawframes_memcached_32x8.zip)|8.1| [0.42](https://download.openmmlab.com/mmaction/benchmark/recognition/temporal_shift_module/tsn_256p_rawframes_memcached_32x8.zip)|10.5 | x |x | +| [TSN](/configs/recognition/tsn/tsn_r50_video_1x1x3_100e_kinetics400_rgb.py)| 256p videos |Disk| 32x8|**[1.42](https://download.openmmlab.com/mmaction/benchmark/recognition/mmaction2/tsn_256p_videos_disk_32x8.zip)** | 8.1 | x |x |x| x | TODO |TODO| +| [TSN](/configs/recognition/tsn/tsn_r50_video_1x1x3_100e_kinetics400_rgb.py)| 256p dense-encoded video |Disk| 32x8|**[0.61](https://download.openmmlab.com/mmaction/benchmark/recognition/mmaction2/tsn_256p_fast_videos_disk_32x8.zip)**|8.1 | x |x| x |x| TODO |TODO| +|[I3D heavy](/configs/recognition/i3d/i3d_r50_video_heavy_8x8x1_100e_kinetics400_rgb.py)|256p videos|Disk |8x8| **[0.34](https://download.openmmlab.com/mmaction/benchmark/recognition/mmaction2/i3d_heavy_256p_videos_disk_8x8.zip)** |4.6|x |x| x |x| [0.44](https://download.openmmlab.com/mmaction/benchmark/recognition/pyslowfast/pysf_i3d_r50_8x8_video.log) |4.6| +|[I3D heavy](/configs/recognition/i3d/i3d_r50_video_heavy_8x8x1_100e_kinetics400_rgb.py)|256p dense-encoded video|Disk |8x8| **[0.35](https://download.openmmlab.com/mmaction/benchmark/recognition/mmaction2/i3d_heavy_256p_fast_videos_disk_8x8.zip)**| 4.6 | x | x | x | x | [0.36](https://download.openmmlab.com/mmaction/benchmark/recognition/pyslowfast/pysf_i3d_r50_8x8_fast_video.log) |4.6| +| [I3D](/configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py)|256p rawframes|Memcached|8x8| **[0.43](https://download.openmmlab.com/mmaction/benchmark/recognition/mmaction2/i3d_256p_rawframes_memcahed_8x8.zip)**|5.0 | [0.56](https://download.openmmlab.com/mmaction/benchmark/recognition/mmaction/i3d_256p_rawframes_memcached_8x8.zip)|5.0| x |x| x |x| +| [TSM](/configs/recognition/tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.py) |256p rawframes|Memcached| 8x8|**[0.31](https://download.openmmlab.com/mmaction/benchmark/recognition/mmaction2/tsm_256p_rawframes_memcahed_8x8.zip)** |6.9| x |x| [0.41](https://download.openmmlab.com/mmaction/benchmark/recognition/temporal_shift_module/tsm_256p_rawframes_memcached_8x8.zip) |9.1| x|x | +| [Slowonly](/configs/recognition/slowonly/slowonly_r50_video_4x16x1_256e_kinetics400_rgb.py)|256p videos|Disk|8x8 | 
**[0.32](https://download.openmmlab.com/mmaction/benchmark/recognition/mmaction2/slowonly_256p_videos_disk_8x8.zip)** |3.1| TODO|TODO | x|x | [0.34](https://download.openmmlab.com/mmaction/benchmark/recognition/pyslowfast/pysf_slowonly_r50_4x16_video.log)|3.4 | +| [Slowonly](/configs/recognition/slowonly/slowonly_r50_video_4x16x1_256e_kinetics400_rgb.py)|256p dense-encoded video|Disk|8x8 | **[0.25](https://download.openmmlab.com/mmaction/benchmark/recognition/mmaction2/slowonly_256p_fast_videos_disk_8x8.zip)** |3.1| TODO |TODO| x |x| [0.28](https://download.openmmlab.com/mmaction/benchmark/recognition/pyslowfast/pysf_slowonly_r50_4x16_fast_video.log) |3.4| +| [Slowfast](/configs/recognition/slowfast/slowfast_r50_video_4x16x1_256e_kinetics400_rgb.py)|256p videos|Disk|8x8 | **[0.69](https://download.openmmlab.com/mmaction/benchmark/recognition/mmaction2/slowfast_256p_videos_disk_8x8.zip)**|6.1 | x |x| x |x| [1.04](https://download.openmmlab.com/mmaction/benchmark/recognition/pyslowfast/pysf_slowfast_r50_4x16_video.log)|7.0 | +| [Slowfast](/configs/recognition/slowfast/slowfast_r50_video_4x16x1_256e_kinetics400_rgb.py)|256p dense-encoded video|Disk|8x8 | **[0.68](https://download.openmmlab.com/mmaction/benchmark/recognition/mmaction2/slowfast_256p_fast_videos_disk_8x8.zip)** |6.1| x|x | x |x| [0.96](https://download.openmmlab.com/mmaction/benchmark/recognition/pyslowfast/pysf_slowfast_r50_4x16_fast_video.log)|7.0 | +| [R(2+1)D](/configs/recognition/r2plus1d/r2plus1d_r34_video_8x8x1_180e_kinetics400_rgb.py)|256p videos |Disk| 8x8|**[0.45](https://download.openmmlab.com/mmaction/benchmark/recognition/mmaction2/r2plus1d_256p_videos_disk_8x8.zip)**|5.1 | x | x | x | x | x | x | +| [R(2+1)D](/configs/recognition/r2plus1d/r2plus1d_r34_video_8x8x1_180e_kinetics400_rgb.py)|256p dense-encoded video |Disk| 8x8|**[0.44](https://download.openmmlab.com/mmaction/benchmark/recognition/mmaction2/r2plus1d_256p_fast_videos_disk_8x8.zip)** |5.1| x|x | x |x| x |x| + +### Localizers + +| Model | MMAction2 (s/iter) | BSN(boundary sensitive network) (s/iter) |BMN(boundary matching network) (s/iter)| +| :--- | :---------------: | :-------------------------------------: | :-------------------------------------: | +| BSN ([TEM + PEM + PGM](/configs/localization/bsn)) | **0.074(TEM)+0.040(PEM)** | 0.101(TEM)+0.040(PEM) | x | +| BMN ([bmn_400x100_2x8_9e_activitynet_feature](/configs/localization/bmn/bmn_400x100_2x8_9e_activitynet_feature.py)) | **3.27** | x | 3.30 | + + +## Details of Comparison +### TSN ++ **mmaction2** +```shell +# rawframes +bash tools/slurm_train.sh ${PARTATION_NAME} benchmark_tsn configs/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py --work-dir work_dirs/benchmark_tsn_rawframes + +# videos +bash tools/slurm_train.sh ${PARTATION_NAME} benchmark_tsn configs/recognition/tsn/tsn_r50_video_1x1x3_100e_kinetics400_rgb.py --work-dir work_dirs/benchmark_tsn_video +``` + ++ **mmaction** +```shell +python -u tools/train_recognizer.py configs/TSN/tsn_kinetics400_2d_rgb_r50_seg3_f1s1.py +``` + ++ **Temporal-Shift-Module** +```shell +python main.py kinetics RGB --arch resnet50 --num_segments 3 --gd 20 --lr 0.02 --wd 1e-4 --lr_steps 20 40 --epochs 1 --batch-size 256 -j 32 --dropout 0.5 --consensus_type=avg --eval-freq=10 --npb --print-freq 1 +``` + +### I3D ++ **mmaction2** +```shell +# rawframes +bash tools/slurm_train.sh ${PARTATION_NAME} benchmark_i3d configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py --work-dir work_dirs/benchmark_i3d_rawframes + +# videos +bash tools/slurm_train.sh 
${PARTATION_NAME} benchmark_i3d configs/recognition/i3d/i3d_r50_video_heavy_8x8x1_100e_kinetics400_rgb.py --work-dir work_dirs/benchmark_i3d_video +``` + ++ **mmaction** +```shell +python -u tools/train_recognizer.py configs/I3D_RGB/i3d_kinetics400_3d_rgb_r50_c3d_inflate3x1x1_seg1_f32s2.py +``` + ++ **PySlowFast** +```shell +python tools/run_net.py --cfg configs/Kinetics/I3D_8x8_R50.yaml DATA.PATH_TO_DATA_DIR ${DATA_ROOT} NUM_GPUS 8 TRAIN.BATCH_SIZE 64 TRAIN.AUTO_RESUME False LOG_PERIOD 1 SOLVER.MAX_EPOCH 1 > pysf_i3d_r50_8x8_video.log +``` +You may reproduce the result by writting a simple script to parse out the value of the field 'time_diff'. + +### SlowFast ++ **mmaction2** +```shell +bash tools/slurm_train.sh ${PARTATION_NAME} benchmark_slowfast configs/recognition/slowfast/slowfast_r50_video_4x16x1_256e_kinetics400_rgb.py --work-dir work_dirs/benchmark_slowfast_video +``` + ++ **PySlowFast** +```shell +python tools/run_net.py --cfg configs/Kinetics/SLOWFAST_4x16_R50.yaml DATA.PATH_TO_DATA_DIR ${DATA_ROOT} NUM_GPUS 8 TRAIN.BATCH_SIZE 64 TRAIN.AUTO_RESUME False LOG_PERIOD 1 SOLVER.MAX_EPOCH 1 > pysf_slowfast_r50_4x16_video.log +``` +You may reproduce the result by writting a simple script to parse out the value of the field 'time_diff'. + +### SlowOnly ++ **mmaction2** +```shell +bash tools/slurm_train.sh ${PARTATION_NAME} benchmark_slowonly configs/recognition/slowonly/slowonly_r50_video_4x16x1_256e_kinetics400_rgb.py --work-dir work_dirs/benchmark_slowonly_video +``` + ++ **PySlowFast** +```shell +python tools/run_net.py --cfg configs/Kinetics/SLOW_4x16_R50.yaml DATA.PATH_TO_DATA_DIR ${DATA_ROOT} NUM_GPUS 8 TRAIN.BATCH_SIZE 64 TRAIN.AUTO_RESUME False LOG_PERIOD 1 SOLVER.MAX_EPOCH 1 > pysf_slowonly_r50_4x16_video.log +``` +You may reproduce the result by writting a simple script to parse out the value of the field 'time_diff'. + +### R2plus1D ++ **mmaction2** +```shell +bash tools/slurm_train.sh ${PARTATION_NAME} benchmark_r2plus1d configs/recognition/r2plus1d/r2plus1d_r34_video_8x8x1_180e_kinetics400_rgb.py --work-dir work_dirs/benchmark_r2plus1d_video +``` diff --git a/Downstream/Open-Set-Action-Recognition/docs/changelog.md b/Downstream/Open-Set-Action-Recognition/docs/changelog.md new file mode 100644 index 0000000..c27a9fd --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/docs/changelog.md @@ -0,0 +1,198 @@ +## Changelog + +### 0.10.0 (31/12/2020) + +**Highlights** + +**New Features** + +**Improvements** +- Add FAQ documents for easy troubleshooting. ([#413](https://github.com/open-mmlab/mmaction2/pull/413), [#420](https://github.com/open-mmlab/mmaction2/pull/420)) + + +**Bug and Typo Fixes** + +**ModelZoo** + +### 0.9.0 (30/11/2020) + +**Highlights** +- Support GradCAM utils for recognizers +- Support ResNet Audio model + +**New Features** +- Automatically add modelzoo statistics to readthedocs ([#327](https://github.com/open-mmlab/mmaction2/pull/327)) +- Support GYM99 data preparation ([#331](https://github.com/open-mmlab/mmaction2/pull/331)) +- Add AudioOnly Pathway from AVSlowFast. 
([#355](https://github.com/open-mmlab/mmaction2/pull/355)) +- Add GradCAM utils for recognizer ([#324](https://github.com/open-mmlab/mmaction2/pull/324)) +- Add print config script ([#345](https://github.com/open-mmlab/mmaction2/pull/345)) +- Add online motion vector decoder ([#291](https://github.com/open-mmlab/mmaction2/pull/291)) + +**Improvements** +- Support PyTorch 1.7 in CI ([#312](https://github.com/open-mmlab/mmaction2/pull/312)) +- Support to predict different labels in a long video ([#274](https://github.com/open-mmlab/mmaction2/pull/274)) +- Update docs bout test crops ([#359](https://github.com/open-mmlab/mmaction2/pull/359)) +- Polish code format using pylint manually ([#338](https://github.com/open-mmlab/mmaction2/pull/338)) +- Update unittest coverage ([#358](https://github.com/open-mmlab/mmaction2/pull/358), [#322](https://github.com/open-mmlab/mmaction2/pull/322), [#325](https://github.com/open-mmlab/mmaction2/pull/325)) +- Add random seed for building filelists ([#323](https://github.com/open-mmlab/mmaction2/pull/323)) +- Update colab tutorial ([#367](https://github.com/open-mmlab/mmaction2/pull/367)) +- set default batch_size of evaluation and testing to 1 ([#250](https://github.com/open-mmlab/mmaction2/pull/250)) +- Rename the preparation docs to `README.md` ([#388](https://github.com/open-mmlab/mmaction2/pull/388)) +- Move docs about demo to `demo/README.md` ([#329](https://github.com/open-mmlab/mmaction2/pull/329)) +- Remove redundant code in `tools/test.py` ([#310](https://github.com/open-mmlab/mmaction2/pull/310)) +- Automatically calculate number of test clips for Recognizer2D ([#359](https://github.com/open-mmlab/mmaction2/pull/359)) + +**Bug and Typo Fixes** +- Fix rename Kinetics classnames bug ([#384](https://github.com/open-mmlab/mmaction2/pull/384)) +- Fix a bug in BaseDataset when `data_prefix` is None ([#314](https://github.com/open-mmlab/mmaction2/pull/314)) +- Fix a bug about `tmp_folder` in `OpenCVInit` ([#357](https://github.com/open-mmlab/mmaction2/pull/357)) +- Fix `get_thread_id` when not using disk as backend ([#354](https://github.com/open-mmlab/mmaction2/pull/354), [#357](https://github.com/open-mmlab/mmaction2/pull/357)) +- Fix the bug of HVU object `num_classes` from 1679 to 1678 ([#307](https://github.com/open-mmlab/mmaction2/pull/307)) +- Fix typo in `export_model.md` ([#399](https://github.com/open-mmlab/mmaction2/pull/399)) +- Fix OmniSource training configs ([#321](https://github.com/open-mmlab/mmaction2/pull/321)) +- Fix Issue #306: Bug of SampleAVAFrames ([#317](https://github.com/open-mmlab/mmaction2/pull/317)) + +**ModelZoo** +- Add SlowOnly model for GYM99, both RGB and Flow ([#336](https://github.com/open-mmlab/mmaction2/pull/336)) +- Add auto modelzoo statistics in readthedocs ([#327](https://github.com/open-mmlab/mmaction2/pull/327)) +- Add TSN for HMDB51 pretrained on Kinetics400, Moments in Time and ImageNet. ([#372](https://github.com/open-mmlab/mmaction2/pull/372)) + +### v0.8.0 (31/10/2020) + +**Highlights** +- Support [OmniSource](https://arxiv.org/abs/2003.13042) +- Support C3D +- Support video recognition with audio modality +- Support HVU +- Support X3D + +**New Features** +- Support AVA dataset preparation ([#266](https://github.com/open-mmlab/mmaction2/pull/266)) +- Support the training of video recognition dataset with multiple tag categories ([#235](https://github.com/open-mmlab/mmaction2/pull/235)) +- Support joint training with multiple training datasets of multiple formats, including images, untrimmed videos, etc. 
([#242](https://github.com/open-mmlab/mmaction2/pull/242)) +- Support to specify a start epoch to conduct evaluation ([#216](https://github.com/open-mmlab/mmaction2/pull/216)) +- Implement X3D models, support testing with model weights converted from SlowFast ([#288](https://github.com/open-mmlab/mmaction2/pull/288)) +- Support specify a start epoch to conduct evaluation ([#216](https://github.com/open-mmlab/mmaction2/pull/216)) + +**Improvements** +- Set default values of 'average_clips' in each config file so that there is no need to set it explicitly during testing in most cases ([#232](https://github.com/open-mmlab/mmaction2/pull/232)) +- Extend HVU datatools to generate individual file list for each tag category ([#258](https://github.com/open-mmlab/mmaction2/pull/258)) +- Support data preparation for Kinetics-600 and Kinetics-700 ([#254](https://github.com/open-mmlab/mmaction2/pull/254)) +- Use `metric_dict` to replace hardcoded arguments in `evaluate` function ([#286](https://github.com/open-mmlab/mmaction2/pull/286)) +- Add `cfg-options` in arguments to override some settings in the used config for convenience ([#212](https://github.com/open-mmlab/mmaction2/pull/212)) +- Rename the old evaluating protocol `mean_average_precision` as `mmit_mean_average_precision` since it is only used on MMIT and is not the `mAP` we usually talk about. Add `mean_average_precision`, which is the real `mAP` ([#235](https://github.com/open-mmlab/mmaction2/pull/235)) +- Add accurate setting (Three crop * 2 clip) and report corresponding performance for TSM model ([#241](https://github.com/open-mmlab/mmaction2/pull/241)) +- Add citations in each preparing_dataset.md in `tools/data/dataset` ([#289](https://github.com/open-mmlab/mmaction2/pull/289)) +- Update the performance of audio-visual fusion on Kinetics-400 ([#281](https://github.com/open-mmlab/mmaction2/pull/281)) +- Support data preparation of OmniSource web datasets, including GoogleImage, InsImage, InsVideo and KineticsRawVideo ([#294](https://github.com/open-mmlab/mmaction2/pull/294)) +- Use `metric_options` dict to provide metric args in `evaluate` ([#286](https://github.com/open-mmlab/mmaction2/pull/286)) + +**Bug Fixes** +- Register `FrameSelector` in `PIPELINES` ([#268](https://github.com/open-mmlab/mmaction2/pull/268)) +- Fix the potential bug for default value in dataset_setting ([#245](https://github.com/open-mmlab/mmaction2/pull/245)) +- Fix multi-node dist test ([#292](https://github.com/open-mmlab/mmaction2/pull/292)) +- Fix the data preparation bug for `something-something` dataset ([#278](https://github.com/open-mmlab/mmaction2/pull/278)) +- Fix the invalid config url in slowonly README data benchmark ([#249](https://github.com/open-mmlab/mmaction2/pull/249)) +- Validate that the performance of models trained with videos have no significant difference comparing to the performance of models trained with rawframes ([#256](https://github.com/open-mmlab/mmaction2/pull/256)) +- Correct the `img_norm_cfg` used by TSN-3seg-R50 UCF-101 model, improve the Top-1 accuracy by 3% ([#273](https://github.com/open-mmlab/mmaction2/pull/273)) + +**ModelZoo** +- Add Baselines for Kinetics-600 and Kinetics-700, including TSN-R50-8seg and SlowOnly-R50-8x8 ([#259](https://github.com/open-mmlab/mmaction2/pull/259)) +- Add OmniSource benchmark on MiniKineitcs ([#296](https://github.com/open-mmlab/mmaction2/pull/296)) +- Add Baselines for HVU, including TSN-R18-8seg on 6 tag categories of HVU ([#287](https://github.com/open-mmlab/mmaction2/pull/287)) +- Add 
X3D models ported from [SlowFast](https://github.com/facebookresearch/SlowFast/) ([#288](https://github.com/open-mmlab/mmaction2/pull/288)) + +### v0.7.0 (30/9/2020) + +**Highlights** +- Support TPN +- Support JHMDB, UCF101-24, HVU dataset preparation +- support onnx model conversion + +**New Features** +- Support the data pre-processing pipeline for the HVU Dataset ([#277](https://github.com/open-mmlab/mmaction2/pull/227/)) +- Support real-time action recognition from web camera ([#171](https://github.com/open-mmlab/mmaction2/pull/171)) +- Support onnx ([#160](https://github.com/open-mmlab/mmaction2/pull/160)) +- Support UCF101-24 preparation ([#219](https://github.com/open-mmlab/mmaction2/pull/219)) +- Support evaluating mAP for ActivityNet with [CUHK17_activitynet_pred](http://activity-net.org/challenges/2017/evaluation.html) ([#176](https://github.com/open-mmlab/mmaction2/pull/176)) +- Add the data pipeline for ActivityNet, including downloading videos, extracting RGB and Flow frames, finetuning TSN and extracting feature ([#190](https://github.com/open-mmlab/mmaction2/pull/190)) +- Support JHMDB preparation ([#220](https://github.com/open-mmlab/mmaction2/pull/220)) + +**ModelZoo** +- Add finetuning setting for SlowOnly ([#173](https://github.com/open-mmlab/mmaction2/pull/173)) +- Add TSN and SlowOnly models trained with [OmniSource](https://arxiv.org/abs/2003.13042), which achieve 75.7% Top-1 with TSN-R50-3seg and 80.4% Top-1 with SlowOnly-R101-8x8 ([#215](https://github.com/open-mmlab/mmaction2/pull/215)) + +**Improvements** +- Support demo with video url ([#165](https://github.com/open-mmlab/mmaction2/pull/165)) +- Support multi-batch when testing ([#184](https://github.com/open-mmlab/mmaction2/pull/184)) +- Add tutorial for adding a new learning rate updater ([#181](https://github.com/open-mmlab/mmaction2/pull/181)) +- Add config name in meta info ([#183](https://github.com/open-mmlab/mmaction2/pull/183)) +- Remove git hash in `__version__` ([#189](https://github.com/open-mmlab/mmaction2/pull/189)) +- Check mmcv version ([#189](https://github.com/open-mmlab/mmaction2/pull/189)) +- Update url with 'https://download.openmmlab.com' ([#208](https://github.com/open-mmlab/mmaction2/pull/208)) +- Update Docker file to support PyTorch 1.6 and update `install.md` ([#209](https://github.com/open-mmlab/mmaction2/pull/209)) +- Polish readsthedocs display ([#217](https://github.com/open-mmlab/mmaction2/pull/217), [#229](https://github.com/open-mmlab/mmaction2/pull/229)) + +**Bug Fixes** +- Fix the bug when using OpenCV to extract only RGB frames with original shape ([#184](https://github.com/open-mmlab/mmaction2/pull/187)) +- Fix the bug of sthv2 `num_classes` from 339 to 174 ([#174](https://github.com/open-mmlab/mmaction2/pull/174), [#207](https://github.com/open-mmlab/mmaction2/pull/207)) + + +### v0.6.0 (2/9/2020) + +**Highlights** +- Support TIN, CSN, SSN, NonLocal +- Support FP16 training + +**New Features** +- Support NonLocal module and provide ckpt in TSM and I3D ([#41](https://github.com/open-mmlab/mmaction2/pull/41)) +- Support SSN ([#33](https://github.com/open-mmlab/mmaction2/pull/33), [#37](https://github.com/open-mmlab/mmaction2/pull/37), [#52](https://github.com/open-mmlab/mmaction2/pull/52), [#55](https://github.com/open-mmlab/mmaction2/pull/55)) +- Support CSN ([#87](https://github.com/open-mmlab/mmaction2/pull/87)) +- Support TIN ([#53](https://github.com/open-mmlab/mmaction2/pull/53)) +- Support HMDB51 dataset preparation 
([#60](https://github.com/open-mmlab/mmaction2/pull/60)) +- Support encoding videos from frames ([#84](https://github.com/open-mmlab/mmaction2/pull/84)) +- Support FP16 training ([#25](https://github.com/open-mmlab/mmaction2/pull/25)) +- Enhance demo by supporting rawframe inference ([#59](https://github.com/open-mmlab/mmaction2/pull/59)), output video/gif ([#72](https://github.com/open-mmlab/mmaction2/pull/72)) + +**ModelZoo** +- Update Slowfast modelzoo ([#51](https://github.com/open-mmlab/mmaction2/pull/51)) +- Update TSN, TSM video checkpoints ([#50](https://github.com/open-mmlab/mmaction2/pull/50)) +- Add data benchmark for TSN ([#57](https://github.com/open-mmlab/mmaction2/pull/57)) +- Add data benchmark for SlowOnly ([#77](https://github.com/open-mmlab/mmaction2/pull/77)) +- Add BSN/BMN performance results with feature extracted by our codebase ([#99](https://github.com/open-mmlab/mmaction2/pull/99)) + +**Improvements** +- Polish data preparation codes ([#70](https://github.com/open-mmlab/mmaction2/pull/70)) +- Improve data preparation scripts ([#58](https://github.com/open-mmlab/mmaction2/pull/58)) +- Improve unittest coverage and minor fix ([#62](https://github.com/open-mmlab/mmaction2/pull/62)) +- Support PyTorch 1.6 in CI ([#117](https://github.com/open-mmlab/mmaction2/pull/117)) +- Support `with_offset` for rawframe dataset ([#48](https://github.com/open-mmlab/mmaction2/pull/48)) +- Support json annotation files ([#119](https://github.com/open-mmlab/mmaction2/pull/119)) +- Support `multi-class` in TSMHead ([#104](https://github.com/open-mmlab/mmaction2/pull/104)) +- Support using `val_step()` to validate data for each `val` workflow ([#123](https://github.com/open-mmlab/mmaction2/pull/123)) +- Use `xxInit()` method to get `total_frames` and make `total_frames` a required key ([#90](https://github.com/open-mmlab/mmaction2/pull/90)) +- Add paper introduction in model readme ([#140](https://github.com/open-mmlab/mmaction2/pull/140)) +- Adjust the directory structure of `tools/` and rename some scripts files ([#142](https://github.com/open-mmlab/mmaction2/pull/142)) + +**Bug Fixes** +- Fix configs for localization test ([#67](https://github.com/open-mmlab/mmaction2/pull/67)) +- Fix configs of SlowOnly by fixing lr to 8 gpus ([#136](https://github.com/open-mmlab/mmaction2/pull/136)) +- Fix the bug in analyze_log ([#54](https://github.com/open-mmlab/mmaction2/pull/54)) +- Fix the bug of generating HMDB51 class index file ([#69](https://github.com/open-mmlab/mmaction2/pull/69)) +- Fix the bug of using `load_checkpoint()` in ResNet ([#93](https://github.com/open-mmlab/mmaction2/pull/93)) +- Fix the bug of `--work-dir` when using slurm training script ([#110](https://github.com/open-mmlab/mmaction2/pull/110)) +- Correct the sthv1/sthv2 rawframes filelist generate command ([#71](https://github.com/open-mmlab/mmaction2/pull/71)) +- `CosineAnnealing` typo ([#47](https://github.com/open-mmlab/mmaction2/pull/47)) + + +### v0.5.0 (9/7/2020) + +**Highlights** +- MMAction2 is released + +**New Features** +- Support various datasets: UCF101, Kinetics-400, Something-Something V1&V2, Moments in Time, + Multi-Moments in Time, THUMOS14 +- Support various action recognition methods: TSN, TSM, R(2+1)D, I3D, SlowOnly, SlowFast, Non-local +- Support various action localization methods: BSN, BMN +- Colab demo for action recognition diff --git a/Downstream/Open-Set-Action-Recognition/docs/conf.py b/Downstream/Open-Set-Action-Recognition/docs/conf.py new file mode 100644 index 0000000..3aa2ca7 --- 
/dev/null +++ b/Downstream/Open-Set-Action-Recognition/docs/conf.py @@ -0,0 +1,79 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import subprocess +import sys + +sys.path.insert(0, os.path.abspath('..')) + +# -- Project information ----------------------------------------------------- + +project = 'MMAction2' +copyright = '2020, OpenMMLab' +author = 'MMAction2 Authors' +version_file = '../mmaction/version.py' + + +def get_version(): + with open(version_file, 'r') as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +# The full version, including alpha/beta/rc tags +release = get_version() + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', + 'recommonmark', 'sphinx_markdown_tables' +] + +# numpy and torch are required +autodoc_mock_imports = ['mmaction.version', 'cv2', 'PIL'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# -- Options for HTML output ------------------------------------------------- +source_suffix = {'.rst': 'restructuredtext', '.md': 'markdown'} + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = [] + +master_doc = 'index' + + +def builder_inited_handler(app): + subprocess.run(['./merge_docs.sh']) + subprocess.run(['./stat.py']) + + +def setup(app): + app.connect('builder-inited', builder_inited_handler) diff --git a/Downstream/Open-Set-Action-Recognition/docs/config.md b/Downstream/Open-Set-Action-Recognition/docs/config.md new file mode 100644 index 0000000..368d2bc --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/docs/config.md @@ -0,0 +1,495 @@ +# Config System + +We use python files as configs. You can find all the provided configs under `$MMAction2/configs`. 
+ + + +- [Config File Naming Convention](#config-file-naming-convention) +- [Config File Structure](#config-file-structure) + * [Config System for Action localization](#config-system-for-action-localization) + * [Config System for Action Recognition](#config-system-for-action-recognition) +- [FAQ](#faq) + * [Use intermediate variables in configs](#use-intermediate-variables-in-configs) + + + +## Config File Naming Convention + +We follow the style below to name config files. Contributors are advised to follow the same style. + +``` +{model}_[model setting]_{backbone}_[misc]_{data setting}_[gpu x batch_per_gpu]_{schedule}_{dataset}_{modality} +``` + +`{xxx}` is required field and `[yyy]` is optional. + +- `{model}`: model type, e.g. `tsn`, `i3d`, etc. +- `[model setting]`: specific setting for some models. +- `{backbone}`: backbone type, e.g. `r50` (ResNet-50), etc. +- `[misc]`: miscellaneous setting/plugins of model, e.g. `dense`, `320p`, `video`, etc. +- `{data setting}`: frame sample setting in `{clip_len}x{frame_interval}x{num_clips}` format. +- `[gpu x batch_per_gpu]`: GPUs and samples per GPU. +- `{schedule}`: training schedule, e.g. `20e` means 20 epochs. +- `{dataset}`: dataset name, e.g. `kinetics400`, `mmit`, etc. +- `{modality}`: frame modality, e.g. `rgb`, `flow`, etc. + +## Config File Structure + +Please refer to the corresponding pages for config file structure for different tasks. + +### Config System for Action localization + +We incorporate modular design into our config system, +which is convenient to conduct various experiments. + +- An Example of BMN + + To help the users have a basic idea of a complete config structure and the modules in an action localization system, + we make brief comments on the config of BMN as the following. + For more detailed usage and alternative for per parameter in each module, please refer to the API documentation. 
+ + ```python + # model settings + model = dict( # Config of the model + type='BMN', # Type of the localizer + temporal_dim=100, # Total frames selected for each video + boundary_ratio=0.5, # Ratio for determining video boundaries + num_samples=32, # Number of samples for each proposal + num_samples_per_bin=3, # Number of bin samples for each sample + feat_dim=400, # Dimension of feature + soft_nms_alpha=0.4, # Soft NMS alpha + soft_nms_low_threshold=0.5, # Soft NMS low threshold + soft_nms_high_threshold=0.9, # Soft NMS high threshold + post_process_top_k=100) # Top k proposals in post process + # model training and testing settings + train_cfg = None # Config of training hyperparameters for BMN + test_cfg = dict(average_clips='score') # Config for testing hyperparameters for BMN + + # dataset settings + dataset_type = 'ActivityNetDataset' # Type of dataset for training, valiation and testing + data_root = 'data/activitynet_feature_cuhk/csv_mean_100/' # Root path to data for training + data_root_val = 'data/activitynet_feature_cuhk/csv_mean_100/' # Root path to data for validation and testing + ann_file_train = 'data/ActivityNet/anet_anno_train.json' # Path to the annotation file for training + ann_file_val = 'data/ActivityNet/anet_anno_val.json' # Path to the annotation file for validation + ann_file_test = 'data/ActivityNet/anet_anno_test.json' # Path to the annotation file for testing + + train_pipeline = [ # List of training pipeline steps + dict(type='LoadLocalizationFeature'), # Load localization feature pipeline + dict(type='GenerateLocalizationLabels'), # Generate localization labels pipeline + dict( # Config of Collect + type='Collect', # Collect pipeline that decides which keys in the data should be passed to the localizer + keys=['raw_feature', 'gt_bbox'], # Keys of input + meta_name='video_meta', # Meta name + meta_keys=['video_name']), # Meta keys of input + dict( # Config of ToTensor + type='ToTensor', # Convert other types to tensor type pipeline + keys=['raw_feature']), # Keys to be converted from image to tensor + dict( # Config of ToDataContainer + type='ToDataContainer', # Pipeline to convert the data to DataContainer + fields=[dict(key='gt_bbox', stack=False, cpu_only=True)]) # Required fields to be converted with keys and attributes + ] + val_pipeline = [ # List of validation pipeline steps + dict(type='LoadLocalizationFeature'), # Load localization feature pipeline + dict(type='GenerateLocalizationLabels'), # Generate localization labels pipeline + dict( # Config of Collect + type='Collect', # Collect pipeline that decides which keys in the data should be passed to the localizer + keys=['raw_feature', 'gt_bbox'], # Keys of input + meta_name='video_meta', # Meta name + meta_keys=[ + 'video_name', 'duration_second', 'duration_frame', 'annotations', + 'feature_frame' + ]), # Meta keys of input + dict( # Config of ToTensor + type='ToTensor', # Convert other types to tensor type pipeline + keys=['raw_feature']), # Keys to be converted from image to tensor + dict( # Config of ToDataContainer + type='ToDataContainer', # Pipeline to convert the data to DataContainer + fields=[dict(key='gt_bbox', stack=False, cpu_only=True)]) # Required fields to be converted with keys and attributes + ] + test_pipeline = [ # List of testing pipeline steps + dict(type='LoadLocalizationFeature'), # Load localization feature pipeline + dict( # Config of Collect + type='Collect', # Collect pipeline that decides which keys in the data should be passed to the localizer + keys=['raw_feature'], # 
Keys of input + meta_name='video_meta', # Meta name + meta_keys=[ + 'video_name', 'duration_second', 'duration_frame', 'annotations', + 'feature_frame' + ]), # Meta keys of input + dict( # Config of ToTensor + type='ToTensor', # Convert other types to tensor type pipeline + keys=['raw_feature']), # Keys to be converted from image to tensor + ] + data = dict( # Config of data + videos_per_gpu=8, # Batch size of each single GPU + workers_per_gpu=8, # Workers to pre-fetch data for each single GPU + train_dataloader=dict( # Additional config of train dataloader + drop_last=True), # Whether to drop out the last batch of data in training + val_dataloader=dict( # Additional config of validation dataloader + videos_per_gpu=1), # Batch size of each single GPU during evaluation + test_dataloader=dict( # Additional config of test dataloader + videos_per_gpu=2), # Batch size of each single GPU during testing + test=dict( # Testing dataset config + type=dataset_type, + ann_file=ann_file_test, + pipeline=test_pipeline, + data_prefix=data_root_val), + val=dict( # Validation dataset config + type=dataset_type, + ann_file=ann_file_val, + pipeline=val_pipeline, + data_prefix=data_root_val), + train=dict( # Training dataset config + type=dataset_type, + ann_file=ann_file_train, + pipeline=train_pipeline, + data_prefix=data_root)) + + # optimizer + optimizer = dict( + # Config used to build optimizer, support (1). All the optimizers in PyTorch + # whose arguments are also the same as those in PyTorch. (2). Custom optimizers + # which are builed on `constructor`, referring to "tutorials/new_modules.md" + # for implementation. + type='Adam', # Type of optimizer, refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/optimizer/default_constructor.py#L13 for more details + lr=0.001, # Learning rate, see detail usages of the parameters in the documentaion of PyTorch + weight_decay=0.0001) # Weight decay of Adam + optimizer_config = dict( # Config used to build the optimizer hook + grad_clip=None) # Most of the methods do not use gradient clip + # learning policy + lr_config = dict( # Learning rate scheduler config used to register LrUpdater hook + policy='step', # Policy of scheduler, also support CosineAnnealing, Cyclic, etc. 
Refer to details of supported LrUpdater from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9 + step=7) # Steps to decay the learning rate + + total_epochs = 9 # Total epochs to train the model + checkpoint_config = dict( # Config to set the checkpoint hook, Refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/checkpoint.py for implementation + interval=1) # Interval to save checkpoint + evaluation = dict( # Config of evaluation during training + interval=1, # Interval to perform evaluation + metrics=['AR@AN']) # Metrics to be performed + log_config = dict( # Config to register logger hook + interval=50, # Interval to print the log + hooks=[ # Hooks to be implemented during training + dict(type='TextLoggerHook'), # The logger used to record the training process + # dict(type='TensorboardLoggerHook'), # The Tensorboard logger is also supported + ]) + + # runtime settings + dist_params = dict(backend='nccl') # Parameters to setup distributed training, the port can also be set + log_level = 'INFO' # The level of logging + work_dir = './work_dirs/bmn_400x100_2x8_9e_activitynet_feature/' # Directory to save the model checkpoints and logs for the current experiments + load_from = None # load models as a pre-trained model from a given path. This will not resume training + resume_from = None # Resume checkpoints from a given path, the training will be resumed from the epoch when the checkpoint's is saved + workflow = [('train', 1)] # Workflow for # runner. [('train', 1)] means there is only one workflow and the workflow named 'train' is executed once + output_config = dict( # Config of localization ouput + out=f'{work_dir}/results.json', # Path to output file + output_format='json') # File format of output file + ``` + +### Config System for Action Recognition + +We incorporate modular design into our config system, +which is convenient to conduct various experiments. + +- An Example of TSN + + To help the users have a basic idea of a complete config structure and the modules in an action recognition system, + we make brief comments on the config of TSN as the following. + For more detailed usage and alternative for per parameter in each module, please refer to the API documentation. + + ```python + # model settings + model = dict( # Config of the model + type='Recognizer2D', # Type of the recognizer + backbone=dict( # Dict for backbone + type='ResNet', # Name of the backbone + pretrained='torchvision://resnet50', # The url/site of the pretrained model + depth=50, # Depth of ResNet model + norm_eval=False), # Whether to set BN layers to eval mode when training + cls_head=dict( # Dict for classification head + type='TSNHead', # Name of classification head + num_classes=400, # Number of classes to be classified. + in_channels=2048, # The input channels of classification head. + spatial_type='avg', # Type of pooling in spatial dimension + consensus=dict(type='AvgConsensus', dim=1), # Config of consensus module + dropout_ratio=0.4, # Probability in dropout layer + init_std=0.01)) # Std value for linear layer initiation + # model training and testing settings + train_cfg = None # Config of training hyperparameters for TSN + test_cfg = dict(average_clips=None) # Config for testing hyperparameters for TSN. 
Here we define clip averaging method in it + + # dataset settings + dataset_type = 'RawframeDataset' # Type of dataset for training, valiation and testing + data_root = 'data/kinetics400/rawframes_train/' # Root path to data for training + data_root_val = 'data/kinetics400/rawframes_val/' # Root path to data for validation and testing + ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' # Path to the annotation file for training + ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' # Path to the annotation file for validation + ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' # Path to the annotation file for testing + img_norm_cfg = dict( # Config of image normalition used in data pipeline + mean=[123.675, 116.28, 103.53], # Mean values of different channels to normalize + std=[58.395, 57.12, 57.375], # Std values of different channels to normalize + to_bgr=False) # Whether to convert channels from RGB to BGR + + train_pipeline = [ # List of training pipeline steps + dict( # Config of SampleFrames + type='SampleFrames', # Sample frames pipeline, sampling frames from video + clip_len=1, # Frames of each sampled output clip + frame_interval=1, # Temporal interval of adjacent sampled frames + num_clips=3), # Number of clips to be sampled + dict( # Config of RawFrameDecode + type='RawFrameDecode'), # Load and decode Frames pipeline, picking raw frames with given indices + dict( # Config of Resize + type='Resize', # Resize pipeline + scale=(-1, 256)), # The scale to resize images + dict( # Config of MultiScaleCrop + type='MultiScaleCrop', # Multi scale crop pipeline, cropping images with a list of randomly selected scales + input_size=224, # Input size of the network + scales=(1, 0.875, 0.75, 0.66), # Scales of weight and height to be selected + random_crop=False, # Whether to randomly sample cropping bbox + max_wh_scale_gap=1), # Maximum gap of w and h scale levels + dict( # Config of Resize + type='Resize', # Resize pipeline + scale=(224, 224), # The scale to resize images + keep_ratio=False), # Whether to resize with changing the aspect ratio + dict( # Config of Flip + type='Flip', # Flip Pipeline + flip_ratio=0.5), # Probability of implementing flip + dict( # Config of Normalize + type='Normalize', # Normalize pipeline + **img_norm_cfg), # Config of image normalization + dict( # Config of FormatShape + type='FormatShape', # Format shape pipeline, Format final image shape to the given input_format + input_format='NCHW'), # Final image shape format + dict( # Config of Collect + type='Collect', # Collect pipeline that decides which keys in the data should be passed to the recognizer + keys=['imgs', 'label'], # Keys of input + meta_keys=[]), # Meta keys of input + dict( # Config of ToTensor + type='ToTensor', # Convert other types to tensor type pipeline + keys=['imgs', 'label']) # Keys to be converted from image to tensor + ] + val_pipeline = [ # List of validation pipeline steps + dict( # Config of SampleFrames + type='SampleFrames', # Sample frames pipeline, sampling frames from video + clip_len=1, # Frames of each sampled output clip + frame_interval=1, # Temporal interval of adjacent sampled frames + num_clips=3, # Number of clips to be sampled + test_mode=True), # Whether to set test mode in sampling + dict( # Config of RawFrameDecode + type='RawFrameDecode'), # Load and decode Frames pipeline, picking raw frames with given indices + dict( # Config of Resize + type='Resize', # Resize pipeline + scale=(-1, 256)), # The scale to 
resize images + dict( # Config of CenterCrop + type='CenterCrop', # Center crop pipeline, cropping the center area from images + crop_size=224), # The size to crop images + dict( # Config of Flip + type='Flip', # Flip pipeline + flip_ratio=0), # Probability of implementing flip + dict( # Config of Normalize + type='Normalize', # Normalize pipeline + **img_norm_cfg), # Config of image normalization + dict( # Config of FormatShape + type='FormatShape', # Format shape pipeline, Format final image shape to the given input_format + input_format='NCHW'), # Final image shape format + dict( # Config of Collect + type='Collect', # Collect pipeline that decides which keys in the data should be passed to the recognizer + keys=['imgs', 'label'], # Keys of input + meta_keys=[]), # Meta keys of input + dict( # Config of ToTensor + type='ToTensor', # Convert other types to tensor type pipeline + keys=['imgs']) # Keys to be converted from image to tensor + ] + test_pipeline = [ # List of testing pipeline steps + dict( # Config of SampleFrames + type='SampleFrames', # Sample frames pipeline, sampling frames from video + clip_len=1, # Frames of each sampled output clip + frame_interval=1, # Temporal interval of adjacent sampled frames + num_clips=25, # Number of clips to be sampled + test_mode=True), # Whether to set test mode in sampling + dict( # Config of RawFrameDecode + type='RawFrameDecode'), # Load and decode Frames pipeline, picking raw frames with given indices + dict( # Config of Resize + type='Resize', # Resize pipeline + scale=(-1, 256)), # The scale to resize images + dict( # Config of CenterCrop + type='TenCrop', # Center crop pipeline, cropping the center area from images + crop_size=224), # The size to crop images + dict( # Config of Flip + type='Flip', # Flip pipeline + flip_ratio=0), # Probability of implementing flip + dict( # Config of Normalize + type='Normalize', # Normalize pipeline + **img_norm_cfg), # Config of image normalization + dict( # Config of FormatShape + type='FormatShape', # Format shape pipeline, Format final image shape to the given input_format + input_format='NCHW'), # Final image shape format + dict( # Config of Collect + type='Collect', # Collect pipeline that decides which keys in the data should be passed to the recognizer + keys=['imgs', 'label'], # Keys of input + meta_keys=[]), # Meta keys of input + dict( # Config of ToTensor + type='ToTensor', # Convert other types to tensor type pipeline + keys=['imgs']) # Keys to be converted from image to tensor + ] + data = dict( # Config of data + videos_per_gpu=32, # Batch size of each single GPU + workers_per_gpu=4, # Workers to pre-fetch data for each single GPU + train_dataloader=dict( # Additional config of train dataloader + drop_last=True), # Whether to drop out the last batch of data in training + val_dataloader=dict( # Additional config of validation dataloader + videos_per_gpu=1), # Batch size of each single GPU during evaluation + test_dataloader=dict( # Additional config of test dataloader + videos_per_gpu=2), # Batch size of each single GPU during testing + train=dict( # Training dataset config + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( # Validation dataset config + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( # Testing dataset config + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + pipeline=test_pipeline)) + # optimizer + optimizer = dict( + # 
Config used to build optimizer, support (1). All the optimizers in PyTorch + # whose arguments are also the same as those in PyTorch. (2). Custom optimizers + # which are builed on `constructor`, referring to "tutorials/new_modules.md" + # for implementation. + type='SGD', # Type of optimizer, refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/optimizer/default_constructor.py#L13 for more details + lr=0.01, # Learning rate, see detail usages of the parameters in the documentaion of PyTorch + momentum=0.9, # Momentum, + weight_decay=0.0001) # Weight decay of SGD + optimizer_config = dict( # Config used to build the optimizer hook + grad_clip=dict(max_norm=40, norm_type=2)) # Use gradient clip + # learning policy + lr_config = dict( # Learning rate scheduler config used to register LrUpdater hook + policy='step', # Policy of scheduler, also support CosineAnnealing, Cyclic, etc. Refer to details of supported LrUpdater from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9 + step=[40, 80]) # Steps to decay the learning rate + total_epochs = 100 # Total epochs to train the model + checkpoint_config = dict( # Config to set the checkpoint hook, Refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/checkpoint.py for implementation + interval=5) # Interval to save checkpoint + evaluation = dict( # Config of evaluation during training + interval=5, # Interval to perform evaluation + metrics=['top_k_accuracy', 'mean_class_accuracy'], # Metrics to be performed + topk=(1, 5)) # K value for `top_k_accuracy` metric + log_config = dict( # Config to register logger hook + interval=20, # Interval to print the log + hooks=[ # Hooks to be implemented during training + dict(type='TextLoggerHook'), # The logger used to record the training process + # dict(type='TensorboardLoggerHook'), # The Tensorboard logger is also supported + ]) + + # runtime settings + dist_params = dict(backend='nccl') # Parameters to setup distributed training, the port can also be set + log_level = 'INFO' # The level of logging + work_dir = './work_dirs/tsn_r50_1x1x3_100e_kinetics400_rgb/' # Directory to save the model checkpoints and logs for the current experiments + load_from = None # load models as a pre-trained model from a given path. This will not resume training + resume_from = None # Resume checkpoints from a given path, the training will be resumed from the epoch when the checkpoint's is saved + workflow = [('train', 1)] # Workflow for runner. [('train', 1)] means there is only one workflow and the workflow named 'train' is executed once + + ``` + + +## FAQ + +### Use intermediate variables in configs + +Some intermediate variables are used in the config files, like `train_pipeline`/`val_pipeline`/`test_pipeline`, +`ann_file_train`/`ann_file_val`/`ann_file_test`, `img_norm_cfg` etc. + +For Example, we would like to first define `train_pipeline`/`val_pipeline`/`test_pipeline` and pass them into `data`. +Thus, `train_pipeline`/`val_pipeline`/`test_pipeline` are intermediate variable. + +we also define `ann_file_train`/`ann_file_val`/`ann_file_test` and `data_root`/`data_root_val` to provide data pipeline some +basic information. + +In addition, we use `img_norm_cfg` as intermediate variables to construct data augmentation components. + +```python +... 
+dataset_type = 'RawframeDataset' +data_root = 'data/kinetics400/rawframes_train' +data_root_val = 'data/kinetics400/rawframes_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) + +train_pipeline = [ + dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.8), + random_crop=False, + max_wh_scale_gap=0), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=10, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] + +data = dict( + videos_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + pipeline=test_pipeline)) +``` diff --git a/Downstream/Open-Set-Action-Recognition/docs/data_preparation.md b/Downstream/Open-Set-Action-Recognition/docs/data_preparation.md new file mode 100644 index 0000000..9197031 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/docs/data_preparation.md @@ -0,0 +1,153 @@ +# Data Preparation + +We provide some tips for MMAction2 data preparation in this file. + + + +- [Notes on Video Data Format](#notes-on-video-data-format) +- [Getting Data](#getting-data) + * [Prepare videos](#prepare-videos) + * [Extract frames](#extract-frames) + + [Alternative to denseflow](#alternative-to-denseflow) + * [Generate file list](#generate-file-list) + * [Prepare audio](#prepare-audio) + + + +## Notes on Video Data Format + +MMAction2 supports two types of data format: raw frames and video. The former is widely used in previous projects such as [TSN](https://github.com/yjxiong/temporal-segment-networks). +This is fast when SSD is available but fails to scale to the fast-growing datasets. +(For example, the newest edition of [Kinetics](https://deepmind.com/research/open-source/open-source-datasets/kinetics/) has 650K videos and the total frames will take up several TBs.) 
+The latter saves much space but has to do computation-intensive video decoding at execution time.
+To make video decoding faster, we support several efficient video loading libraries, such as [decord](https://github.com/zhreshold/decord), [PyAV](https://github.com/PyAV-Org/PyAV), etc.
+
+## Getting Data
+
+The following guide is helpful when you want to experiment with a custom dataset.
+Similar to the datasets stated above, it is recommended to organize your data in `$MMACTION2/data/$DATASET`.
+
+### Prepare videos
+
+Please refer to the official website and/or the official script to prepare the videos.
+Note that the videos should be arranged in either
+
+(1). A two-level directory organized by `${CLASS_NAME}/${VIDEO_ID}`, which is recommended for action recognition datasets (such as UCF101 and Kinetics).
+
+(2). A single-level directory, which is recommended for action detection datasets or those with multiple annotations per video (such as THUMOS14).
+
+### Extract frames
+
+To extract both frames and optical flow, you can use the tool [denseflow](https://github.com/open-mmlab/denseflow) we wrote.
+Since different frame extraction tools produce different numbers of frames,
+it is beneficial to use the same tool to do both frame extraction and the flow computation, to avoid mismatched frame counts.
+
+```shell
+python build_rawframes.py ${SRC_FOLDER} ${OUT_FOLDER} [--task ${TASK}] [--level ${LEVEL}] \
+    [--num-worker ${NUM_WORKER}] [--flow-type ${FLOW_TYPE}] [--out-format ${OUT_FORMAT}] \
+    [--ext ${EXT}] [--new-width ${NEW_WIDTH}] [--new-height ${NEW_HEIGHT}] [--new-short ${NEW_SHORT}] \
+    [--resume] [--use-opencv] [--mixed-ext]
+```
+
+- `SRC_FOLDER`: Folder of the original videos.
+- `OUT_FOLDER`: Root folder where the extracted frames and optical flow are stored.
+- `TASK`: Extraction task indicating which kind of frames to extract. Allowed choices are `rgb`, `flow`, `both`.
+- `LEVEL`: Directory level. 1 for the single-level directory or 2 for the two-level directory.
+- `NUM_WORKER`: Number of workers to build rawframes.
+- `FLOW_TYPE`: Flow type to extract, e.g., `None`, `tvl1`, `warp_tvl1`, `farn`, `brox`.
+- `OUT_FORMAT`: Output format for extracted frames, e.g., `jpg`, `h5`, `png`.
+- `EXT`: Video file extension, e.g., `avi`, `mp4`.
+- `NEW_WIDTH`: Resized image width of output.
+- `NEW_HEIGHT`: Resized image height of output.
+- `NEW_SHORT`: Resized short-side length of output, keeping the aspect ratio.
+- `--resume`: Whether to resume optical flow extraction instead of overwriting.
+- `--use-opencv`: Whether to use OpenCV to extract rgb frames.
+- `--mixed-ext`: Whether to process video files with mixed extensions.
+
+The recommended practice is to
+
+1. set `$OUT_FOLDER` to a folder located on an SSD.
+2. symlink `$OUT_FOLDER` to `$MMACTION2/data/$DATASET/rawframes`.
+
+```shell
+ln -s ${YOUR_FOLDER} $MMACTION2/data/$DATASET/rawframes
+```
+
+#### Alternative to denseflow
+
+In case your device doesn't fulfill the installation requirements of [denseflow](https://github.com/open-mmlab/denseflow) (e.g., the Nvidia driver version), or you just want to see some quick demos of flow extraction, we provide a Python script `tools/flow_extraction.py` as an alternative to denseflow. You can use it to extract RGB frames and optical flow from one or several videos. Note that the script is much slower than denseflow, since it runs the optical flow algorithms on CPU.
+
+```shell
+python tools/flow_extraction.py --input ${INPUT} [--prefix ${PREFIX}] [--dest ${DEST}] [--rgb-tmpl ${RGB_TMPL}] \
+    [--flow-tmpl ${FLOW_TMPL}] [--start-idx ${START_IDX}] [--method ${METHOD}] [--bound ${BOUND}] [--save-rgb]
+```
+
+- `INPUT`: Videos for frame extraction; this can be a single video or a video list. The video list should be a txt file that contains only filenames, without directories.
+- `PREFIX`: The prefix of input videos, used when input is a video list.
+- `DEST`: The destination to save extracted frames.
+- `RGB_TMPL`: The template filename of rgb frames.
+- `FLOW_TMPL`: The template filename of flow frames.
+- `START_IDX`: The start index of extracted frames.
+- `METHOD`: The method used to generate flow.
+- `BOUND`: The maximum of optical flow.
+- `SAVE_RGB`: Also save extracted rgb frames.
+
+### Generate file list
+
+We provide a convenient script to generate the annotation file list. You can use the following command to generate file lists from the extracted frames or downloaded videos.
+
+```shell
+cd $MMACTION2
+python tools/data/build_file_list.py ${DATASET} ${SRC_FOLDER} [--rgb-prefix ${RGB_PREFIX}] \
+    [--flow-x-prefix ${FLOW_X_PREFIX}] [--flow-y-prefix ${FLOW_Y_PREFIX}] [--num-split ${NUM_SPLIT}] \
+    [--subset ${SUBSET}] [--level ${LEVEL}] [--format ${FORMAT}] [--out-root-path ${OUT_ROOT_PATH}] \
+    [--seed ${SEED}] [--shuffle]
+```
+
+- `DATASET`: Dataset to be prepared, e.g., `ucf101`, `kinetics400`, `thumos14`, `sthv1`, `sthv2`, etc.
+- `SRC_FOLDER`: Folder of the corresponding data format:
+  - "$MMACTION2/data/$DATASET/rawframes" if `--format rawframes`.
+  - "$MMACTION2/data/$DATASET/videos" if `--format videos`.
+- `RGB_PREFIX`: Name prefix of rgb frames.
+- `FLOW_X_PREFIX`: Name prefix of x flow frames.
+- `FLOW_Y_PREFIX`: Name prefix of y flow frames.
+- `NUM_SPLIT`: Number of splits for the file list.
+- `SUBSET`: Subset to generate the file list for. Allowed choices are `train`, `val`, `test`.
+- `LEVEL`: Directory level. 1 for the single-level directory or 2 for the two-level directory.
+- `FORMAT`: Source data format to generate file list. Allowed choices are `rawframes`, `videos`.
+- `OUT_ROOT_PATH`: Root path for the output.
+- `SEED`: Random seed.
+- `--shuffle`: Whether to shuffle the file list.
+
+Now, you can go to [getting_started.md](getting_started.md) to train and test the model.
+
+### Prepare audio
+
+We also provide a simple script for audio waveform extraction and mel-spectrogram generation.
+
+```shell
+cd $MMACTION2
+python tools/data/extract_audio.py ${ROOT} ${DST_ROOT} [--ext ${EXT}] [--num-workers ${N_WORKERS}] \
+    [--level ${LEVEL}]
+```
+
+- `ROOT`: The root directory of the videos.
+- `DST_ROOT`: The destination root directory of the audios.
+- `EXT`: Extension of the video files, e.g., `.mp4`.
+- `N_WORKERS`: Number of processes to be used.
+
+After extracting the audio, you are free to decode and generate the spectrogram on the fly, as is done in [this config](/configs/audio_recognition/tsn_r50_64x1x1_kinetics400_audio.py). As for the annotations, you can directly use those of the rawframes as long as you keep the relative positions of the audio files the same as in the rawframes directory. However, extracting spectrograms on the fly is slow and bad for prototype iteration. Therefore, we also provide a script (and many useful tools to play with) to generate spectrograms offline.
+ +```shell +cd $MMACTION2 +python tools/data/build_audio_features.py ${AUDIO_HOME_PATH} ${SPECTROGRAM_SAVE_PATH} [--level ${LEVEL}] \ + [--ext $EXT] [--num-workers $N_WORKERS] [--part $PART] +``` + +- `AUDIO_HOME_PATH`: The root directory of the audio files. +- `SPECTROGRAM_SAVE_PATH`: The destination root directory of the audio features. +- `EXT`: Extention of the audio files. e.g., `.m4a`. +- `N_WORKERS`: Number of processes to be used. +- `PART`: Determines how many parts to be splited and which part to run. e.g., `2/5` means splitting all files into 5-fold and executing the 2nd part. This is useful if you have several machines. + +The annotations for audio spectrogram features are identical to those of rawframes. You can simply make a copy of `dataset_[train/val]_list_rawframes.txt` and rename it as `dataset_[train/val]_list_audio_feature.txt` diff --git a/Downstream/Open-Set-Action-Recognition/docs/faq.md b/Downstream/Open-Set-Action-Recognition/docs/faq.md new file mode 100644 index 0000000..3047975 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/docs/faq.md @@ -0,0 +1,55 @@ +# FAQ + +We list some common issues faced by many users and their corresponding solutions here. +Feel free to enrich the list if you find any frequent issues and have ways to help others to solve them. +If the contents here do not cover your issue, please create an issue using the [provided templates](/.github/ISSUE_TEMPLATE/error-report.md) and make sure you fill in all required information in the template. + + +## Installation + +- **"No module named 'mmcv.ops'"; "No module named 'mmcv._ext'"** + + 1. Uninstall existing mmcv in the environment using `pip uninstall mmcv`. + 2. Install mmcv-full following the [installation instruction](https://mmcv.readthedocs.io/en/latest/#installation). + +- **"OSError: MoviePy Error: creation of None failed because of the following error"** + + Refer to [install.md](https://github.com/open-mmlab/mmaction2/blob/master/docs/install.md#requirements) + 1. For Windows users, [ImageMagick](https://www.imagemagick.org/script/index.php) will not be automatically detected by MoviePy, + there is a need to modify `moviepy/config_defaults.py` file by providing the path to the ImageMagick binary called `magick`, + like `IMAGEMAGICK_BINARY = "C:\\Program Files\\ImageMagick_VERSION\\magick.exe"` + 2. For Linux users, there is a need to modify the `/etc/ImageMagick-6/policy.xml` file by commenting out + `` to ``, + if ImageMagick is not detected by moviepy. + +## Data + +- **FileNotFound like `No such file or directory: xxx/xxx/img_00300.jpg`** + + In our repo, we set `start_index=1` as default value for rawframe dataset, and `start_index=0` as default value for video dataset. + If users encounter FileNotFound error for the first or last frame of the data, there is a need to check the files begin with offset 0 or 1, + that is `xxx_00000.jpg` or `xxx_00001.jpg`, and then change the `start_index` value of data pipeline in configs. + + +## Training + +- **How to just use trained recognizer models for backbone pre-training ?** + + Refer to [Use Pre-Trained Model](https://github.com/open-mmlab/mmaction2/blob/master/docs/tutorials/finetune.md#use-pre-trained-model), + in order to use the pre-trained model for the whole network, the new config adds the link of pre-trained models in the `load_from`. + + And to use backbone for pre-training, you can change `pretrained` value in the backbone dict of config files to the checkpoint path / url. 
+ When training, the unexpected keys will be ignored. + + +## Testing + +- **How to make predicted score normalized by softmax within [0, 1] ?** + + change this in the config, make `test_cfg = dict(average_clips='prob')`. + +## Deploying + +- **Why is the onnx model converted by mmaction2 throwing error when converting to other frameworks such as TensorRT?** + + For now, we can only make sure that models in mmaction2 are onnx-compatible. However, some operations in onnx may be unsupported by your target framework for deployment, e.g. TensorRT in [this issue](https://github.com/open-mmlab/mmaction2/issues/414). When such situation occurs, we suggest you raise an issue in the repo of your target framework as long as `pytorch2onnx.py` works well and is verified numerically. diff --git a/Downstream/Open-Set-Action-Recognition/docs/getting_started.md b/Downstream/Open-Set-Action-Recognition/docs/getting_started.md new file mode 100644 index 0000000..0bc61b6 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/docs/getting_started.md @@ -0,0 +1,420 @@ +# Getting Started + +This page provides basic tutorials about the usage of MMAction2. +For installation instructions, please see [install.md](install.md). + + + +- [Datasets](#datasets) +- [Inference with Pre-Trained Models](#inference-with-pre-trained-models) + * [Test a dataset](#test-a-dataset) + * [High-level APIs for testing a video and rawframes.](#high-level-apis-for-testing-a-video-and-rawframes) +- [Build a Model](#build-a-model) + * [Build a model with basic components](#build-a-model-with-basic-components) + * [Write a new model](#write-a-new-model) +- [Train a Model](#train-a-model) + * [Iteration pipeline](#iteration-pipeline) + * [Training setting](#training-setting) + * [Train with a single GPU](#train-with-a-single-gpu) + * [Train with multiple GPUs](#train-with-multiple-gpus) + * [Train with multiple machines](#train-with-multiple-machines) + * [Launch multiple jobs on a single machine](#launch-multiple-jobs-on-a-single-machine) +- [Tutorials](#tutorials) + + + +## Datasets + +It is recommended to symlink the dataset root to `$MMACTION2/data`. +If your folder structure is different, you may need to change the corresponding paths in config files. + +``` +mmaction2 +├── mmaction +├── tools +├── configs +├── data +│ ├── kinetics400 +│ │ ├── rawframes_train +│ │ ├── rawframes_val +│ │ ├── kinetics_train_list.txt +│ │ ├── kinetics_val_list.txt +│ ├── ucf101 +│ │ ├── rawframes_train +│ │ ├── rawframes_val +│ │ ├── ucf101_train_list.txt +│ │ ├── ucf101_val_list.txt +│ ├── ... +``` +For more information on data preparation, please see [data_preparation.md](data_preparation.md) + +For using custom datasets, please refer to [Tutorial 2: Adding New Dataset](tutorials/new_dataset.md) + +## Inference with Pre-Trained Models + +We provide testing scripts to evaluate a whole dataset (Kinetics-400, Something-Something V1&V2, (Multi-)Moments in Time, etc.), +and provide some high-level apis for easier integration to other projects. + +### Test a dataset + +- [x] single GPU +- [x] single node multiple GPUs +- [x] multiple node + +You can use the following commands to test a dataset. 
+ +```shell +# single-gpu testing +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] \ + [--gpu-collect] [--tmpdir ${TMPDIR}] [--options ${OPTIONS}] [--average-clips ${AVG_TYPE}] \ + [--launcher ${JOB_LAUNCHER}] [--local_rank ${LOCAL_RANK}] + +# multi-gpu testing +./tools/dist_test.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${GPU_NUM} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] \ + [--gpu-collect] [--tmpdir ${TMPDIR}] [--options ${OPTIONS}] [--average-clips ${AVG_TYPE}] \ + [--launcher ${JOB_LAUNCHER}] [--local_rank ${LOCAL_RANK}] +``` + +Optional arguments: +- `RESULT_FILE`: Filename of the output results. If not specified, the results will not be saved to a file. +- `EVAL_METRICS`: Items to be evaluated on the results. Allowed values depend on the dataset, e.g., `top_k_accuracy`, `mean_class_accuracy` are available for all datasets in recognition, `mmit_mean_average_precision` for Multi-Moments in Time, `mean_average_precision` for Multi-Moments in Time and HVU single category. `AR@AN` for ActivityNet, etc. +- `--gpu-collect`: If specified, recognition results will be collected using gpu communication. Otherwise, it will save the results on different gpus to `TMPDIR` and collect them by the rank 0 worker. +- `TMPDIR`: Temporary directory used for collecting results from multiple workers, available when `--gpu-collect` is not specified. +- `OPTIONS`: Custom options used for evaluation. Allowed values depend on the arguments of the `evaluate` function in dataset. +- `AVG_TYPE`: Items to average the test clips. If set to `prob`, it will apply softmax before averaging the clip scores. Otherwise, it will directly average the clip scores. +- `JOB_LAUNCHER`: Items for distributed job initialization launcher. Allowed choices are `none`, `pytorch`, `slurm`, `mpi`. Especially, if set to none, it will test in a non-distributed mode. +- `LOCAL_RANK`: ID for local rank. If not specified, it will be set to 0. + +Examples: + +Assume that you have already downloaded the checkpoints to the directory `checkpoints/`. + +1. Test TSN on Kinetics-400 (without saving the test results) and evaluate the top-k accuracy and mean class accuracy. + + ```shell + python tools/test.py configs/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py \ + checkpoints/SOME_CHECKPOINT.pth \ + --eval top_k_accuracy mean_class_accuracy + ``` + +2. Test TSN on Something-Something V1 with 8 GPUS, and evaluate the top-k accuracy. + + ```shell + ./tools/dist_test.sh configs/recognition/tsn/tsn_r50_1x1x8_50e_sthv1_rgb.py \ + checkpoints/SOME_CHECKPOINT.pth \ + 8 --out results.pkl --eval top_k_accuracy + ``` + +3. Test TSN on Kinetics-400 in slurm environment and evaluate the top-k accuracy + + ```shell + python tools/test.py configs/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py \ + checkpoints/SOME_CHECKPOINT.pth \ + --launcher slurm --eval top_k_accuracy + ``` + + +### High-level APIs for testing a video and rawframes. + +Here is an example of building the model and testing a given video. + +```python +import torch + +from mmaction.apis import init_recognizer, inference_recognizer + +config_file = 'configs/recognition/tsn/tsn_r50_video_inference_1x1x3_100e_kinetics400_rgb.py' +# download the checkpoint from model zoo and put it in `checkpoints/` +checkpoint_file = 'checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth' + +# assign the desired device. 
+device = 'cuda:0' # or 'cpu' +device = torch.device(device) + + # build the model from a config file and a checkpoint file +model = init_recognizer(config_file, checkpoint_file, device=device) + +# test a single video and show the result: +video = 'demo/demo.mp4' +labels = 'demo/label_map.txt' +results = inference_recognizer(model, video, labels) + +# show the results +print(f'The top-5 labels with corresponding scores are:') +for result in results: + print(f'{result[0]}: ', result[1]) +``` + +Here is an example of building the model and testing with a given rawframes directory. + +```python +import torch + +from mmaction.apis import init_recognizer, inference_recognizer + +config_file = 'configs/recognition/tsn/tsn_r50_inference_1x1x3_100e_kinetics400_rgb.py' +# download the checkpoint from model zoo and put it in `checkpoints/` +checkpoint_file = 'checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth' + +# assign the desired device. +device = 'cuda:0' # or 'cpu' +device = torch.device(device) + + # build the model from a config file and a checkpoint file +model = init_recognizer(config_file, checkpoint_file, device=device, use_frames=True) + +# test rawframe directory of a single video and show the result: +video = 'SOME_DIR_PATH/' +labels = 'demo/label_map.txt' +results = inference_recognizer(model, video, labels, use_frames=True) + +# show the results +print(f'The top-5 labels with corresponding scores are:') +for result in results: + print(f'{result[0]}: ', result[1]) +``` + +Here is an example of building the model and testing with a given video url. + +```python +import torch + +from mmaction.apis import init_recognizer, inference_recognizer + +config_file = 'configs/recognition/tsn/tsn_r50_inference_1x1x3_100e_kinetics400_rgb.py' +# download the checkpoint from model zoo and put it in `checkpoints/` +checkpoint_file = 'checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth' + +# assign the desired device. +device = 'cuda:0' # or 'cpu' +device = torch.device(device) + + # build the model from a config file and a checkpoint file +model = init_recognizer(config_file, checkpoint_file, device=device, use_frames=True) + +# test rawframe directory of a single video and show the result: +video = 'https://www.learningcontainer.com/wp-content/uploads/2020/05/sample-mp4-file.mp4' +labels = 'demo/label_map.txt' +results = inference_recognizer(model, video, labels, use_frames=True) + +# show the results +print(f'The top-5 labels with corresponding scores are:') +for result in results: + print(f'{result[0]}: ', result[1]) +``` + +**Note**: We define `data_prefix` in config files and set it None as default for our provided inference configs. +If the `data_prefix` is not None, the path for the video file (or rawframe directory) to get will be `data_prefix/video`. +Here, the `video` is the param in the demo scripts above. +This detail can be found in `rawframe_dataset.py` and `video_dataset.py`. For example, + +* When video (rawframes) path is `SOME_DIR_PATH/VIDEO.mp4` (`SOME_DIR_PATH/VIDEO_NAME/img_xxxxx.jpg`), and `data_prefix` is None in the config file, +the param `video` should be `SOME_DIR_PATH/VIDEO.mp4` (`SOME_DIR_PATH/VIDEO_NAME`). + +* When video (rawframes) path is `SOME_DIR_PATH/VIDEO.mp4` (`SOME_DIR_PATH/VIDEO_NAME/img_xxxxx.jpg`), and `data_prefix` is `SOME_DIR_PATH` in the config file, +the param `video` should be `VIDEO.mp4` (`VIDEO_NAME`). 
+ +* When rawframes path is `VIDEO_NAME/img_xxxxx.jpg`, and `data_prefix` is None in the config file, the param `video` should be `VIDEO_NAME`. + +* When passing a url instead of a local video file, you need to use OpenCV as the video decoding backend. + +A notebook demo can be found in [demo/demo.ipynb](/demo/demo.ipynb) + + +## Build a Model + +### Build a model with basic components + +In MMAction2, model components are basically categorized as 4 types. + +- recognizer: the whole recognizer model pipeline, usually contains a backbone and cls_head. +- backbone: usually an FCN network to extract feature maps, e.g., ResNet, BNInception. +- cls_head: the component for classification task, usually contains an FC layer with some pooling layers. +- localizer: the model for localization task, currently available: BSN, BMN. + +Following some basic pipelines (e.g., `Recognizer2D`), the model structure +can be customized through config files with no pains. + +If we want to implement some new components, e.g., the temporal shift backbone structure as +in [TSM: Temporal Shift Module for Efficient Video Understanding](https://arxiv.org/abs/1811.08383), there are several things to do. + +1. create a new file in `mmaction/models/backbones/resnet_tsm.py`. + + ```python + from ..registry import BACKBONES + from .resnet import ResNet + + @BACKBONES.register_module() + class ResNetTSM(ResNet): + + def __init__(self, + depth, + num_segments=8, + is_shift=True, + shift_div=8, + shift_place='blockres', + temporal_pool=False, + **kwargs): + pass + + def forward(self, x): + # implementation is ignored + pass + ``` + +2. Import the module in `mmaction/models/backbones/__init__.py` + ```python + from .resnet_tsm import ResNetTSM + ``` + +3. modify the config file from + + ```python + backbone=dict( + type='ResNet', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False) + ``` + to + ```python + backbone=dict( + type='ResNetTSM', + pretrained='torchvision://resnet50', + depth=50, + norm_eval=False, + shift_div=8) + ``` + +### Write a new model + +To write a new recognition pipeline, you need to inherit from `BaseRecognizer`, +which defines the following abstract methods. + +- `forward_train()`: forward method of the training mode. +- `forward_test()`: forward method of the testing mode. + +[Recognizer2D](/mmaction/models/recognizers/recognizer2d.py) and [Recognizer3D](/mmaction/models/recognizers/recognizer3d.py) +are good examples which show how to do that. + + +## Train a Model + +### Iteration pipeline + +MMAction2 implements distributed training and non-distributed training, +which uses `MMDistributedDataParallel` and `MMDataParallel` respectively. + +We adopt distributed training for both single machine and multiple machines. +Supposing that the server has 8 GPUs, 8 processes will be started and each process runs on a single GPU. + +Each process keeps an isolated model, data loader, and optimizer. +Model parameters are only synchronized once at the beginning. +After a forward and backward pass, gradients will be allreduced among all GPUs, +and the optimizer will update model parameters. +Since the gradients are allreduced, the model parameter stays the same for all processes after the iteration. + +### Training setting + +All outputs (log files and checkpoints) will be saved to the working directory, +which is specified by `work_dir` in the config file. 
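+For example, a config could set it as below (the directory name is only an illustration; it can also be overridden on the command line with `--work-dir`):
+
+```python
+# all logs and checkpoints produced by this run will be written here
+work_dir = './work_dirs/tsn_r50_1x1x3_100e_kinetics400_rgb'
+```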
+ +By default we evaluate the model on the validation set after each epoch, you can change the evaluation interval by modifying the interval argument in the training config +```python +evaluation = dict(interval=5) # This evaluate the model per 5 epoch. +``` + +According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you need to set the learning rate proportional to the batch size if you use different GPUs or videos per GPU, e.g., lr=0.01 for 4 GPUs * 2 video/gpu and lr=0.08 for 16 GPUs * 4 video/gpu. + +### Train with a single GPU + +```shell +python tools/train.py ${CONFIG_FILE} [optional arguments] +``` + +If you want to specify the working directory in the command, you can add an argument `--work-dir ${YOUR_WORK_DIR}`. + +### Train with multiple GPUs + +```shell +./tools/dist_train.sh ${CONFIG_FILE} ${GPU_NUM} [optional arguments] +``` + +Optional arguments are: + +- `--validate` (**strongly recommended**): Perform evaluation at every k (default value is 5, which can be modified by changing the `interval` value in `evaluation` dict in each config file) epochs during the training. +- `--work-dir ${WORK_DIR}`: Override the working directory specified in the config file. +- `--resume-from ${CHECKPOINT_FILE}`: Resume from a previous checkpoint file. +- `--gpus ${GPU_NUM}`: Number of gpus to use, which is only applicable to non-distributed training. +- `--gpu-ids ${GPU_IDS}`: IDs of gpus to use, which is only applicable to non-distributed training. +- `--seed ${SEED}`: Seed id for random state in python, numpy and pytorch to generate random numbers. +- `--deterministic`: If specified, it will set deterministic options for CUDNN backend. +- `JOB_LAUNCHER`: Items for distributed job initialization launcher. Allowed choices are `none`, `pytorch`, `slurm`, `mpi`. Especially, if set to none, it will test in a non-distributed mode. +- `LOCAL_RANK`: ID for local rank. If not specified, it will be set to 0. + +Difference between `resume-from` and `load-from`: +`resume-from` loads both the model weights and optimizer status, and the epoch is also inherited from the specified checkpoint. It is usually used for resuming the training process that is interrupted accidentally. +`load-from` only loads the model weights and the training epoch starts from 0. It is usually used for finetuning. + +Here is an example of using 8 GPUs to load TSN checkpoint. + +```shell +./tools/dist_train.sh configs/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py 8 --resume-from work_dirs/tsn_r50_1x1x3_100e_kinetics400_rgb/latest.pth +``` + +### Train with multiple machines + +If you can run MMAction2 on a cluster managed with [slurm](https://slurm.schedmd.com/), you can use the script `slurm_train.sh`. (This script also supports single machine training.) + +```shell +[GPUS=${GPUS}] ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} [--work-dir ${WORK_DIR}] +``` + +Here is an example of using 16 GPUs to train TSN on the dev partition in a slurm cluster. (use `GPUS_PER_NODE=8` to specify a single slurm cluster node with 8 GPUs.) + +```shell +GPUS=16 ./tools/slurm_train.sh dev tsn_r50_k400 configs/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py --work-dir work_dirs/tsn_r50_1x1x3_100e_kinetics400_rgb +``` + +You can check [slurm_train.sh](/tools/slurm_train.sh) for full arguments and environment variables. + +If you have just multiple machines connected with ethernet, you can refer to +pytorch [launch utility](https://pytorch.org/docs/stable/distributed.html#launch-utility). 
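+As a rough sketch (the master address, port, and node ranks below are placeholders; check the launch utility documentation of your PyTorch version for the exact arguments), training the same config on two 8-GPU machines could look like:
+
+```shell
+# on the first machine (node rank 0)
+python -m torch.distributed.launch --nproc_per_node=8 --nnodes=2 --node_rank=0 \
+    --master_addr=192.168.1.1 --master_port=29500 \
+    tools/train.py ${CONFIG_FILE} --launcher pytorch
+
+# on the second machine (node rank 1)
+python -m torch.distributed.launch --nproc_per_node=8 --nnodes=2 --node_rank=1 \
+    --master_addr=192.168.1.1 --master_port=29500 \
+    tools/train.py ${CONFIG_FILE} --launcher pytorch
+```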
+Usually it is slow if you do not have high speed networking like InfiniBand. + +### Launch multiple jobs on a single machine + +If you launch multiple jobs on a single machine, e.g., 2 jobs of 4-GPU training on a machine with 8 GPUs, +you need to specify different ports (29500 by default) for each job to avoid communication conflict. + +If you use `dist_train.sh` to launch training jobs, you can set the port in commands. + +```shell +CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 ./tools/dist_train.sh ${CONFIG_FILE} 4 +CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=29501 ./tools/dist_train.sh ${CONFIG_FILE} 4 +``` + +If you use launch training jobs with slurm, you need to modify the config files (usually the 6th line from the bottom in config files) to set different communication ports. + +In `config1.py`, +```python +dist_params = dict(backend='nccl', port=29500) +``` + +In `config2.py`, +```python +dist_params = dict(backend='nccl', port=29501) +``` + +Then you can launch two jobs with `config1.py` ang `config2.py`. + +```shell +CUDA_VISIBLE_DEVICES=0,1,2,3 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config1.py [--work-dir ${WORK_DIR}] +CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config2.py [--work-dir ${WORK_DIR}] +``` + +## Tutorials + +Currently, we provide some tutorials for users to [finetune model](tutorials/finetune.md), +[add new dataset](tutorials/new_dataset.md), [customize data pipelines](tutorials/data_pipeline.md), +[add new modules](tutorials/new_modules.md), [export a model to ONNX](tutorials/export_model.md) and [customize runtime settings](tutorials/customize_runtime.md). diff --git a/Downstream/Open-Set-Action-Recognition/docs/imgs/acc_curve.png b/Downstream/Open-Set-Action-Recognition/docs/imgs/acc_curve.png new file mode 100644 index 0000000..27a2f08 Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/docs/imgs/acc_curve.png differ diff --git a/Downstream/Open-Set-Action-Recognition/docs/imgs/data_pipeline.png b/Downstream/Open-Set-Action-Recognition/docs/imgs/data_pipeline.png new file mode 100644 index 0000000..c5217b1 Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/docs/imgs/data_pipeline.png differ diff --git a/Downstream/Open-Set-Action-Recognition/docs/imgs/mmaction2_logo.png b/Downstream/Open-Set-Action-Recognition/docs/imgs/mmaction2_logo.png new file mode 100644 index 0000000..f0c759b Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/docs/imgs/mmaction2_logo.png differ diff --git a/Downstream/Open-Set-Action-Recognition/docs/imgs/mmaction2_overview.gif b/Downstream/Open-Set-Action-Recognition/docs/imgs/mmaction2_overview.gif new file mode 100644 index 0000000..9e77ed8 Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/docs/imgs/mmaction2_overview.gif differ diff --git a/Downstream/Open-Set-Action-Recognition/docs/index.rst b/Downstream/Open-Set-Action-Recognition/docs/index.rst new file mode 100644 index 0000000..39bfd1d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/docs/index.rst @@ -0,0 +1,63 @@ +Welcome to MMAction2's documentation! +===================================== + +.. toctree:: + :maxdepth: 2 + + install.md + getting_started.md + demo.md + benchmark.md + config.md + +.. toctree:: + :maxdepth: 2 + :caption: Datasets + + data_preparation.md + supported_datasets.md + +.. toctree:: + :maxdepth: 2 + :caption: Model Zoo + + modelzoo.md + recognition_models.md + localization_models.md + +.. 
toctree:: + :maxdepth: 2 + :caption: Tutorials + + tutorials/finetune.md + tutorials/new_dataset.md + tutorials/data_pipeline.md + tutorials/new_modules.md + tutorials/export_model.md + tutorials/customize_runtime.md + +.. toctree:: + :maxdepth: 2 + :caption: Useful Tools and Scripts + + useful_tools.md + +.. toctree:: + :maxdepth: 2 + :caption: Notes + + changelog.md + faq.md + +.. toctree:: + :caption: API Reference + + api.rst + + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`search` diff --git a/Downstream/Open-Set-Action-Recognition/docs/install.md b/Downstream/Open-Set-Action-Recognition/docs/install.md new file mode 100644 index 0000000..fb3a889 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/docs/install.md @@ -0,0 +1,218 @@ +## Installation + +We provide some tips for MMAction2 installation in this file. + + + +- [Requirements](#requirements) +- [Install MMAction2](#install-mmaction2) +- [Install with CPU only](#install-with-cpu-only) +- [Another option: Docker Image](#another-option--docker-image) +- [A from-scratch setup script](#a-from-scratch-setup-script) +- [Developing with multiple MMAction2 versions](#developing-with-multiple-mmaction2-versions) +- [Verification](#verification) + + + +### Requirements + +- Linux (Windows is not officially supported) +- Python 3.6+ +- PyTorch 1.3+ +- CUDA 9.2+ (If you build PyTorch from source, CUDA 9.0 is also compatible) +- GCC 5+ +- [mmcv](https://github.com/open-mmlab/mmcv) 1.1.1+ +- Numpy +- ffmpeg (4.2 is preferred) +- [decord](https://github.com/dmlc/decord) (optional, 0.4.1+): Install CPU version by `pip install decord==0.4.1` and install GPU version from source +- [PyAV](https://github.com/mikeboers/PyAV) (optional): `conda install av -c conda-forge -y` +- [PyTurboJPEG](https://github.com/lilohuang/PyTurboJPEG) (optional): `pip install PyTurboJPEG` +- [denseflow](https://github.com/open-mmlab/denseflow) (optional): See [here](https://github.com/innerlee/setup) for simple install scripts. +- [moviepy](https://zulko.github.io/moviepy/) (optional): `pip install moviepy`. See [here](https://zulko.github.io/moviepy/install.html) for official installation. **Note**(according to [this issue](https://github.com/Zulko/moviepy/issues/693)) that: + 1. For Windows users, [ImageMagick](https://www.imagemagick.org/script/index.php) will not be automatically detected by MoviePy, + there is a need to modify `moviepy/config_defaults.py` file by providing the path to the ImageMagick binary called `magick`, like `IMAGEMAGICK_BINARY = "C:\\Program Files\\ImageMagick_VERSION\\magick.exe"` + 2. For Linux users, there is a need to modify the `/etc/ImageMagick-6/policy.xml` file by commenting out + `` to ``, if [ImageMagick](https://www.imagemagick.org/script/index.php) is not detected by `moviepy`. +- [Pillow-SIMD](https://docs.fast.ai/performance.html#pillow-simd) (optional): Install it by the following scripts. +```shell +conda uninstall -y --force pillow pil jpeg libtiff libjpeg-turbo +pip uninstall -y pillow pil jpeg libtiff libjpeg-turbo +conda install -yc conda-forge libjpeg-turbo +CFLAGS="${CFLAGS} -mavx2" pip install --upgrade --no-cache-dir --force-reinstall --no-binary :all: --compile pillow-simd +conda install -y jpeg libtiff +``` + +**Note**: You need to run `pip uninstall mmcv` first if you have mmcv installed. +If mmcv and mmcv-full are both installed, there will be `ModuleNotFoundError`. + +### Install MMAction2 + +a. Create a conda virtual environment and activate it. 
+ +```shell +conda create -n open-mmlab python=3.7 -y +conda activate open-mmlab +``` + +b. Install PyTorch and torchvision following the [official instructions](https://pytorch.org/), e.g., + +```shell +conda install pytorch torchvision -c pytorch +``` + +Note: Make sure that your compilation CUDA version and runtime CUDA version match. +You can check the supported CUDA version for precompiled packages on the [PyTorch website](https://pytorch.org/). + +`E.g.1` If you have CUDA 10.1 installed under `/usr/local/cuda` and would like to install PyTorch 1.5, +you need to install the prebuilt PyTorch with CUDA 10.1. + +```shell +conda install pytorch cudatoolkit=10.1 torchvision -c pytorch +``` + +`E.g.2` If you have CUDA 9.2 installed under `/usr/local/cuda` and would like to install PyTorch 1.3.1., +you need to install the prebuilt PyTorch with CUDA 9.2. + +```shell +conda install pytorch=1.3.1 cudatoolkit=9.2 torchvision=0.4.2 -c pytorch +``` + +If you build PyTorch from source instead of installing the prebuilt package, you can use more CUDA versions such as 9.0. + +c. Install mmcv, we recommend you to install the pre-build mmcv as below. + +```shell +pip install mmcv-full==latest+torch1.5.0+cu101 -f https://download.openmmlab.com/mmcv/dist/index.html +``` + +See [here](https://github.com/open-mmlab/mmcv#install-with-pip) for different versions of MMCV compatible to different PyTorch and CUDA versions. +Optionally you can choose to compile mmcv from source by the following command + +```shell +git clone https://github.com/open-mmlab/mmcv.git +cd mmcv +MMCV_WITH_OPS=1 pip install -e . # package mmcv-full, which contains cuda ops, will be installed after this step +# OR pip install -e . # package mmcv, which contains no cuda ops, will be installed after this step +cd .. +``` + +Or directly run + +```shell +pip install mmcv-full +# alternative: pip install mmcv +``` + +**Important:** You need to run `pip uninstall mmcv` first if you have mmcv installed. If mmcv and mmcv-full are both installed, there will be `ModuleNotFoundError`. + +d. Clone the MMAction2 repository + +```shell +git clone https://github.com/open-mmlab/mmaction2.git +cd mmaction2 +``` + +d. Install build requirements and then install MMAction2. + +```shell +pip install -r requirements/build.txt +pip install -v -e . # or "python setup.py develop" +``` + +If you build MMAction2 on macOS, replace the last command with + +``` +CC=clang CXX=clang++ CFLAGS='-stdlib=libc++' pip install -e . +``` + + +Note: + +1. The git commit id will be written to the version number with step d, e.g. 0.6.0+2e7045c. The version will also be saved in trained models. +It is recommended that you run step d each time you pull some updates from github. If C++/CUDA codes are modified, then this step is compulsory. + +2. Following the above instructions, MMAction2 is installed on `dev` mode, any local modifications made to the code will take effect without the need to reinstall it (unless you submit some commits and want to update the version number). + +3. If you would like to use `opencv-python-headless` instead of `opencv-python`, +you can install it before installing MMCV. + +4. If you would like to use `PyAV`, you can install it with `conda install av -c conda-forge -y`. + +5. Some dependencies are optional. Running `python setup.py develop` will only install the minimum runtime requirements. 
+To use optional dependencies like `decord`, either install them with `pip install -r requirements/optional.txt` +or specify desired extras when calling `pip` (e.g. `pip install -v -e .[optional]`, +valid keys for the `[optional]` field are `all`, `tests`, `build`, and `optional`) like `pip install -v -e .[tests,build]`. + +### Install with CPU only +The code can be built for CPU only environment (where CUDA isn't available). + +In CPU mode you can run the demo/demo.py for example. + +### Another option: Docker Image + +We provide a [Dockerfile](/docker/Dockerfile) to build an image. + +```shell +# build an image with PyTorch 1.6.0, CUDA 10.1, CUDNN 7. +docker build -f ./docker/Dockerfile --rm -t mmaction2 . +``` + +**Important:** Make sure you've installed the [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker). + +Run it with command: + +```shell +docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/mmaction2/data mmaction2 +``` + +### A from-scratch setup script + +Here is a full script for setting up MMAction2 with conda and link the dataset path (supposing that your Kinetics-400 dataset path is $KINETICS400_ROOT). + +```shell +conda create -n open-mmlab python=3.7 -y +conda activate open-mmlab + +# install latest pytorch prebuilt with the default prebuilt CUDA version (usually the latest) +conda install -c pytorch pytorch torchvision -y + +# install the latest mmcv or mmcv-full, here we take mmcv as example +pip install mmcv + +# install mmaction2 +git clone https://github.com/open-mmlab/mmaction2.git +cd mmaction2 +pip install -r requirements/build.txt +python setup.py develop + +mkdir data +ln -s $KINETICS400_ROOT data +``` + +### Developing with multiple MMAction2 versions + +The train and test scripts already modify the `PYTHONPATH` to ensure the script use the MMAction2 in the current directory. + +To use the default MMAction2 installed in the environment rather than that you are working with, you can remove the following line in those scripts. + +```shell +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH +``` + +### Verification + +To verify whether MMAction2 and the required environment are installed correctly, +we can run sample python codes to initialize a recognizer and inference a demo video: + +```python +import torch +from mmaction.apis import init_recognizer, inference_recognizer + +config_file = 'configs/recognition/tsn/tsn_r50_video_inference_1x1x3_100e_kinetics400_rgb.py' +device = 'cuda:0' # or 'cpu' +device = torch.device(device) + +model = init_recognizer(config_file, device=device) +# inference the demo video +inference_recognizer(model, 'demo/demo.mp4', 'demo/label_map.txt') +``` diff --git a/Downstream/Open-Set-Action-Recognition/docs/make.bat b/Downstream/Open-Set-Action-Recognition/docs/make.bat new file mode 100644 index 0000000..922152e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/docs/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=_build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. 
+ echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/Downstream/Open-Set-Action-Recognition/docs/merge_docs.sh b/Downstream/Open-Set-Action-Recognition/docs/merge_docs.sh new file mode 100644 index 0000000..957e8be --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/docs/merge_docs.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash + +sed -i '$a\\n' ../demo/README.md + +sed -i 's/(\/tools\/data\/activitynet\/README.md/(#activitynet/g' supported_datasets.md +sed -i 's/(\/tools\/data\/kinetics\/README.md/(#kinetics-400-600-700/g' supported_datasets.md +sed -i 's/(\/tools\/data\/mit\/README.md/(#moments-in-time/g' supported_datasets.md +sed -i 's/(\/tools\/data\/mmit\/README.md/(#multi-moments-in-time/g' supported_datasets.md +sed -i 's/(\/tools\/data\/sthv1\/README.md/(#something-something-v1/g' supported_datasets.md +sed -i 's/(\/tools\/data\/sthv2\/README.md/(#something-something-v2/g' supported_datasets.md +sed -i 's/(\/tools\/data\/thumos14\/README.md/(#thumos-14/g' supported_datasets.md +sed -i 's/(\/tools\/data\/ucf101\/README.md/(#ucf-101/g' supported_datasets.md +sed -i 's/(\/tools\/data\/ucf101_24\/README.md/(#ucf101-24/g' supported_datasets.md +sed -i 's/(\/tools\/data\/jhmdb\/README.md/(#jhmdb/g' supported_datasets.md +sed -i 's/(\/tools\/data\/hvu\/README.md/(#hvu/g' supported_datasets.md +sed -i 's/(\/tools\/data\/hmdb51\/README.md/(#hmdb51/g' supported_datasets.md +sed -i 's/(\/tools\/data\/jester\/README.md/(#jester/g' supported_datasets.md +sed -i 's/(\/tools\/data\/ava\/README.md/(#ava/g' supported_datasets.md +sed -i 's/(\/tools\/data\/gym\/README.md/(#gym/g' supported_datasets.md + +cat ../configs/localization/*/*.md > localization_models.md +cat ../configs/recognition/*/*.md > recognition_models.md +cat ../configs/recognition_audio/*/*.md >> recognition_models.md +cat ../tools/data/*/*.md > prepare_data.md +cat ../demo/README.md > demo.md + +sed -i 's/#/##&/' localization_models.md +sed -i 's/#/##&/' recognition_models.md +sed -i 's/md###t/html#t/g' localization_models.md +sed -i 's/md###t/html#t/g' recognition_models.md +sed -i "s/md###t/html#t/g" demo.md + +sed -i 's/# Preparing/# /g' prepare_data.md +sed -i 's/#/##&/' prepare_data.md + +sed -i '1i\## Action Localization Models' localization_models.md +sed -i '1i\## Action Recognition Models' recognition_models.md + +cat prepare_data.md >> supported_datasets.md + +sed -i 's/](\/docs\//](/g' recognition_models.md # remove /docs/ for link used in doc site +sed -i 's/](\/docs\//](/g' localization_models.md +sed -i 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g' recognition_models.md +sed -i 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g' localization_models.md +sed -i 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g' benchmark.md +sed -i 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g' getting_started.md +sed -i 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g' install.md +sed -i 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g' config.md +sed -i 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g' changelog.md +sed -i 's/](\/docs\//](/g' ./tutorials/*.md +sed -i 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g' ./tutorials/*.md +sed -i 's/](\/docs\//](/g' supported_datasets.md +sed -i 
's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g' supported_datasets.md +sed -i 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g' demo.md diff --git a/Downstream/Open-Set-Action-Recognition/docs/stat.py b/Downstream/Open-Set-Action-Recognition/docs/stat.py new file mode 100644 index 0000000..8555089 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/docs/stat.py @@ -0,0 +1,59 @@ +#!/usr/bin/env python +import functools as func +import glob +import re + +files = sorted(glob.glob('*_models.md')) + +stats = [] + +for f in files: + with open(f, 'r') as content_file: + content = content_file.read() + + # title + title = content.split('\n')[0].replace('#', '') + + # count papers + papers = set(x.lower().strip() + for x in re.findall(r'\btitle={(.*)}', content)) + paperlist = '\n'.join(sorted(' - ' + x for x in papers)) + + # count configs + configs = set(x.lower().strip() + for x in re.findall(r'https.*configs/.*\.py', content)) + + # count ckpts + ckpts = set(x.lower().strip() + for x in re.findall(r'https://download.*\.pth', content) + if 'mmaction' in x) + + statsmsg = f""" +## [{title}]({f}) + +* Number of checkpoints: {len(ckpts)} +* Number of configs: {len(configs)} +* Number of papers: {len(papers)} +{paperlist} + + """ + + stats.append((papers, configs, ckpts, statsmsg)) + +allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _, _ in stats]) +allconfigs = func.reduce(lambda a, b: a.union(b), [c for _, c, _, _ in stats]) +allckpts = func.reduce(lambda a, b: a.union(b), [c for _, _, c, _ in stats]) +msglist = '\n'.join(x for _, _, _, x in stats) + +modelzoo = f""" +# Model Zoo Statistics + +* Number of checkpoints: {len(allckpts)} +* Number of configs: {len(allconfigs)} +* Number of papers: {len(allpapers)} + +{msglist} +""" + +with open('modelzoo.md', 'w') as f: + f.write(modelzoo) diff --git a/Downstream/Open-Set-Action-Recognition/docs/supported_datasets.md b/Downstream/Open-Set-Action-Recognition/docs/supported_datasets.md new file mode 100644 index 0000000..1caccee --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/docs/supported_datasets.md @@ -0,0 +1,21 @@ +# Supported Datasets + +- [UCF101](https://www.crcv.ucf.edu/research/data-sets/ucf101/): See [preparing_ucf101](/tools/data/ucf101/README.md). +- [HMDB51](https://serre-lab.clps.brown.edu/resource/hmdb-a-large-human-motion-database/): See [preparing_hmdb51](/tools/data/hmdb51/README.md). 
+- [Kinetics-[400/600/700]](https://deepmind.com/research/open-source/kinetics): See [preparing_kinetics](/tools/data/kinetics/README.md) +- [THUMOS14](https://www.crcv.ucf.edu/THUMOS14/download.html): See [preparing_thumos14](/tools/data/thumos14/README.md) +- [Something-Something V1](https://20bn.com/datasets/something-something/v1): See [preparing_sthv1](/tools/data/sthv1/README.md) +- [Something-Something V2](https://20bn.com/datasets/something-something): See [preparing_sthv2](/tools/data/sthv2/README.md) +- [Moments in Time](http://moments.csail.mit.edu/): See [preparing_mit](/tools/data/mit/README.md) +- [Multi-Moments in Time](http://moments.csail.mit.edu/challenge_iccv_2019.html): See [preparing_mmit](/tools/data/mmit/README.md) +- [ActivityNet](http://activity-net.org/): See [praparing_activitynet](/tools/data/activitynet/README.md) +- [UCF101-24](http://www.thumos.info/download.html): See [preparing_ucf101_24](/tools/data/ucf101_24/README.md) +- [JHMDB](http://jhmdb.is.tue.mpg.de/): See [preparing_jhmdb](/tools/data/jhmdb/README.md) +- [HVU](https://github.com/holistic-video-understanding/HVU-Dataset): See [preparing_hvu](/tools/data/hvu/README.md) +- [Jester](https://20bn.com/datasets/jester/v1): See [preparing_jester](/tools/data/jester/README.md) +- [AVA](https://research.google.com/ava/index.html): See [preparing_ava](/tools/data/ava/README.md) +- [GYM](https://sdolivia.github.io/FineGym/): See [preparing_gym](/tools/data/gym/README.md) + +The supported datasets are listed above. +We provide shell scripts for data preparation under the path `$MMACTION2/tools/data/`. +Below is the detailed tutorials of data deployment for each dataset. diff --git a/Downstream/Open-Set-Action-Recognition/docs/tutorials/customize_runtime.md b/Downstream/Open-Set-Action-Recognition/docs/tutorials/customize_runtime.md new file mode 100644 index 0000000..60199cf --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/docs/tutorials/customize_runtime.md @@ -0,0 +1,353 @@ +# Tutorial 6: Customize Runtime Settings + +In this tutorial, we will introduce some methods about how to customize optimization methods, training schedules, workflow and hooks when running your own settings for the project. + + + +- [Customize Optimization Methods](#customize-optimization-methods) + * [Customize optimizer supported by PyTorch](#customize-optimizer-supported-by-pytorch) + * [Customize self-implemented optimizer](#customize-self-implemented-optimizer) + + [1. Define a new optimizer](#1-define-a-new-optimizer) + + [2. Add the optimizer to registry](#2-add-the-optimizer-to-registry) + + [3. Specify the optimizer in the config file](#3-specify-the-optimizer-in-the-config-file) + * [Customize optimizer constructor](#customize-optimizer-constructor) + * [Additional settings](#additional-settings) +- [Customize Training Schedules](#customize-training-schedules) +- [Customize Workflow](#customize-workflow) +- [Customize Hooks](#customize-hooks) + * [Customize self-implemented hooks](#customize-self-implemented-hooks) + + [1. Implement a new hook](#1-implement-a-new-hook) + + [2. Register the new hook](#2-register-the-new-hook) + + [3. 
Modify the config](#3-modify-the-config) + * [Use hooks implemented in MMCV](#use-hooks-implemented-in-mmcv) + * [Modify default runtime hooks](#modify-default-runtime-hooks) + + [Checkpoint config](#checkpoint-config) + + [Log config](#log-config) + + [Evaluation config](#evaluation-config) + + + +## Customize Optimization Methods + +### Customize optimizer supported by PyTorch + +We already support to use all the optimizers implemented by PyTorch, and the only modification is to change the `optimizer` field of config files. +For example, if you want to use `Adam`, the modification could be as the following. + +```python +optimizer = dict(type='Adam', lr=0.0003, weight_decay=0.0001) +``` + +To modify the learning rate of the model, the users only need to modify the `lr` in the config of optimizer. +The users can directly set arguments following the [API doc](https://pytorch.org/docs/stable/optim.html?highlight=optim#module-torch.optim) of PyTorch. + +For example, if you want to use `Adam` with the setting like `torch.optim.Adam(parms, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)` in PyTorch, +the modification could be set as the following. + +```python +optimizer = dict(type='Adam', lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False) +``` + +### Customize self-implemented optimizer + +#### 1. Define a new optimizer + +A customized optimizer could be defined as following. + +Assume you want to add an optimizer named `MyOptimizer`, which has arguments `a`, `b`, and `c`. +You need to create a new directory named `mmaction/core/optimizer`. +And then implement the new optimizer in a file, e.g., in `mmaction/core/optimizer/my_optimizer.py`: + +```python +from .registry import OPTIMIZERS +from torch.optim import Optimizer + + +@OPTIMIZERS.register_module() +class MyOptimizer(Optimizer): + + def __init__(self, a, b, c): + +``` + +#### 2. Add the optimizer to registry + +To find the above module defined above, this module should be imported into the main namespace at first. There are two ways to achieve it. + +- Modify `mmaction/core/optimizer/__init__.py` to import it. + + The newly defined module should be imported in `mmaction/core/optimizer/__init__.py` so that the registry will + find the new module and add it: + +```python +from .my_optimizer import MyOptimizer +``` + +- Use `custom_imports` in the config to manually import it + +```python +custom_imports = dict(imports=['mmaction.core.optimizer.my_optimizer'], allow_failed_imports=False) +``` + +The module `mmaction.core.optimizer.my_optimizer` will be imported at the beginning of the program and the class `MyOptimizer` is then automatically registered. +Note that only the package containing the class `MyOptimizer` should be imported. `mmaction.core.optimizer.my_optimizer.MyOptimizer` **cannot** be imported directly. + +#### 3. Specify the optimizer in the config file + +Then you can use `MyOptimizer` in `optimizer` field of config files. +In the configs, the optimizers are defined by the field `optimizer` like the following: + +```python +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +``` + +To use your own optimizer, the field can be changed to + +```python +optimizer = dict(type='MyOptimizer', a=a_value, b=b_value, c=c_value) +``` + +### Customize optimizer constructor + +Some models may have some parameter-specific settings for optimization, e.g. weight decay for BatchNorm layers. 
+The users can do those fine-grained parameter tuning through customizing optimizer constructor. + +```python +from mmcv.utils import build_from_cfg + +from mmcv.runner.optimizer import OPTIMIZER_BUILDERS, OPTIMIZERS +from mmaction.utils import get_root_logger +from .my_optimizer import MyOptimizer + + +@OPTIMIZER_BUILDERS.register_module() +class MyOptimizerConstructor: + + def __init__(self, optimizer_cfg, paramwise_cfg=None): + pass + + def __call__(self, model): + + return my_optimizer +``` + +The default optimizer constructor is implemented [here](https://github.com/open-mmlab/mmcv/blob/9ecd6b0d5ff9d2172c49a182eaa669e9f27bb8e7/mmcv/runner/optimizer/default_constructor.py#L11), +which could also serve as a template for new optimizer constructor. + +### Additional settings + +Tricks not implemented by the optimizer should be implemented through optimizer constructor (e.g., set parameter-wise learning rates) or hooks. +We list some common settings that could stabilize the training or accelerate the training. Feel free to create PR, issue for more settings. + +- __Use gradient clip to stabilize training__: + Some models need gradient clip to clip the gradients to stabilize the training process. An example is as below: + + ```python + optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) + ``` + +- __Use momentum schedule to accelerate model convergence__: + We support momentum scheduler to modify model's momentum according to learning rate, which could make the model converge in a faster way. + Momentum scheduler is usually used with LR scheduler, for example, the following config is used in 3D detection to accelerate convergence. + For more details, please refer to the implementation of [CyclicLrUpdater](https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/lr_updater.py#L327) + and [CyclicMomentumUpdater](https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/momentum_updater.py#L130). + + ```python + lr_config = dict( + policy='cyclic', + target_ratio=(10, 1e-4), + cyclic_times=1, + step_ratio_up=0.4, + ) + momentum_config = dict( + policy='cyclic', + target_ratio=(0.85 / 0.95, 1), + cyclic_times=1, + step_ratio_up=0.4, + ) + ``` + +## Customize Training Schedules + +we use step learning rate with default value in config files, this calls [`StepLRHook`](https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/lr_updater.py#L153) in MMCV. +We support many other learning rate schedule [here](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py), such as `CosineAnnealing` and `Poly` schedule. Here are some examples + +- Poly schedule: + + ```python + lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) + ``` + +- ConsineAnnealing schedule: + + ```python + lr_config = dict( + policy='CosineAnnealing', + warmup='linear', + warmup_iters=1000, + warmup_ratio=1.0 / 10, + min_lr_ratio=1e-5) + ``` + +## Customize Workflow + +By default, we recommend users to use `EpochEvalHook` to do evaluation after training epoch, but they can still use `val` workflow as an alternative. + +Workflow is a list of (phase, epochs) to specify the running order and epochs. By default it is set to be + +```python +workflow = [('train', 1)] +``` + +which means running 1 epoch for training. +Sometimes user may want to check some metrics (e.g. loss, accuracy) about the model on the validate set. 
+In such case, we can set the workflow as + +```python +[('train', 1), ('val', 1)] +``` + +so that 1 epoch for training and 1 epoch for validation will be run iteratively. + +**Note**: + +1. The parameters of model will not be updated during val epoch. +2. Keyword `total_epochs` in the config only controls the number of training epochs and will not affect the validation workflow. +3. Workflows `[('train', 1), ('val', 1)]` and `[('train', 1)]` will not change the behavior of `EpochEvalHook` because `EpochEvalHook` is called by `after_train_epoch` and validation workflow only affect hooks that are called through `after_val_epoch`. +Therefore, the only difference between `[('train', 1), ('val', 1)]` and ``[('train', 1)]`` is that the runner will calculate losses on validation set after each training epoch. + +## Customize Hooks + +### Customize self-implemented hooks + +#### 1. Implement a new hook + +Here we give an example of creating a new hook in MMAction2 and using it in training. + +```python +from mmcv.runner import HOOKS, Hook + + +@HOOKS.register_module() +class MyHook(Hook): + + def __init__(self, a, b): + pass + + def before_run(self, runner): + pass + + def after_run(self, runner): + pass + + def before_epoch(self, runner): + pass + + def after_epoch(self, runner): + pass + + def before_iter(self, runner): + pass + + def after_iter(self, runner): + pass +``` + +Depending on the functionality of the hook, the users need to specify what the hook will do at each stage of the training in `before_run`, `after_run`, `before_epoch`, `after_epoch`, `before_iter`, and `after_iter`. + +#### 2. Register the new hook + +Then we need to make `MyHook` imported. Assuming the file is in `mmaction/core/utils/my_hook.py` there are two ways to do that: + +- Modify `mmaction/core/utils/__init__.py` to import it. + + The newly defined module should be imported in `mmaction/core/utils/__init__.py` so that the registry will + find the new module and add it: + +```python +from .my_hook import MyHook +``` + +- Use `custom_imports` in the config to manually import it + +```python +custom_imports = dict(imports=['mmaction.core.utils.my_hook'], allow_failed_imports=False) +``` + +#### 3. Modify the config + +```python +custom_hooks = [ + dict(type='MyHook', a=a_value, b=b_value) +] +``` + +You can also set the priority of the hook by adding key `priority` to `'NORMAL'` or `'HIGHEST'` as below + +```python +custom_hooks = [ + dict(type='MyHook', a=a_value, b=b_value, priority='NORMAL') +] +``` + +By default the hook's priority is set as `NORMAL` during registration. + + +### Use hooks implemented in MMCV + +If the hook is already implemented in MMCV, you can directly modify the config to use the hook as below + +```python +mmcv_hooks = [ + dict(type='MMCVHook', a=a_value, b=b_value, priority='NORMAL') +] +``` + +### Modify default runtime hooks + +There are some common hooks that are not registered through `custom_hooks` but has been registered by default when importing MMCV, they are + +- log_config +- checkpoint_config +- evaluation +- lr_config +- optimizer_config +- momentum_config + +In those hooks, only the logger hook has the `VERY_LOW` priority, others' priority are `NORMAL`. +The above-mentioned tutorials already cover how to modify `optimizer_config`, `momentum_config`, and `lr_config`. +Here we reveals how what we can do with `log_config`, `checkpoint_config`, and `evaluation`. 
+ +#### Checkpoint config + +The MMCV runner will use `checkpoint_config` to initialize [`CheckpointHook`](https://github.com/open-mmlab/mmcv/blob/9ecd6b0d5ff9d2172c49a182eaa669e9f27bb8e7/mmcv/runner/hooks/checkpoint.py#L9). + +```python +checkpoint_config = dict(interval=1) +``` + +The users could set `max_keep_ckpts` to only save only small number of checkpoints or decide whether to store state dict of optimizer by `save_optimizer`. +More details of the arguments are [here](https://mmcv.readthedocs.io/en/latest/api.html#mmcv.runner.CheckpointHook) + +#### Log config + +The `log_config` wraps multiple logger hooks and enables to set intervals. Now MMCV supports `WandbLoggerHook`, `MlflowLoggerHook`, and `TensorboardLoggerHook`. +The detail usages can be found in the [doc](https://mmcv.readthedocs.io/en/latest/api.html#mmcv.runner.LoggerHook). + +```python +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook') + ]) +``` + +#### Evaluation config + +The config of `evaluation` will be used to initialize the [`EpochEvalHook`](https://github.com/open-mmlab/mmaction2/blob/master/mmaction/core/evaluation/eval_hooks.py#L12). +Except the key `interval`, other arguments such as `metrics` will be passed to the `dataset.evaluate()` + +```python +evaluation = dict(interval=1, metrics='bbox') +``` diff --git a/Downstream/Open-Set-Action-Recognition/docs/tutorials/data_pipeline.md b/Downstream/Open-Set-Action-Recognition/docs/tutorials/data_pipeline.md new file mode 100644 index 0000000..d305987 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/docs/tutorials/data_pipeline.md @@ -0,0 +1,213 @@ +# Tutorial 3: Customize Data Pipelines + +In this tutorial, we will introduce some methods about the design of data pipelines, and how to customize and extend your own data pipelines for the project. + + + +- [Design of Data Pipelines](#design-of-data-pipelines) + * [Data loading](#data-loading) + * [Pre-processing](#pre-processing) + * [Formatting](#formatting) +- [Extend and Use Custom Pipelines](#extend-and-use-custom-pipelines) + + + +## Design of Data Pipelines + +Following typical conventions, we use `Dataset` and `DataLoader` for data loading +with multiple workers. `Dataset` returns a dict of data items corresponding +the arguments of models' forward method. +Since the data in action recognition & localization may not be the same size (image size, gt bbox size, etc.), +The `DataContainer` in MMCV is used to help collect and distribute data of different sizes. +See [here](https://github.com/open-mmlab/mmcv/blob/master/mmcv/parallel/data_container.py) for more details. + +The data preparation pipeline and the dataset is decomposed. Usually a dataset +defines how to process the annotations and a data pipeline defines all the steps to prepare a data dict. +A pipeline consists of a sequence of operations. Each operation takes a dict as input and also output a dict for the next operation. + +We present a typical pipeline in the following figure. The blue blocks are pipeline operations. +With the pipeline going on, each operator can add new keys (marked as green) to the result dict or update the existing keys (marked as orange). +![pipeline figure](../imgs/data_pipeline.png) + +The operations are categorized into data loading, pre-processing and formatting. + +Here is a pipeline example for TSN. 
+```python +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False) +train_pipeline = [ + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3), + dict(type='RawFrameDecode', io_backend='disk'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) +] +val_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=3, + test_mode=True), + dict(type='RawFrameDecode', io_backend='disk'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='RawFrameDecode', io_backend='disk'), + dict(type='Resize', scale=(-1, 256)), + dict(type='TenCrop', crop_size=224), + dict(type='Flip', flip_ratio=0), + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) +] +``` + +For each operation, we list the related dict fields that are added/updated/removed. + +### Data loading + +`SampleFrames` +- add: frame_inds, clip_len, frame_interval, num_clips, *total_frames + +`DenseSampleFrames` +- add: frame_inds, clip_len, frame_interval, num_clips, *total_frames + +`PyAVDecode` +- add: imgs, original_shape +- update: *frame_inds + +`DecordDecode` +- add: imgs, original_shape +- update: *frame_inds + +`OpenCVDecode` +- add: imgs, original_shape +- update: *frame_inds + +`RawFrameDecode` +- add: imgs, original_shape +- update: *frame_inds + +### Pre-processing + +`RandomCrop` +- add: crop_bbox, img_shape +- update: imgs + +`RandomResizedCrop` +- add: crop_bbox, img_shape +- update: imgs + +`MultiScaleCrop` +- add: crop_bbox, img_shape, scales +- update: imgs + +`Resize` +- add: img_shape, keep_ratio, scale_factor +- update: imgs + +`Flip` +- add: flip, flip_direction +- update: imgs + +`Normalize` +- add: img_norm_cfg +- update: imgs + +`CenterCrop` +- add: crop_bbox, img_shape +- update: imgs + +`ThreeCrop` +- add: crop_bbox, img_shape +- update: imgs + +`TenCrop` +- add: crop_bbox, img_shape +- update: imgs + +`MultiGroupCrop` +- add: crop_bbox, img_shape +- update: imgs + +### Formatting + +`ToTensor` +- update: specified by `keys`. + +`ImageToTensor` +- update: specified by `keys`. + +`Transpose` +- update: specified by `keys`. + +`Collect` +- add: img_meta (the keys of img_meta is specified by `meta_keys`) +- remove: all other keys except for those specified by `keys` + +It is **noteworthy** that the first key, commonly `imgs`, will be used as the main key to calculate the batch size. + +`FormatShape` +- add: input_shape +- update: imgs + +## Extend and Use Custom Pipelines + +1. Write a new pipeline in any file, e.g., `my_pipeline.py`. It takes a dict as input and return a dict. 
+ + ```python + from mmaction.datasets import PIPELINES + + @PIPELINES.register_module() + class MyTransform: + + def __call__(self, results): + results['key'] = value + return results + ``` + +2. Import the new class. + + ```python + from .my_pipeline import MyTransform + ``` + +3. Use it in config files. + + ```python + img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + train_pipeline = [ + dict(type='DenseSampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='RawFrameDecode', io_backend='disk'), + dict(type='MyTransform'), # use a custom pipeline + dict(type='Normalize', **img_norm_cfg), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs', 'label']) + ] + ``` diff --git a/Downstream/Open-Set-Action-Recognition/docs/tutorials/export_model.md b/Downstream/Open-Set-Action-Recognition/docs/tutorials/export_model.md new file mode 100644 index 0000000..9d046e8 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/docs/tutorials/export_model.md @@ -0,0 +1,62 @@ +# Tutorial 5: Exporting a model to ONNX + +Open Neural Network Exchange [(ONNX)](https://onnx.ai/) is an open ecosystem that empowers AI developers to choose the right tools as their project evolves. + + + +- [Supported Models](#supported-models) +- [Usage](#usage) + * [Prerequisite](#prerequisite) + * [Recognizers](#recognizers) + * [Localizer](#localizer) + + + +## Supported Models +So far, our codebase supports onnx exporting from pytorch models trained with MMAction2. The supported models are: + ++ I3D ++ TSN ++ TIN ++ TSM ++ R(2+1)D ++ SLOWFAST ++ SLOWONLY ++ BMN ++ BSN(tem, pem) + +## Usage +For simple exporting, you can use the [script](/tools/pytorch2onnx.py) here. Note that the package `onnx` and `onnxruntime` are required for verification after exporting. + +### Prerequisite +First, install onnx. +```shell +pip install onnx onnxruntime +``` + +We provide a python script to export the pytorch model trained by MMAction2 to ONNX. +```shell +python tools/pytorch2onnx.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--shape ${SHAPE}] \ + [--verify] [--show] [--output-file ${OUTPUT_FILE}] [--is-localizer] [--opset-version ${VERSION}] +``` +Optional arguments: ++ `--shape`: The shape of input tensor to the model. For 2D recognizer(e.g. TSN), the input should be `$batch $clip $channel $height $width`(e.g. `1 1 3 224 224`); For 3D recognizer(e.g. I3D), the input should be `$batch $clip $channel $time $height $width`(e.g. `1 1 3 32 224 224`); For localizer such as BSN, the input for each module is different, please check the `forward` function for it. If not specified, it will be set to `1 1 3 224 224`. ++ `--verify`: Determines whether to verify the exported model, runnably and numerically. If not specified, it will be set to `False`. ++ `--show`: Determines whether to print the architecture of the exported model. If not specified, it will be set to `False`. ++ `--output-file`: The output onnx model name. If not specified, it will be set to `tmp.onnx`. ++ `--is-localizer`: Determines whether the model to be exported is a localizer. If not specified, it will be set to `False`. ++ `--opset-version`: Determines the operation set version of onnx, we recommend you to use a higher version such as 11 for compatibility. If not specified, it will be set to `11`. 
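+Besides the built-in `--verify` flag, you can also load the exported file yourself with `onnxruntime` as a quick sanity check. Here is a minimal sketch (the file name and the `1 1 3 224 224` input shape are assumptions matching the default export shape for 2D recognizers):
+
+```python
+import numpy as np
+import onnxruntime as ort
+
+# load the exported model (use whatever you passed to --output-file)
+session = ort.InferenceSession('tmp.onnx')
+
+# build a dummy input with the same shape used during export
+input_name = session.get_inputs()[0].name
+dummy_input = np.random.randn(1, 1, 3, 224, 224).astype(np.float32)
+
+# run a forward pass and inspect the output (class scores)
+outputs = session.run(None, {input_name: dummy_input})
+print(outputs[0].shape)
+```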
+ 
+### Recognizers
+For recognizers, please run:
+```shell
+python tools/pytorch2onnx.py $CONFIG_PATH $CHECKPOINT_PATH --shape $SHAPE --verify
+```
+
+### Localizer
+For localizers, please run:
+```shell
+python tools/pytorch2onnx.py $CONFIG_PATH $CHECKPOINT_PATH --is-localizer --shape $SHAPE --verify
+```
+
+Please file an issue if you discover any checkpoints that are not perfectly exported or suffer some loss in accuracy.
diff --git a/Downstream/Open-Set-Action-Recognition/docs/tutorials/finetune.md b/Downstream/Open-Set-Action-Recognition/docs/tutorials/finetune.md
new file mode 100644
index 0000000..6b87c20
--- /dev/null
+++ b/Downstream/Open-Set-Action-Recognition/docs/tutorials/finetune.md
@@ -0,0 +1,95 @@
+# Tutorial 1: Finetuning Models
+
+This tutorial provides instructions on how to use pre-trained models
+and finetune them on other datasets, so that better performance can be obtained.
+
+
+
+- [Outline](#outline)
+- [Modify Head](#modify-head)
+- [Modify Dataset](#modify-dataset)
+- [Modify Training Schedule](#modify-training-schedule)
+- [Use Pre-Trained Model](#use-pre-trained-model)
+
+
+
+## Outline
+
+There are two steps to finetune a model on a new dataset.
+
+1. Add support for the new dataset. See [Tutorial 2: Adding New Dataset](new_dataset.md).
+1. Modify the configs. This will be discussed in this tutorial.
+
+For example, if the user wants to finetune models pre-trained on the Kinetics-400 dataset on another dataset, say UCF101,
+then four parts of the config (see [here](../config.md)) need attention.
+
+## Modify Head
+
+The `num_classes` in the `cls_head` needs to be changed to the class number of the new dataset.
+The weights of the pre-trained models are reused except for the final prediction layer.
+So it is safe to change the class number.
+In our case, UCF101 has 101 classes.
+So we change it from 400 (the class number of Kinetics-400) to 101.
+
+```python
+model = dict(
+    type='Recognizer2D',
+    backbone=dict(
+        type='ResNet',
+        pretrained='torchvision://resnet50',
+        depth=50,
+        norm_eval=False),
+    cls_head=dict(
+        type='TSNHead',
+        num_classes=101,   # change from 400 to 101
+        in_channels=2048,
+        spatial_type='avg',
+        consensus=dict(type='AvgConsensus', dim=1),
+        dropout_ratio=0.4,
+        init_std=0.01))
+```
+
+Note that the `pretrained='torchvision://resnet50'` setting is only used for initializing the backbone.
+If you are training a new model from ImageNet-pretrained weights, this is for you.
+However, this setting is not related to our task at hand.
+What we need is `load_from`, which will be discussed later.
+
+## Modify Dataset
+
+MMAction2 supports UCF101, Kinetics-400, Moments in Time, Multi-Moments in Time, THUMOS14,
+Something-Something V1&V2 and the ActivityNet Dataset.
+Users may need to adapt one of the above datasets to fit their own dataset.
+In our case, UCF101 is already supported by various dataset types, like `RawframeDataset`,
+so we change the config as follows.
+
+```python
+# dataset settings
+dataset_type = 'RawframeDataset'
+data_root = 'data/ucf101/rawframes_train/'
+data_root_val = 'data/ucf101/rawframes_val/'
+ann_file_train = 'data/ucf101/ucf101_train_list.txt'
+ann_file_val = 'data/ucf101/ucf101_val_list.txt'
+ann_file_test = 'data/ucf101/ucf101_val_list.txt'
+```
+
+## Modify Training Schedule
+
+Finetuning usually requires a smaller learning rate and fewer training epochs.
+ +```python +# optimizer +optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001) # change from 0.01 to 0.005 +optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2)) +# learning policy +lr_config = dict(policy='step', step=[40, 80]) +total_epochs = 50 # change from 100 to 50 +checkpoint_config = dict(interval=5) +``` + +## Use Pre-Trained Model +To use the pre-trained model for the whole network, the new config adds the link of pre-trained models in the `load_from`. + +```python +# use the pre-trained model for the whole TSN network +load_from = 'https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmaction/mmaction-v1/recognition/tsn_r50_1x1x3_100e_kinetics400_rgb/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth' # model path can be found in model zoo +``` diff --git a/Downstream/Open-Set-Action-Recognition/docs/tutorials/new_dataset.md b/Downstream/Open-Set-Action-Recognition/docs/tutorials/new_dataset.md new file mode 100644 index 0000000..e55844a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/docs/tutorials/new_dataset.md @@ -0,0 +1,248 @@ +# Tutorial 2: Adding New Dataset + +In this tutorial, we will introduce some methods about how to customize your own dataset by reorganizing data and mixing dataset for the project. + + + +- [Customize Datasets by Reorganizing Data](#customize-datasets-by-reorganizing-data) + * [Reorganize datasets to existing format](#reorganize-datasets-to-existing-format) + * [An example of a custom dataset](#an-example-of-a-custom-dataset) +- [Customize Dataset by Mixing Dataset](#customize-dataset-by-mixing-dataset) + * [Repeat dataset](#repeat-dataset) + + + +## Customize Datasets by Reorganizing Data + +### Reorganize datasets to existing format + +The simplest way is to convert your dataset to existing dataset formats (RawframeDataset or VideoDataset). + +There are three kinds of annotation files. + +- rawframe annotation + + The annotation of a rawframe dataset is a text file with multiple lines, + and each line indicates `frame_directory` (relative path) of a video, + `total_frames` of a video and the `label` of a video, which are split by a whitespace. + + Here is an example. + ``` + some/directory-1 163 1 + some/directory-2 122 1 + some/directory-3 258 2 + some/directory-4 234 2 + some/directory-5 295 3 + some/directory-6 121 3 + ``` + +- video annotation + + The annotation of a video dataset is a text file with multiple lines, + and each line indicates a sample video with the `filepath` (relative path) and `label`, + which are split by a whitespace. + + Here is an example. + ``` + some/path/000.mp4 1 + some/path/001.mp4 1 + some/path/002.mp4 2 + some/path/003.mp4 2 + some/path/004.mp4 3 + some/path/005.mp4 3 + ``` + +- ActivityNet annotation + The annotation of ActivityNet dataset is a json file. Each key is a video name + and the corresponding value is the meta data and annotation for the video. + + Here is an example. + ``` + { + "video1": { + "duration_second": 211.53, + "duration_frame": 6337, + "annotations": [ + { + "segment": [ + 30.025882995319815, + 205.2318595943838 + ], + "label": "Rock climbing" + } + ], + "feature_frame": 6336, + "fps": 30.0, + "rfps": 29.9579255898 + }, + "video2": { + "duration_second": 26.75, + "duration_frame": 647, + "annotations": [ + { + "segment": [ + 2.578755070202808, + 24.914101404056165 + ], + "label": "Drinking beer" + } + ], + "feature_frame": 624, + "fps": 24.0, + "rfps": 24.1869158879 + } + } + ``` + +There are two ways to work with custom datasets. 
+ +- online conversion + + You can write a new Dataset class inherited from [BaseDataset](/mmaction/datasets/base.py), and overwrite three methods + `load_annotations(self)`, `evaluate(self, results, metrics, logger)` and `dump_results(self, results, out)`, + like [RawframeDataset](/mmaction/datasets/rawframe_dataset.py), [VideoDataset](/mmaction/datasets/video_dataset.py) or [ActivityNetDataset](/mmaction/datasets/activitynet_dataset.py). + +- offline conversion + + You can convert the annotation format to the expected format above and save it to + a pickle or json file, then you can simply use `RawframeDataset`, `VideoDataset` or `ActivityNetDataset`. + +After the data pre-processing, the users need to further modify the config files to use the dataset. +Here is an example of using a custom dataset in rawframe format. + +In `configs/task/method/my_custom_config.py`: + +```python +... +# dataset settings +dataset_type = 'RawframeDataset' +data_root = 'path/to/your/root' +data_root_val = 'path/to/your/root_val' +ann_file_train = 'data/custom/custom_train_list.txt' +ann_file_val = 'data/custom/custom_val_list.txt' +ann_file_test = 'data/custom/custom_val_list.txt' +... +data = dict( + videos_per_gpu=32, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + ...), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + ...), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + ...)) +... +``` + +We use this way to support Rawframe dataset. + +### An example of a custom dataset + +Assume the annotation is in a new format in text files, and the image file name is of template like `img_00005.jpg` +The video annotations are stored in text file `annotation.txt` as following + +``` +directory,total frames,class +D32_1gwq35E,299,66 +-G-5CJ0JkKY,249,254 +T4h1bvOd9DA,299,33 +4uZ27ivBl00,299,341 +0LfESFkfBSw,249,186 +-YIsNpBEx6c,299,169 +``` + +We can create a new dataset in `mmaction/datasets/my_dataset.py` to load the data. 
+
+```python
+import copy
+import os.path as osp
+
+import mmcv
+
+from .base import BaseDataset
+from .registry import DATASETS
+
+
+@DATASETS.register_module()
+class MyDataset(BaseDataset):
+
+    def __init__(self,
+                 ann_file,
+                 pipeline,
+                 data_prefix=None,
+                 test_mode=False,
+                 filename_tmpl='img_{:05}.jpg'):
+        super(MyDataset, self).__init__(ann_file, pipeline, data_prefix=data_prefix, test_mode=test_mode)
+
+        self.filename_tmpl = filename_tmpl
+
+    def load_annotations(self):
+        video_infos = []
+        with open(self.ann_file, 'r') as fin:
+            for line in fin:
+                if line.startswith("directory"):
+                    continue
+                frame_dir, total_frames, label = line.split(',')
+                if self.data_prefix is not None:
+                    frame_dir = osp.join(self.data_prefix, frame_dir)
+                video_infos.append(
+                    dict(
+                        frame_dir=frame_dir,
+                        total_frames=int(total_frames),
+                        label=int(label)))
+        return video_infos
+
+    def prepare_train_frames(self, idx):
+        results = copy.deepcopy(self.video_infos[idx])
+        results['filename_tmpl'] = self.filename_tmpl
+        return self.pipeline(results)
+
+    def prepare_test_frames(self, idx):
+        results = copy.deepcopy(self.video_infos[idx])
+        results['filename_tmpl'] = self.filename_tmpl
+        return self.pipeline(results)
+
+    def evaluate(self,
+                 results,
+                 metrics='top_k_accuracy',
+                 topk=(1, 5),
+                 logger=None):
+        pass
+```
+
+Then, to use `MyDataset`, you can modify the config as follows.
+
+```python
+dataset_A_train = dict(
+    type='MyDataset',
+    ann_file=ann_file_train,
+    pipeline=train_pipeline
+)
+```
+
+## Customize Dataset by Mixing Dataset
+
+MMAction2 also supports mixing datasets for training. Currently it supports repeating a dataset.
+
+### Repeat dataset
+
+We use `RepeatDataset` as a wrapper to repeat the dataset. For example, suppose the original dataset is `Dataset_A`;
+to repeat it, the config looks like the following
+
+```python
+dataset_A_train = dict(
+        type='RepeatDataset',
+        times=N,
+        dataset=dict(  # This is the original config of Dataset_A
+            type='Dataset_A',
+            ...
+            pipeline=train_pipeline
+        )
+    )
+```
diff --git a/Downstream/Open-Set-Action-Recognition/docs/tutorials/new_modules.md b/Downstream/Open-Set-Action-Recognition/docs/tutorials/new_modules.md
new file mode 100644
index 0000000..32be1b6
--- /dev/null
+++ b/Downstream/Open-Set-Action-Recognition/docs/tutorials/new_modules.md
@@ -0,0 +1,283 @@
+# Tutorial 4: Adding New Modules
+
+In this tutorial, we will introduce some methods for customizing the optimizer, developing new components and adding a new learning rate scheduler for this project.
+
+
+
+- [Customize Optimizer](#customize-optimizer)
+- [Customize Optimizer Constructor](#customize-optimizer-constructor)
+- [Develop New Components](#develop-new-components)
+  * [Add new backbones](#add-new-backbones)
+  * [Add new heads](#add-new-heads)
+  * [Add new loss](#add-new-loss)
+- [Add new learning rate scheduler (updater)](#add-new-learning-rate-scheduler--updater-)
+
+
+
+## Customize Optimizer
+
+An example of a customized optimizer is [CopyOfSGD](/mmaction/core/optimizer/copy_of_sgd.py), defined in `mmaction/core/optimizer/copy_of_sgd.py`.
+More generally, a customized optimizer could be defined as follows.
+
+Assume you want to add an optimizer named `MyOptimizer`, which has arguments `a`, `b` and `c`.
+You need to first implement the new optimizer in a file, e.g., in `mmaction/core/optimizer/my_optimizer.py`: + +```python +from .registry import OPTIMIZERS +from torch.optim import Optimizer + +@OPTIMIZERS.register_module() +class MyOptimizer(Optimizer): + + def __init__(self, a, b, c): +``` + +Then add this module in `mmaction/core/optimizer/__init__.py`, thus the registry will find the new module and add it: + +```python +from .my_optimizer import MyOptimizer +``` + +Then you can use `MyOptimizer` in `optimizer` field of config files. +In the configs, the optimizers are defined by the field `optimizer` like the following: + +```python +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +``` + +To use your own optimizer, the field can be changed as + +```python +optimizer = dict(type='MyOptimizer', a=a_value, b=b_value, c=c_value) +``` + +We already support to use all the optimizers implemented by PyTorch, and the only modification is to change the `optimizer` field of config files. +For example, if you want to use `ADAM`, though the performance will drop a lot, the modification could be as the following. + +```python +optimizer = dict(type='Adam', lr=0.0003, weight_decay=0.0001) +``` + +The users can directly set arguments following the [API doc](https://pytorch.org/docs/stable/optim.html?highlight=optim#module-torch.optim) of PyTorch. + +## Customize Optimizer Constructor + +Some models may have some parameter-specific settings for optimization, e.g. weight decay for BatchNorm layers. +The users can do those fine-grained parameter tuning through customizing optimizer constructor. + +You can write a new optimizer constructor inherit from [DefaultOptimizerConstructor](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/optimizer/default_constructor.py) +and overwrite the `add_params(self, params, module)` method. + +An example of customized optimizer constructor is [TSMOptimizerConstructor](/mmaction/core/optimizer/tsm_optimizer_constructor.py). +More generally, a customized optimizer constructor could be defined as following. + +In `mmaction/core/optimizer/my_optimizer_constructor.py`: + +```python +from mmcv.runner import OPTIMIZER_BUILDERS, DefaultOptimizerConstructor + +@OPTIMIZER_BUILDERS.register_module() +class MyOptimizerConstructor(DefaultOptimizerConstructor): + +``` + +In `mmaction/core/optimizer/__init__.py`: + +```python +from .my_optimizer_constructor import MyOptimizerConstructor +``` + +Then you can use `MyOptimizerConstructor` in `optimizer` field of config files. + +```python +# optimizer +optimizer = dict( + type='SGD', + constructor='MyOptimizerConstructor', + paramwise_cfg=dict(fc_lr5=True), + lr=0.02, + momentum=0.9, + weight_decay=0.0001) +``` + +## Develop New Components + +We basically categorize model components into 4 types. + +- recognizer: the whole recognizer model pipeline, usually contains a backbone and cls_head. +- backbone: usually an FCN network to extract feature maps, e.g., ResNet, BNInception. +- cls_head: the component for classification task, usually contains an FC layer with some pooling layers. +- localizer: the model for temporal localization task, currently available: BSN, BMN. + +### Add new backbones + +Here we show how to develop new components with an example of TSN. + +1. Create a new file `mmaction/models/backbones/resnet.py`. 
+ + ```python + import torch.nn as nn + + from ..registry import BACKBONES + + @BACKBONES.register_module() + class ResNet(nn.Module): + + def __init__(self, arg1, arg2): + pass + + def forward(self, x): # should return a tuple + pass + + def init_weights(self, pretrained=None): + pass + ``` + +2. Import the module in `mmaction/models/backbones/__init__.py`. + + ```python + from .resnet import ResNet + ``` + +3. Use it in your config file. + + ```python + model = dict( + ... + backbone=dict( + type='ResNet', + arg1=xxx, + arg2=xxx), + ) + ``` + +### Add new heads + +Here we show how to develop a new head with the example of TSNHead as the following. + +1. Create a new file `mmaction/models/heads/tsn_head.py`. + + You can write a new classification head inheriting from [BaseHead](/mmaction/models/heads/base.py), + and overwrite `init_weights(self)` and `forward(self, x)` method. + + ```python + from ..registry import HEADS + from .base import BaseHead + + + @HEADS.register_module() + class TSNHead(BaseHead): + + def __init__(self, arg1, arg2): + pass + + def forward(self, x): + pass + + def init_weights(self): + pass + ``` + +2. Import the module in `mmaction/models/heads/__init__.py` + + ```python + from .tsn_head import TSNHead + ``` + +3. Use it in your config file + + ```python + model = dict( + ... + cls_head=dict( + type='TSNHead', + num_classes=400, + in_channels=2048, + arg1=xxx, + arg2=xxx), + ``` + +### Add new loss + +Assume you want to add a new loss as `MyLoss`. To add a new loss function, the users need implement it in `mmaction/models/losses/my_loss.py`. + +```python +import torch +import torch.nn as nn + +from ..builder import LOSSES + +def my_loss(pred, target): + assert pred.size() == target.size() and target.numel() > 0 + loss = torch.abs(pred - target) + return loss + + +@LOSSES.register_module() +class MyLoss(nn.Module): + + def forward(self, pred, target): + loss = my_loss(pred, target) + return loss +``` + +Then the users need to add it in the `mmaction/models/losses/__init__.py` + +```python +from .my_loss import MyLoss, my_loss +``` + +To use it, modify the `loss_xxx` field. Since MyLoss is for regression, we can use it for the bbox loss `loss_bbox`. + +```python +loss_bbox=dict(type='MyLoss')) +``` + +## Add new learning rate scheduler (updater) +The default manner of constructing a lr updater(namely, 'scheduler' by pytorch convention), is to modify the config such as: +```python +... +lr_config = dict(policy='step', step=[20, 40]) +... +``` +In the api for [`train.py`](/mmaction/apis/train.py), it will register the learning rate updater hook based on the config at: +```python +... + runner.register_training_hooks( + cfg.lr_config, + optimizer_config, + cfg.checkpoint_config, + cfg.log_config, + cfg.get('momentum_config', None)) +... +``` +So far, the supported updaters can be find in [mmcv](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py), but if you want to customize a new learning rate updater, you may follow the steps below: + +1. First, write your own LrUpdaterHook in `$MMAction2/mmaction/core/lr`. 
The snippet followed is an example of cumtomized lr updater that uses learning rate based on a specific learning rate ratio: `lrs`, by which the learning rate decreases at each `steps`: +```python +@HOOKS.register_module() +# Register it here +class RelativeStepLrUpdaterHook(LrUpdaterHook): + # You should inheritate it from mmcv.LrUpdaterHook + def __init__(self, runner, steps, lrs, **kwargs): + super().__init__(**kwargs) + assert len(steps) == (len(lrs)) + self.steps = steps + self.lrs = lrs + + def get_lr(self, runner, base_lr): + # Only this function is required to override + # This function is called before each training epoch, return the specific learning rate here. + progress = runner.epoch if self.by_epoch else runner.iter + for i in range(len(self.steps)): + if progress < self.steps[i]: + return self.lrs[i] +``` + +2. Modify your config: +In your config file, swap the original `lr_config` by: +```python +lr_config = dict(policy='RelativeStep', steps=[20, 40, 60], lrs=[0.1, 0.01, 0.001]) +``` + +More examples can be found in [mmcv](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py). diff --git a/Downstream/Open-Set-Action-Recognition/docs/useful_tools.md b/Downstream/Open-Set-Action-Recognition/docs/useful_tools.md new file mode 100644 index 0000000..7ad6c8d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/docs/useful_tools.md @@ -0,0 +1,155 @@ +Apart from training/testing scripts, We provide lots of useful tools under the `tools/` directory. + +## Useful Tools Link + + + +- [Log Analysis](#log-analysis) +- [Model Complexity](#model-complexity) +- [Model Conversion](#model-conversion) + * [MMAction2 model to ONNX (experimental)](#mmaction2-model-to-onnx--experimental-) + * [Prepare a model for publishing](#prepare-a-model-for-publishing) +- [Miscellaneous](#miscellaneous) + * [Evaluating a metric](#evaluating-a-metric) + * [Print the entire config](#print-the-entire-config) + + + +## Log Analysis + +`tools/analysis/analyze_logs.py` plots loss/top-k acc curves given a training log file. Run `pip install seaborn` first to install the dependency. + +![acc_curve_image](imgs/acc_curve.png) + +```shell +python tools/analysis/analyze_logs.py plot_curve ${JSON_LOGS} [--keys ${KEYS}] [--title ${TITLE}] [--legend ${LEGEND}] [--backend ${BACKEND}] [--style ${STYLE}] [--out ${OUT_FILE}] +``` + +Examples: + +- Plot the classification loss of some run. + + ```shell + python tools/analysis/analyze_logs.py plot_curve log.json --keys loss_cls --legend loss_cls + ``` + +- Plot the top-1 acc and top-5 acc of some run, and save the figure to a pdf. + + ```shell + python tools/analysis/analyze_logs.py plot_curve log.json --keys top1_acc top5_acc --out results.pdf + ``` + +- Compare the top-1 acc of two runs in the same figure. + + ```shell + python tools/analysis/analyze_logs.py plot_curve log1.json log2.json --keys top1_acc --legend run1 run2 + ``` + +You can also compute the average training speed. + + ```shell + python tools/analysis/analyze_logs.py cal_train_time ${JSON_LOGS} [--include-outliers] + ``` + +- Compute the average training speed for a config file + + ```shell + python tools/analysis/analyze_logs.py cal_train_time work_dirs/some_exp/20200422_153324.log.json + ``` + + The output is expected to be like the following. 
+ + ```text + -----Analyze train time of work_dirs/some_exp/20200422_153324.log.json----- + slowest epoch 60, average time is 0.9736 + fastest epoch 18, average time is 0.9001 + time std over epochs is 0.0177 + average iter time: 0.9330 s/iter + ``` + +## Model Complexity + +`/tools/analysis/get_flops.py` is a script adapted from [flops-counter.pytorch](https://github.com/sovrasov/flops-counter.pytorch) to compute the FLOPs and params of a given model. + +```shell +python tools/analysis/get_flops.py ${CONFIG_FILE} [--shape ${INPUT_SHAPE}] +``` + +We will get the result like this + +```text +============================== +Input shape: (1, 3, 32, 340, 256) +Flops: 37.1 GMac +Params: 28.04 M +============================== +``` + +**Note**: This tool is still experimental and we do not guarantee that the number is absolutely correct. +You may use the result for simple comparisons, but double check it before you adopt it in technical reports or papers. + +(1) FLOPs are related to the input shape while parameters are not. The default input shape is (1, 3, 340, 256) for 2D recognizer, (1, 3, 32, 340, 256) for 3D recognizer. +(2) Some operators are not counted into FLOPs like GN and custom operators. Refer to [`mmcv.cnn.get_model_complexity_info()`](https://github.com/open-mmlab/mmcv/blob/master/mmcv/cnn/utils/flops_counter.py) for details. + +## Model Conversion + +### MMAction2 model to ONNX (experimental) + +`/tools/pytorch2onnx.py` is a script to convert model to [ONNX](https://github.com/onnx/onnx) format. +It also supports comparing the output results between Pytorch and ONNX model for verification. +Run `pip install onnx onnxruntime` first to install the dependency. + +- For recognizers, please run: + + ```shell + python tools/pytorch2onnx.py $CONFIG_PATH $CHECKPOINT_PATH --shape $SHAPE --verify + ``` + +- For localizers, please run: + + ```shell + python tools/pytorch2onnx.py $CONFIG_PATH $CHECKPOINT_PATH --is-localizer --shape $SHAPE --verify + ``` + +### Prepare a model for publishing + +`tools/publish_model.py` helps users to prepare their model for publishing. + +Before you upload a model to AWS, you may want to: + +(1) convert model weights to CPU tensors. +(2) delete the optimizer states. +(3) compute the hash of the checkpoint file and append the hash id to the filename. + +```shell +python tools/publish_model.py ${INPUT_FILENAME} ${OUTPUT_FILENAME} +``` + +E.g., + +```shell +python tools/publish_model.py work_dirs/tsn_r50_1x1x3_100e_kinetics400_rgb/latest.pth tsn_r50_1x1x3_100e_kinetics400_rgb.pth +``` + +The final output filename will be `tsn_r50_1x1x3_100e_kinetics400_rgb-{hash id}.pth`. + +## Miscellaneous + +### Evaluating a metric + +tools/analysis/eval_metric.py evaluates certain metrics of the results saved in a file according to a config file. + +The saved result file is created on `tools/test.py` by setting the arguments `--out ${RESULT_FILE}` to indicate the result file, +which stores the final output of the whole model. + +```shell +python tools/analysis/eval_metric.py ${CONFIG_FILE} ${RESULT_FILE} [--eval ${EVAL_METRICS}] [--cfg-options ${CFG_OPTIONS}] [--eval-options ${EVAL_OPTIONS}] +``` + +### Print the entire config + +`tools/analysis/print_config.py` prints the whole config verbatim, expanding all its imports. 
+ +```shell +python tools/print_config.py ${CONFIG} [-h] [--options ${OPTIONS [OPTIONS...]}] +``` diff --git a/Downstream/Open-Set-Action-Recognition/experiments/analyze_features.py b/Downstream/Open-Set-Action-Recognition/experiments/analyze_features.py new file mode 100644 index 0000000..1ab4110 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/analyze_features.py @@ -0,0 +1,163 @@ +import argparse +import os +import torch +from mmcv.parallel import collate, scatter +from mmaction.datasets.pipelines import Compose +from mmaction.apis import init_recognizer +from sklearn.manifold import TSNE +import numpy as np +import matplotlib.pyplot as plt +from tqdm import tqdm + + +def parse_args(): + parser = argparse.ArgumentParser(description='MMAction2 test') + # model config + parser.add_argument('--config', help='test config file path') + parser.add_argument('--checkpoint', help='checkpoint file/url') + parser.add_argument('--known_split', help='the split file path of the knowns') + parser.add_argument('--unknown_split', help='the split file path of the unknowns') + parser.add_argument('--result_file', help='the result file path') + parser.add_argument('--device', type=str, default='cuda:0', help='CPU/CUDA device option') + args = parser.parse_args() + return args + + +def get_data(known_split, known_classes): + known_data = [] + labels = [] + video_dir = os.path.join(os.path.dirname(known_split), 'videos') + with open(known_split, 'r') as f: + for line in f.readlines(): + clsname, videoname = line.strip().split(' ')[0].split('/') + if clsname in known_classes.keys(): + videofile = os.path.join(video_dir, clsname, videoname) + known_data.append(videofile) + labels.append(known_classes[clsname]) + return known_data, labels + + +def inference_recognizer(model, video_path): + """Inference a video with the detector. + + Args: + model (nn.Module): The loaded recognizer. + video_path (str): The video file path/url or the rawframes directory + path. If ``use_frames`` is set to True, it should be rawframes + directory path. Otherwise, it should be video file path. 
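+
+    Returns:
+        np.ndarray: the feature of the video averaged over all sampled clips
+            and crops (a vector of shape ``(2048,)`` for the backbone used in
+            these experiments, as noted in the comments below).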
+ """ + cfg = model.cfg + device = next(model.parameters()).device # model device + # build the data pipeline + test_pipeline = cfg.data.test.pipeline + test_pipeline = Compose(test_pipeline) + # prepare data (by default, we use videodata) + start_index = cfg.data.test.get('start_index', 0) + data = dict(filename=video_path, label=-1, start_index=start_index, modality='RGB') + data = test_pipeline(data) + data = collate([data], samples_per_gpu=1) + if next(model.parameters()).is_cuda: + # scatter to specified GPU + data = scatter(data, [device])[0] + + # forward the model + with torch.no_grad(): + feat_blob = model(return_loss=False, get_feat=True, **data) # (num_clips * num_crops, 2048, 1, 8, 8) + # spatial average pooling + kernel_size = (1, feat_blob.size(-2), feat_blob.size(-1)) + avg_pool2d = torch.nn.AvgPool3d(kernel_size, stride=1, padding=0) + feat_clips = avg_pool2d(feat_blob).view(feat_blob.size(0), feat_blob.size(1)) # (num_clips * num_crops, 2048) + # get the mean features of all clips and crops + feat_final = torch.mean(feat_clips, dim=0).cpu().numpy() # (2048,) + return feat_final + +def extract_feature(video_files): + + model = init_recognizer( + args.config, + args.checkpoint, + device=device, + use_frames=False) + cfg = model.cfg + torch.backends.cudnn.benchmark = True + cfg.data.test.test_mode = True + if 'bnn' in args.config: + model.test_cfg.npass = 1 + + X = [] + for videofile in tqdm(video_files, total=len(video_files), desc='Extract Feature'): + feature = inference_recognizer(model, videofile) # (2048,) + X.append(feature) + X = np.vstack(X) + return X + + +def set_deterministic(seed): + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) # if you are using multi-GPU. + np.random.seed(seed) # Numpy module. + torch.backends.cudnn.benchmark = True + torch.backends.cudnn.deterministic = True + + +if __name__ == '__main__': + + args = parse_args() + # assign the desired device. 
+ device = torch.device(args.device) + set_deterministic(0) + + ind_clsID = [2, 10, 16, 69, 71, 21, 32, 41, 73, 29] # UCF-101 73 21 41 32 29 10 16 69 71 2 + ind_classes = {'Archery': 0, 'Biking': 1, 'BoxingPunchingBag': 2, 'PullUps': 3, 'PushUps': 4, + 'CliffDiving': 5, 'GolfSwing': 6, 'HorseRiding': 7, 'RockClimbingIndoor': 8, 'FloorGymnastics': 9} + ood_clsID = [12, 20, 21, 22, 50, 15, 16, 17, 18, 19] # HMDB-51 15 16 17 18 19 20 21 22 12 50 + ood_classes = {'fall_floor': 10, 'kick': 10, 'kick_ball': 10, 'kiss': 10, 'wave': 10, + 'golf': 10, 'handstand': 10, 'hit': 10, 'hug': 10, 'jump': 10} + feature_file = args.result_file[:-4] + '_feature_10p1.npz' + + # get the data of known classes + known_data, known_labels = get_data(args.known_split, ind_classes) + num_knowns = len(known_data) + # get the data of unknown classes + unknown_data, unknown_labels = get_data(args.unknown_split, ood_classes) + num_unknowns = len(unknown_data) + + if not os.path.exists(feature_file): + # save the figure + result_path = os.path.dirname(args.result_file) + if not os.path.exists(result_path): + os.makedirs(result_path) + + # extracting the feature + X = extract_feature(known_data + unknown_data) + + # save results + np.savez(feature_file[:-4], feature=X) + else: + results = np.load(feature_file, allow_pickle=True) + X = results['feature'] + + open_classes = {**ind_classes, 'Unknowns': len(ind_classes)} + open_labels = np.array(known_labels + [len(ind_classes)] * num_unknowns) + # run tSNE + print('running tSNE...') + Y = TSNE(n_components=2, random_state=0).fit_transform(X) + plt.figure(figsize=(5,4)) + plt.rcParams["font.family"] = "Arial" # Times New Roman + fontsize = 10 + for k, v in open_classes.items(): + inds = np.where(open_labels == v)[0] + if k == 'Unknowns': + plt.scatter(Y[inds, 0], Y[inds, 1], s=10, c='k', marker='^', label=k) + else: + plt.scatter(Y[inds, 0], Y[inds, 1], s=3) + plt.text(np.mean(Y[inds, 0])-5, np.mean(Y[inds, 1])+5, k, fontsize=fontsize) + xmin, xmax, ymin, ymax = np.min(Y[:, 0]), np.max(Y[:, 0]), np.min(Y[:, 1]), np.max(Y[:, 1]) + plt.xlim(xmin-5, xmax + 15) + plt.ylim(ymin-5, ymax + 10) + plt.legend(loc='lower right', fontsize=fontsize) + plt.xticks([]) + plt.yticks([]) + plt.savefig(args.result_file) + plt.savefig(args.result_file[:-4] + '.pdf') diff --git a/Downstream/Open-Set-Action-Recognition/experiments/baseline_openmax.py b/Downstream/Open-Set-Action-Recognition/experiments/baseline_openmax.py new file mode 100644 index 0000000..394b318 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/baseline_openmax.py @@ -0,0 +1,436 @@ +import argparse, os, sys +import torch +import mmcv +import numpy as np +import torch.nn.functional as F +from mmcv.parallel import collate, scatter +from mmaction.datasets.pipelines import Compose +from mmaction.apis import init_recognizer +from mmaction.datasets import build_dataloader, build_dataset +from mmcv.parallel import MMDataParallel +from tqdm import tqdm +import scipy.spatial.distance as spd +try: + import libmr +except ImportError: + print("LibMR not installed or libmr.so not found") + print("Install libmr: cd libMR/; ./compile.sh") + sys.exit() +from sklearn.metrics import f1_score, roc_auc_score, accuracy_score +import matplotlib.pyplot as plt + + +def set_deterministic(seed): + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) # if you are using multi-GPU. + np.random.seed(seed) # Numpy module. 
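+    # NOTE: cudnn.benchmark=True lets cuDNN auto-tune and pick potentially
+    # non-deterministic kernels; set it to False if strict reproducibility is required.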
+ torch.backends.cudnn.benchmark = True + torch.backends.cudnn.deterministic = True + +def parse_args(): + """ Example shell script: + $ cd experiments + $ source activate mmaction + $ nohup python baseline_openmax.py --model i3d --config configs/recognition/tpn/inference_tpn_slowonly_dnn.py + """ + parser = argparse.ArgumentParser(description='MMAction2 test') + # model config + parser.add_argument('--config', help='test config file path') + parser.add_argument('--checkpoint', help='checkpoint file/url') + parser.add_argument('--trainset_split', default='data/ucf101/ucf101_train_split_1_videos.txt', help='the split file path of the training set') + parser.add_argument('--num_cls', type=int, default=101, help='The number of classes in training set.') + parser.add_argument('--cache_mav_dist', help='the result path to cache the mav and distances for each class.') + # test data config + parser.add_argument('--ind_data', help='the split file of in-distribution testing data') + parser.add_argument('--ood_data', help='the split file of out-of-distribution testing data') + parser.add_argument('--ood_ncls', type=int, help='the number of classes in unknwon dataset') + parser.add_argument('--num_rand', type=int, default=10, help='the number of random selection for ood classes') + # device + parser.add_argument('--device', type=str, default='cuda:0', help='CPU/CUDA device option') + parser.add_argument('--result_prefix', help='result file prefix') + args = parser.parse_args() + return args + + +def get_datalist(split_file): + assert os.path.exists(split_file), 'split file does not exist! %s'%(split_file) + video_dir = os.path.join(os.path.dirname(split_file), 'videos') + filelist, labels = [], [] + with open(split_file, 'r') as f: + for line in f.readlines(): + videofile = os.path.join(video_dir, line.strip().split(' ')[0]) + clsid = int(line.strip().split(' ')[-1]) + filelist.append(videofile) + labels.append(clsid) + return filelist, labels + +def spatial_temporal_pooling(feat_blob): + if isinstance(feat_blob, tuple): # slowfast model returns a tuple of features + assert len(feat_blob) == 2, "invalid feature tuple!" + avg_pool3d = torch.nn.AdaptiveAvgPool3d((1, 1, 1)) + x_fast, x_slow = feat_blob + x_fast = avg_pool3d(x_fast) + x_slow = avg_pool3d(x_slow) + # [N, channel_fast + channel_slow, 1, 1, 1] + feat_clips = torch.cat((x_slow, x_fast), dim=1).squeeze(-1).squeeze(-1).squeeze(-1) + else: + if len(feat_blob.size()) == 5: # 3D Network + # spatial temporal average pooling + kernel_size = (feat_blob.size(-3), feat_blob.size(-2), feat_blob.size(-1)) + avg_pool3d = torch.nn.AvgPool3d(kernel_size, stride=1, padding=0) + feat_clips = avg_pool3d(feat_blob).view(feat_blob.size(0), feat_blob.size(1)) # (c, D) + elif len(feat_blob.size()) == 4: # 2D Network + # spatial temporal average pooling + kernel_size = (feat_blob.size(-2), feat_blob.size(-1)) + avg_pool2d = torch.nn.AvgPool2d(kernel_size, stride=1, padding=0) + feat_clips = avg_pool2d(feat_blob).view(feat_blob.size(0), feat_blob.size(1)) # (c, D) + else: + print('Unsupported feature dimension: {}'.format(feat_blob.size())) + # get the mean features of all clips and crops + feat_final = torch.mean(feat_clips, dim=0, keepdim=True) # (c=1, D) + return feat_final + +def inference_recognizer(model, video_path): + """Inference a video with the detector. + + Args: + model (nn.Module): The loaded recognizer. + video_path (str): The video file path/url or the rawframes directory + path. 
If ``use_frames`` is set to True, it should be rawframes + directory path. Otherwise, it should be video file path. + """ + cfg = model.cfg + device = next(model.parameters()).device # model device + # build the data pipeline + test_pipeline = cfg.data.test.pipeline + test_pipeline = Compose(test_pipeline) + # prepare data (by default, we use videodata) + start_index = cfg.data.test.get('start_index', 0) + data = dict(filename=video_path, label=-1, start_index=start_index, modality='RGB') + data = test_pipeline(data) + data = collate([data], samples_per_gpu=1) + if next(model.parameters()).is_cuda: + # scatter to specified GPU + data = scatter(data, [device])[0] + + # forward the model + with torch.no_grad(): + feat_blob, score = model(return_loss=False, return_score=True, get_feat=True, **data) # (c, D, t, h, w) + feat_blob = spatial_temporal_pooling(feat_blob) + feat_final = feat_blob.cpu().numpy() + score = score.cpu().numpy() + return feat_final, score + + +def extract_class_features(videolist, model, cls_gt): + features = [] + for videofile in tqdm(videolist, total=len(videolist), desc='Extract Class %d Features'%(cls_gt)): + feat, score = inference_recognizer(model, videofile) # (c, D) + cls_pred = np.argmax(score, axis=1) + if cls_gt in cls_pred: + features.append(feat) + features = np.array(features) # (N, c, D) + return features + + +def compute_distance(mav, features): + # extract features and compute distances for each class + num_channels = mav.shape[0] + eucos_dist, eu_dist, cos_dist = [], [], [] + for feat in features: + # compute distance of each channel + eu_channel, cos_channel, eu_cos_channel = [], [], [] + for c in range(num_channels): + eu_channel += [spd.euclidean(mav[c, :], feat[c, :])/200.] + cos_channel += [spd.cosine(mav[c, :], feat[c, :])] + eu_cos_channel += [spd.euclidean(mav[c, :], feat[c, :]) / 200. + + spd.cosine(mav[c, :], feat[c, :])] # Here, 200 is from the official OpenMax code + eu_dist += [eu_channel] + cos_dist += [cos_channel] + eucos_dist += [eu_cos_channel] + return np.array(eucos_dist), np.array(eu_dist), np.array(cos_dist) + + +def compute_channel_distance(mav_channel, feat_channel, distance_type='eucos'): + if distance_type == 'eucos': + query_distance = spd.euclidean(mav_channel, feat_channel)/200. + spd.cosine(mav_channel, feat_channel) + elif distance_type == 'eu': + query_distance = spd.euclidean(mav_channel, feat_channel)/200. 
+ elif distance_type == 'cos': + query_distance = spd.cosine(mav_channel, feat_channel) + else: + print("distance type not known: enter either of eucos, euclidean or cosine") + return query_distance + + +def compute_mav_dist(videolist, labels, model, mav_dist_cachedir): + num_cls = model.cls_head.num_classes + mav_dist_list = [] + for cls_gt in range(num_cls): + mav_dist_file = os.path.join(mav_dist_cachedir, 'mav_dist_cls%03d.npz'%(cls_gt)) + mav_dist_list.append(mav_dist_file) + if os.path.exists(mav_dist_file): + continue + # data for the current class + inds = np.where(np.array(labels) == cls_gt)[0] + videos_cls = [videolist[i] for i in inds] + # extract MAV features + features = extract_class_features(videos_cls, model, cls_gt) + mav_train = np.mean(features, axis=0) + # compute distance + eucos_dist, eu_dist, cos_dist = compute_distance(mav_train, features) + # save MAV and distances + np.savez(mav_dist_file[:-4], mav=mav_train, eucos=eucos_dist, eu=eu_dist, cos=cos_dist) + return mav_dist_list + + +def weibull_fitting(mav_dist_list, distance_type='eucos', tailsize=20): + weibull_model = {} + for cls_gt in range(len(mav_dist_list)): + # load the mav_dist file + cache = np.load(mav_dist_list[cls_gt], allow_pickle=True) + mav_train = cache['mav'] + distances = cache[distance_type] + + weibull_model[cls_gt] = {} + weibull_model[cls_gt]['mean_vec'] = mav_train + + # weibull fitting for each channel + weibull_model[cls_gt]['weibull_model'] = [] + num_channels = mav_train.shape[0] + for c in range(num_channels): + mr = libmr.MR() + tailtofit = sorted(distances[:, c])[-tailsize:] + mr.fit_high(tailtofit, len(tailtofit)) + weibull_model[cls_gt]['weibull_model'] += [mr] + return weibull_model + + +def compute_openmax_prob(openmax_score, openmax_score_u): + num_channels, num_cls = openmax_score.shape + prob_scores, prob_unknowns = [], [] + for c in range(num_channels): + channel_scores, channel_unknowns = [], [] + for gt_cls in range(num_cls): + channel_scores += [np.exp(openmax_score[c, gt_cls])] + + total_denominator = np.sum(np.exp(openmax_score[c, :])) + np.exp(np.sum(openmax_score_u[c, :])) + prob_scores += [channel_scores/total_denominator ] + prob_unknowns += [np.exp(np.sum(openmax_score_u[c, :]))/total_denominator] + + prob_scores = np.array(prob_scores) + prob_unknowns = np.array(prob_unknowns) + + scores = np.mean(prob_scores, axis=0) + unknowns = np.mean(prob_unknowns, axis=0) + modified_scores = scores.tolist() + [unknowns] + assert len(modified_scores) == num_cls + 1 + modified_scores = np.expand_dims(np.array(modified_scores), axis=0) + return modified_scores + + +def openmax_recalibrate(weibull_model, feature, score, rank=1, distance_type='eucos'): + num_channels, num_cls = score.shape + # get the ranked alpha + alpharank = min(num_cls, rank) + ranked_list = np.mean(score, axis=0).argsort().ravel()[::-1] + alpha_weights = [((alpharank+1) - i)/float(alpharank) for i in range(1, alpharank+1)] + ranked_alpha = np.zeros((num_cls,)) + for i in range(len(alpha_weights)): + ranked_alpha[ranked_list[i]] = alpha_weights[i] + # calibrate + openmax_score, openmax_score_u = [], [] + for c in range(num_channels): + channel_scores = score[c, :] + openmax_channel = [] + openmax_unknown = [] + for cls_gt in range(num_cls): + # get distance between current channel and mean vector + mav_train = weibull_model[cls_gt]['mean_vec'] + category_weibull = weibull_model[cls_gt]['weibull_model'] + channel_distance = compute_channel_distance(mav_train[c, :], feature[c, :], distance_type=distance_type) + # 
obtain w_score for the distance and compute probability of the distance + wscore = category_weibull[c].w_score(channel_distance) + modified_score = channel_scores[cls_gt] * ( 1 - wscore*ranked_alpha[cls_gt] ) + openmax_channel += [modified_score] + openmax_unknown += [channel_scores[cls_gt] - modified_score] + # gather modified scores for each channel + openmax_score += [openmax_channel] + openmax_score_u += [openmax_unknown] + openmax_score = np.array(openmax_score) + openmax_score_u = np.array(openmax_score_u) + # Pass the recalibrated scores into openmax + openmax_prob = compute_openmax_prob(openmax_score, openmax_score_u) + + return openmax_prob + + +def run_inference(model, weibull_model, datalist_file): + # switch config for different dataset + cfg = model.cfg + cfg.data.test.ann_file = datalist_file + cfg.data.test.data_prefix = os.path.join(os.path.dirname(datalist_file), 'videos') + cfg.test_cfg.average_clips = 'score' # we only need scores before softmax layer + model.cfg.data.videos_per_gpu = 1 + model.cfg.data.workers_per_gpu = 0 + num_cls = model.cls_head.num_classes + + # build the dataloader + dataset = build_dataset(cfg.data.test, dict(test_mode=True)) + dataloader_setting = dict( + videos_per_gpu=cfg.data.get('videos_per_gpu', 1), + workers_per_gpu=cfg.data.get('workers_per_gpu', 1), + dist=False, + shuffle=False, + pin_memory=False) + dataloader_setting = dict(dataloader_setting, **cfg.data.get('test_dataloader', {})) + data_loader = build_dataloader(dataset, **dataloader_setting) + + model = MMDataParallel(model, device_ids=[0]) + all_softmax, all_openmax, all_gts = [], [], [] + prog_bar = mmcv.ProgressBar(len(data_loader.dataset)) + for i, data in enumerate(data_loader): + with torch.no_grad(): + feat_blob, score = model(return_loss=False, return_score=True, get_feat=True, **data) + softmax_prob = F.softmax(score, dim=1).cpu().numpy() + # aggregate features + feat_blob = spatial_temporal_pooling(feat_blob) + feat_final = feat_blob.cpu().numpy() + # Re-calibrate score before softmax with OpenMax + openmax_prob = openmax_recalibrate(weibull_model, feat_final, score.cpu().numpy()) + # gather preds + all_openmax.append(openmax_prob) + all_softmax.append(softmax_prob) + # gather label + labels = data['label'].numpy() + all_gts.append(labels) + + # use the first key as main key to calculate the batch size + batch_size = len(next(iter(data.values()))) + for _ in range(batch_size): + prog_bar.update() + + all_softmax = np.concatenate(all_softmax, axis=0) + all_openmax = np.concatenate(all_openmax, axis=0) + all_gts = np.concatenate(all_gts, axis=0) + + return all_openmax, all_softmax, all_gts + + +def evaluate_openmax(ind_openmax, ood_openmax, ind_labels, ood_labels, ood_ncls, num_rand=10): + ind_ncls = model.cls_head.num_classes + ind_results = np.argmax(ind_openmax, axis=1) + ood_results = np.argmax(ood_openmax, axis=1) + + # close-set accuracy (multi-class) + acc = accuracy_score(ind_labels, ind_results) + + # open-set auc-roc (binary class) + preds = np.concatenate((ind_results, ood_results), axis=0) + preds[preds == ind_ncls] = 1 # unknown class + preds[preds != 1] = 0 # known class + labels = np.concatenate((np.zeros_like(ind_labels), np.ones_like(ood_labels))) + auc = roc_auc_score(labels, preds) + print('OpenMax: ClosedSet Accuracy (multi-class): %.3lf, OpenSet AUC (bin-class): %.3lf'%(acc * 100, auc * 100)) + + # open set F1 score (multi-class) + macro_F1_list = [f1_score(ind_labels, ind_results, average='macro')] + std_list = [0] + openness_list = [0] + for n in 
range(ood_ncls): + ncls_novel = n + 1 + openness = (1 - np.sqrt((2 * ind_ncls) / (2 * ind_ncls + ncls_novel))) * 100 + openness_list.append(openness) + # randoml select the subset of ood samples + macro_F1_multi = np.zeros((num_rand), dtype=np.float32) + for m in range(num_rand): + cls_select = np.random.choice(ood_ncls, ncls_novel, replace=False) + ood_sub_results = np.concatenate([ood_results[ood_labels == clsid] for clsid in cls_select]) + ood_sub_labels = np.ones_like(ood_sub_results) * ind_ncls + # construct preds and labels + preds = np.concatenate((ind_results, ood_sub_results), axis=0) + labels = np.concatenate((ind_labels, ood_sub_labels), axis=0) + macro_F1_multi[m] = f1_score(labels, preds, average='macro') + macro_F1 = np.mean(macro_F1_multi) + std = np.std(macro_F1_multi) + macro_F1_list.append(macro_F1) + std_list.append(std) + + # draw comparison curves + macro_F1_list = np.array(macro_F1_list) + std_list = np.array(std_list) + + w_openness = np.array(openness_list) / 100. + open_maF1_mean = np.sum(w_openness * macro_F1_list) / np.sum(w_openness) + open_maF1_std = np.sum(w_openness * std_list) / np.sum(w_openness) + print('Open macro-F1 score: %.3f, std=%.3lf'%(open_maF1_mean * 100, open_maF1_std * 100)) + + return openness_list, macro_F1_list, std_list + + + +if __name__ == '__main__': + args = parse_args() + # assign the desired device. + device = torch.device(args.device) + set_deterministic(0) + + # initialize recognition model + model = init_recognizer(args.config, args.checkpoint, device=device, use_frames=False) + torch.backends.cudnn.benchmark = True + model.cfg.data.test.test_mode = True + + ######## Compute the Mean Activation Vector (MAV) and Distances ######## + if not os.path.exists(args.cache_mav_dist): + os.makedirs(args.cache_mav_dist) + # parse the video files list of training set + videolist, labels = get_datalist(args.trainset_split) + # compute mav and dist + mav_dist_list = compute_mav_dist(videolist, labels, model, args.cache_mav_dist) + + + ######## OOD and IND detection ######## + result_file = os.path.join(args.result_prefix + '_result.npz') + if not os.path.exists(result_file): + # prepare result path + result_dir = os.path.dirname(result_file) + if not os.path.exists(result_dir): + os.makedirs(result_dir) + # Weibull Model by EVT Fitting + print("Weibull fitting...") + weibull_model = weibull_fitting(mav_dist_list) + # run inference (OOD) + ood_openmax, ood_softmax, ood_labels = run_inference(model, weibull_model, args.ood_data) + # run inference (OOD) + ind_openmax, ind_softmax, ind_labels = run_inference(model, weibull_model, args.ind_data) + # save + np.savez(result_file[:-4], ind_openmax=ind_openmax, ood_openmax=ood_openmax, + ind_softmax=ind_softmax, ood_softmax=ood_softmax, + ind_label=ind_labels, ood_label=ood_labels) + else: + results = np.load(result_file, allow_pickle=True) + ind_openmax = results['ind_openmax'] # (N1, C+1) + ood_openmax = results['ood_openmax'] # (N2, C+1) + ind_softmax = results['ind_softmax'] # (N1, C) + ood_softmax = results['ood_softmax'] # (N2, C) + ind_labels = results['ind_label'] # (N1,) + ood_labels = results['ood_label'] # (N2,) + + ######## Evaluation ######## + openness_list, macro_F1_list, std_list = evaluate_openmax(ind_openmax, ood_openmax, ind_labels, ood_labels, args.ood_ncls, num_rand=args.num_rand) + + # draw F1 curve + plt.figure(figsize=(8,5)) # (w, h) + plt.plot(openness_list, macro_F1_list, 'r-', linewidth=2) + # plt.fill_between(openness_list, macro_F1_list - std_list, macro_F1_list + std_list, 
'c') + plt.ylim(0.5, 1.0) + plt.xlabel('Openness (%)') + plt.ylabel('macro F1') + plt.grid('on') + plt.legend('OpenMax') + plt.tight_layout() + dataset_name = args.result_prefix.split('_')[-1] + png_file = os.path.join(os.path.dirname(args.result_prefix), 'F1_openness_%s.png'%(dataset_name)) + plt.savefig(png_file) + print('Openness curve figure is saved in: %s'%(png_file)) \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/baseline_rpl.py b/Downstream/Open-Set-Action-Recognition/experiments/baseline_rpl.py new file mode 100644 index 0000000..04c8246 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/baseline_rpl.py @@ -0,0 +1,202 @@ +import argparse +import os +import os.path as osp +import torch +import mmcv +from mmaction.apis import init_recognizer +from mmcv.parallel import collate, scatter +from mmaction.datasets.pipelines import Compose +from mmaction.datasets import build_dataloader, build_dataset +from mmcv.parallel import MMDataParallel +import numpy as np +from tqdm import tqdm +from sklearn.metrics import f1_score, roc_auc_score, accuracy_score +import torch.multiprocessing +torch.multiprocessing.set_sharing_strategy('file_system') + + +def parse_args(): + """ + experiments/baseline_rpl.py --config configs/recognition/tsm/inference_tsm_rpl.py \ + --checkpoint work_dirs/tsm/finetune_ucf101_tsm_rpl/latest.pth \ + --train_data data/ucf101/ucf101_train_split_1_videos.txt \ + --ind_data + --result_prefix experiments/tsm/results_baselines/rpl/RPL + """ + parser = argparse.ArgumentParser(description='MMAction2 test') + # model and data config + parser.add_argument('--config', help='test config file path') + parser.add_argument('--checkpoint', help='checkpoint file/url') + parser.add_argument('--train_data', help='the split file of in-distribution training data') + parser.add_argument('--batch_size', type=int, default=8, help='the testing batch size') + # test data config + parser.add_argument('--ind_data', help='the split file of in-distribution testing data') + parser.add_argument('--ood_data', help='the split file of out-of-distribution testing data') + parser.add_argument('--ood_ncls', type=int, help='the number of classes in unknwon dataset') + parser.add_argument('--ood_dataname', choices=['HMDB', 'MiT'], help='the name of out-of-distribution testing data') + # env config + parser.add_argument('--device', type=str, default='cuda:0', help='CPU/CUDA device option') + parser.add_argument('--result_prefix', help='result file prefix') + args = parser.parse_args() + return args + + +def set_deterministic(seed): + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) # if you are using multi-GPU. + np.random.seed(seed) # Numpy module. 
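+    # NOTE: for strict reproducibility, cudnn.benchmark would normally be set to False here.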
+ torch.backends.cudnn.benchmark = True + torch.backends.cudnn.deterministic = True + + +def run_inference(config, checkpoint, data_split, batch_size, device): + # initialize recognition model + model = init_recognizer(config, checkpoint, device=device, use_frames=False) + torch.backends.cudnn.benchmark = True + model.cfg.data.test.test_mode = True + model.cfg.test_cfg.average_clips = 'prob' # we need the probability socore from softmax layer + model.cfg.data.videos_per_gpu = batch_size # batch size + model.cfg.data.test.ann_file = data_split + model.cfg.data.test.data_prefix = os.path.join(os.path.dirname(data_split), 'videos') + + # build the dataloader + dataset = build_dataset(model.cfg.data.test, dict(test_mode=True)) + dataloader_setting = dict( + videos_per_gpu=model.cfg.data.get('videos_per_gpu', 1), + workers_per_gpu=model.cfg.data.get('workers_per_gpu', 1), + dist=False, + shuffle=False, + pin_memory=False) + dataloader_setting = dict(dataloader_setting, **model.cfg.data.get('test_dataloader', {})) + data_loader = build_dataloader(dataset, **dataloader_setting) + + # running the inference + model = MMDataParallel(model, device_ids=[0]) + all_scores, all_labels = [], [] + prog_bar = mmcv.ProgressBar(len(data_loader.dataset)) + for i, data in enumerate(data_loader): + with torch.no_grad(): + scores = model(return_loss=False, **data) # (B, C) + all_scores.append(scores) + # gather labels + labels = data['label'].numpy() + all_labels.append(labels) + # use the first key as main key to calculate the batch size + bs = len(next(iter(data.values()))) + for _ in range(bs): + prog_bar.update() + all_scores = np.concatenate(all_scores, axis=0) + all_labels = np.concatenate(all_labels, axis=0) + return all_scores, all_labels + + +def evaluate_softmax(ind_softmax, ood_softmax, ind_labels, ood_labels, ood_ncls, thresh, num_rand=10): + + ind_ncls = ind_softmax.shape[1] + ind_results = np.argmax(ind_softmax, axis=1) + ood_results = np.argmax(ood_softmax, axis=1) + # close-set accuracy (multi-class) + acc = accuracy_score(ind_labels, ind_results) + + # open-set auc-roc (binary class) + ind_conf = np.max(ind_softmax, axis=1) + ood_conf = np.max(ood_softmax, axis=1) + preds = np.concatenate((ind_results, ood_results), axis=0) + confs = np.concatenate((ind_conf, ood_conf), axis=0) + preds[confs < threshold] = 1 # unknown class + preds[confs >= threshold] = 0 # known class + labels = np.concatenate((np.zeros_like(ind_labels), np.ones_like(ood_labels))) + auc = roc_auc_score(labels, preds) + print('\nClosedSet Accuracy (multi-class): %.3lf, OpenSet AUC (bin-class): %.3lf'%(acc * 100, auc * 100)) + + ind_results[ind_conf < thresh] = ind_ncls # incorrect rejection + # open set F1 score (multi-class) + macro_F1_list = [f1_score(ind_labels, ind_results, average='macro')] + std_list = [0] + openness_list = [0] + for n in range(ood_ncls): + ncls_novel = n + 1 + openness = (1 - np.sqrt((2 * ind_ncls) / (2 * ind_ncls + ncls_novel))) * 100 + openness_list.append(openness) + # randoml select the subset of ood samples + macro_F1_multi = np.zeros((num_rand), dtype=np.float32) + for m in range(num_rand): + cls_select = np.random.choice(ood_ncls, ncls_novel, replace=False) + ood_sub_results = np.concatenate([ood_results[ood_labels == clsid] for clsid in cls_select]) + ood_sub_labels = np.ones_like(ood_sub_results) * ind_ncls + ood_sub_confs = np.concatenate([ood_conf[ood_labels == clsid] for clsid in cls_select]) + ood_sub_results[ood_sub_confs < thresh] = ind_ncls # correct rejection + # construct preds and labels 
+ preds = np.concatenate((ind_results, ood_sub_results), axis=0) + labels = np.concatenate((ind_labels, ood_sub_labels), axis=0) + macro_F1_multi[m] = f1_score(labels, preds, average='macro') + macro_F1 = np.mean(macro_F1_multi) + std = np.std(macro_F1_multi) + macro_F1_list.append(macro_F1) + std_list.append(std) + + # draw comparison curves + macro_F1_list = np.array(macro_F1_list) + std_list = np.array(std_list) + + w_openness = np.array(openness_list) / 100. + open_maF1_mean = np.sum(w_openness * macro_F1_list) / np.sum(w_openness) + open_maF1_std = np.sum(w_openness * std_list) / np.sum(w_openness) + print('Open macro-F1 score: %.3f, std=%.3lf'%(open_maF1_mean * 100, open_maF1_std * 100)) + + return openness_list, macro_F1_list, std_list + + +if __name__ == '__main__': + + args = parse_args() + # assign the desired device. + device = torch.device(args.device) + set_deterministic(0) + modelname = os.path.dirname(args.config).split('/')[-1].upper() + + ######## Compute threshold with training data ######## + result_file = os.path.join(os.path.dirname(args.result_prefix), modelname + '_RPL_trainset_softmax.npz') + if not os.path.exists(result_file): + # prepare result path + result_dir = os.path.dirname(result_file) + if not os.path.exists(result_dir): + os.makedirs(result_dir) + # run the inference on training data + trainset_scores, _ = run_inference(args.config, args.checkpoint, args.train_data, args.batch_size, device) + # save + np.savez(result_file[:-4], trainset_scores=trainset_scores) + else: + result = np.load(result_file) + trainset_scores = result['trainset_scores'] + + max_scores = np.max(trainset_scores, axis=1) + scores_sort = np.sort(max_scores)[::-1] # sort the uncertainties with descending order + N = max_scores.shape[0] + threshold = scores_sort[int(N * 0.95)-1] # 95% percentile + + print('\nThe RPL softmax threshold on UCF-101 train set: %lf'%(threshold)) + + ######## OOD and IND detection ######## + testset_result = os.path.join(os.path.dirname(args.result_prefix), modelname +'_RPL_'+ args.ood_dataname +'_result.npz') + if not os.path.exists(testset_result): + # prepare result path + result_dir = os.path.dirname(testset_result) + if not os.path.exists(result_dir): + os.makedirs(result_dir) + # run the inference on OOD data + ood_softmax, ood_labels = run_inference(args.config, args.checkpoint, args.ood_data, args.batch_size, device) + # run the inference on IND data + ind_softmax, ind_labels = run_inference(args.config, args.checkpoint, args.ind_data, args.batch_size, device) + # save + np.savez(testset_result[:-4], ind_softmax=ind_softmax, ood_softmax=ood_softmax, + ind_label=ind_labels, ood_label=ood_labels) + else: + results = np.load(testset_result, allow_pickle=True) + ind_softmax = results['ind_softmax'] # (N1, C) + ood_softmax = results['ood_softmax'] # (N2, C) + ind_labels = results['ind_label'] # (N1,) + ood_labels = results['ood_label'] # (N2,) + + openness_list, macro_F1_list, std_list = evaluate_softmax(ind_softmax, ood_softmax, ind_labels, ood_labels, args.ood_ncls, threshold) \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/baseline_softmax.py b/Downstream/Open-Set-Action-Recognition/experiments/baseline_softmax.py new file mode 100644 index 0000000..dd5084f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/baseline_softmax.py @@ -0,0 +1,182 @@ +import argparse, os, sys +import torch +import mmcv +import numpy as np +import torch.nn.functional as F +from mmcv.parallel import collate, scatter 
+from mmaction.datasets.pipelines import Compose +from mmaction.apis import init_recognizer +from mmaction.datasets import build_dataloader, build_dataset +from mmcv.parallel import MMDataParallel +from tqdm import tqdm +from sklearn.metrics import f1_score, roc_auc_score, accuracy_score +import matplotlib.pyplot as plt + + +def parse_args(): + """ Example shell script: + $ cd experiments + $ source activate mmaction + $ nohup python baseline_openmax.py --model i3d --config configs/recognition/tpn/inference_tpn_slowonly_dnn.py + """ + parser = argparse.ArgumentParser(description='MMAction2 test') + # model config + parser.add_argument('--config', help='test config file path') + parser.add_argument('--checkpoint', help='checkpoint file/url') + parser.add_argument('--train_data', default='data/ucf101/ucf101_train_split_1_videos.txt', help='the split file of in-distribution training data') + parser.add_argument('--batch_size', type=int, default=8, help='the testing batch size') + parser.add_argument('--ood_ncls', type=int, help='the number of classes in unknwon dataset') + # device + parser.add_argument('--device', type=str, default='cuda:0', help='CPU/CUDA device option') + parser.add_argument('--result_prefix', help='result file prefix') + args = parser.parse_args() + return args + + +def set_deterministic(seed): + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) # if you are using multi-GPU. + np.random.seed(seed) # Numpy module. + torch.backends.cudnn.benchmark = True + torch.backends.cudnn.deterministic = True + + +def run_inference(config, checkpoint, train_data, batch_size, device): + # initialize recognition model + model = init_recognizer(config, checkpoint, device=device, use_frames=False) + torch.backends.cudnn.benchmark = True + model.cfg.data.test.test_mode = True + model.cfg.test_cfg.average_clips = 'prob' # we need the probability socore from softmax layer + model.cfg.data.videos_per_gpu = batch_size # batch size of training set + # We use training data to obtain the threshold + model.cfg.data.test.ann_file = train_data + model.cfg.data.test.data_prefix = os.path.join(os.path.dirname(train_data), 'videos') + + # build the dataloader + dataset = build_dataset(model.cfg.data.test, dict(test_mode=True)) + dataloader_setting = dict( + videos_per_gpu=model.cfg.data.get('videos_per_gpu', 1), + workers_per_gpu=model.cfg.data.get('workers_per_gpu', 1), + dist=False, + shuffle=False, + pin_memory=False) + dataloader_setting = dict(dataloader_setting, **model.cfg.data.get('test_dataloader', {})) + data_loader = build_dataloader(dataset, **dataloader_setting) + + # running the inference + model = MMDataParallel(model, device_ids=[0]) + all_scores = [] + prog_bar = mmcv.ProgressBar(len(data_loader.dataset)) + for i, data in enumerate(data_loader): + with torch.no_grad(): + scores = model(return_loss=False, **data) # (B, C) + all_scores.append(scores) + # use the first key as main key to calculate the batch size + bs = len(next(iter(data.values()))) + for _ in range(bs): + prog_bar.update() + all_scores = np.concatenate(all_scores, axis=0) + return all_scores + + +def evaluate_softmax(ind_softmax, ood_softmax, ind_labels, ood_labels, ood_ncls, thresh, num_rand=10): + + ind_ncls = ind_softmax.shape[1] + ind_results = np.argmax(ind_softmax, axis=1) + ood_results = np.argmax(ood_softmax, axis=1) + # close-set accuracy (multi-class) + acc = accuracy_score(ind_labels, ind_results) + + # open-set auc-roc (binary class) + ind_conf = np.max(ind_softmax, 
axis=1)
+    ood_conf = np.max(ood_softmax, axis=1)
+    preds = np.concatenate((ind_results, ood_results), axis=0)
+    confs = np.concatenate((ind_conf, ood_conf), axis=0)
+    preds[confs < thresh] = 1   # unknown class
+    preds[confs >= thresh] = 0  # known class
+    labels = np.concatenate((np.zeros_like(ind_labels), np.ones_like(ood_labels)))
+    auc = roc_auc_score(labels, preds)
+    print('SoftMax: ClosedSet Accuracy (multi-class): %.3lf, OpenSet AUC (bin-class): %.3lf'%(acc * 100, auc * 100))
+
+    ind_results[ind_conf < thresh] = ind_ncls  # false rejection of known samples as unknown
+    # open set F1 score (multi-class)
+    macro_F1_list = [f1_score(ind_labels, ind_results, average='macro')]
+    std_list = [0]
+    openness_list = [0]
+    for n in range(ood_ncls):
+        ncls_novel = n + 1
+        openness = (1 - np.sqrt((2 * ind_ncls) / (2 * ind_ncls + ncls_novel))) * 100
+        openness_list.append(openness)
+        # randomly select the subset of ood samples
+        macro_F1_multi = np.zeros((num_rand), dtype=np.float32)
+        for m in range(num_rand):
+            cls_select = np.random.choice(ood_ncls, ncls_novel, replace=False)
+            ood_sub_results = np.concatenate([ood_results[ood_labels == clsid] for clsid in cls_select])
+            ood_sub_labels = np.ones_like(ood_sub_results) * ind_ncls
+            ood_sub_confs = np.concatenate([ood_conf[ood_labels == clsid] for clsid in cls_select])
+            ood_sub_results[ood_sub_confs < thresh] = ind_ncls  # true rejection of unknown samples
+            # construct preds and labels
+            preds = np.concatenate((ind_results, ood_sub_results), axis=0)
+            labels = np.concatenate((ind_labels, ood_sub_labels), axis=0)
+            macro_F1_multi[m] = f1_score(labels, preds, average='macro')
+        macro_F1 = np.mean(macro_F1_multi)
+        std = np.std(macro_F1_multi)
+        macro_F1_list.append(macro_F1)
+        std_list.append(std)
+
+    # summarize the openness curve
+    macro_F1_list = np.array(macro_F1_list)
+    std_list = np.array(std_list)
+
+    w_openness = np.array(openness_list) / 100.
+    open_maF1_mean = np.sum(w_openness * macro_F1_list) / np.sum(w_openness)
+    open_maF1_std = np.sum(w_openness * std_list) / np.sum(w_openness)
+    print('Open macro-F1 score: %.3f, std=%.3lf'%(open_maF1_mean * 100, open_maF1_std * 100))
+
+    return openness_list, macro_F1_list, std_list
+
+
+if __name__ == '__main__':
+    args = parse_args()
+    # assign the desired device.
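+    # (Sketch of the procedure below, mirroring the RPL baseline above: run the recognizer once
+    #  over the UCF-101 training split, cache the max-softmax scores, and take the threshold at
+    #  the 95% rank of the descending-sorted scores. For example, assuming roughly N = 9537
+    #  training clips in UCF-101 split 1, the threshold is scores_sort[int(N * 0.95) - 1]
+    #  = scores_sort[9059], so about 95% of known training clips score at or above it.)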
+ device = torch.device(args.device) + set_deterministic(0) + modelname, methodname, dataname = args.result_prefix.split('/')[-1].split('_') + + ######## Compute threshold with training data ######## + result_file = os.path.join(os.path.dirname(args.result_prefix), modelname +'_SoftMax_trainset_softmax.npz') + if not os.path.exists(result_file): + # prepare result path + result_dir = os.path.dirname(result_file) + if not os.path.exists(result_dir): + os.makedirs(result_dir) + # run the inference on training data + trainset_scores = run_inference(args.config, args.checkpoint, args.train_data, args.batch_size, device) + # save + np.savez(result_file[:-4], trainset_scores=trainset_scores) + else: + result = np.load(result_file) + trainset_scores = result['trainset_scores'] + + max_scores = np.max(trainset_scores, axis=1) + scores_sort = np.sort(max_scores)[::-1] # sort the uncertainties with descending order + N = max_scores.shape[0] + threshold = scores_sort[int(N * 0.95)-1] + + print('\nThe model %s softmax threshold on UCF-101 train set: %lf'%(args.result_prefix.split('/')[-1], threshold)) + + # load the softmax results on testing dataset (in OpenMax baseline) + ######## OOD and IND detection ######## + openmax_result = os.path.join(os.path.dirname(args.result_prefix), '../openmax', modelname +'_OpenMax_'+ dataname +'_result.npz') + if not os.path.exists(openmax_result): + print('File does not exist! %s'%(openmax_result)) + print('Run baseline_openmax.py first to get softmax testing results!') + else: + results = np.load(openmax_result, allow_pickle=True) + ind_softmax = results['ind_softmax'] # (N1, C) + ood_softmax = results['ood_softmax'] # (N2, C) + ind_labels = results['ind_label'] # (N1,) + ood_labels = results['ood_label'] # (N2,) + + openness_list, macro_F1_list, std_list = evaluate_softmax(ind_softmax, ood_softmax, ind_labels, ood_labels, args.ood_ncls, threshold) \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/compare_openness.py b/Downstream/Open-Set-Action-Recognition/experiments/compare_openness.py new file mode 100644 index 0000000..6a51dcd --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/compare_openness.py @@ -0,0 +1,114 @@ +import os +import argparse +import numpy as np +from sklearn.metrics import f1_score, roc_auc_score, accuracy_score +import matplotlib.pyplot as plt + +def parse_args(): + '''Command instruction: + source activate mmaction + python experiments/compare_openness.py --ind_ncls 101 --ood_ncls 51 + ''' + parser = argparse.ArgumentParser(description='Compare the performance of openness') + # model config + parser.add_argument('--base_model', default='i3d', help='the backbone model name') + parser.add_argument('--baselines', nargs='+', default=['I3D_Dropout_BALD', 'I3D_BNN_BALD', 'I3D_EDLlog_EDL', 'I3D_EDLlogAvUC_EDL']) + parser.add_argument('--thresholds', nargs='+', type=float, default=[0.000423, 0.000024, 0.495783, 0.495783]) + parser.add_argument('--styles', nargs='+', default=['-b', '-k', '-r', '-g', '-m']) + parser.add_argument('--ind_ncls', type=int, default=101, help='the number of classes in known dataset') + parser.add_argument('--ood_ncls', type=int, help='the number of classes in unknwon dataset') + parser.add_argument('--ood_data', default='HMDB', help='the name of OOD dataset.') + parser.add_argument('--num_rand', type=int, default=10, help='the number of random selection for ood classes') + parser.add_argument('--result_png', default='F1_openness_compare_HMDB.png') + args = 
parser.parse_args() + return args + + +def main(): + + result_path = os.path.join('./experiments', args.base_model, 'results') + plt.figure(figsize=(8,5)) # (w, h) + plt.rcParams["font.family"] = "Arial" # Times New Roman + fontsize = 15 + for style, thresh, baseline in zip(args.styles, args.thresholds, args.baselines): + result_file = os.path.join(result_path, baseline + '_%s'%(args.ood_data) + '_result.npz') + assert os.path.exists(result_file), "File not found! Run ood_detection first!" + # load the testing results + results = np.load(result_file, allow_pickle=True) + ind_uncertainties = results['ind_unctt'] # (N1,) + ood_uncertainties = results['ood_unctt'] # (N2,) + ind_results = results['ind_pred'] # (N1,) + ood_results = results['ood_pred'] # (N2,) + ind_labels = results['ind_label'] + ood_labels = results['ood_label'] + + # close-set accuracy (multi-class) + acc = accuracy_score(ind_labels, ind_results) + # open-set auc-roc (binary class) + preds = np.concatenate((ind_results, ood_results), axis=0) + uncertains = np.concatenate((ind_uncertainties, ood_uncertainties), axis=0) + preds[uncertains > thresh] = 1 + preds[uncertains <= thresh] = 0 + labels = np.concatenate((np.zeros_like(ind_labels), np.ones_like(ood_labels))) + aupr = roc_auc_score(labels, preds) + print('Model: %s, ClosedSet Accuracy (multi-class): %.3lf, OpenSet AUC (bin-class): %.3lf'%(baseline, acc * 100, aupr * 100)) + + # open set F1 score (multi-class) + ind_results[ind_uncertainties > thresh] = args.ind_ncls # falsely rejection + macro_F1_list = [f1_score(ind_labels, ind_results, average='macro')] + std_list = [0] + openness_list = [0] + for n in range(args.ood_ncls): + ncls_novel = n + 1 + openness = (1 - np.sqrt((2 * args.ind_ncls) / (2 * args.ind_ncls + ncls_novel))) * 100 + openness_list.append(openness) + # randoml select the subset of ood samples + macro_F1_multi = np.zeros((args.num_rand), dtype=np.float32) + for m in range(args.num_rand): + cls_select = np.random.choice(args.ood_ncls, ncls_novel, replace=False) + ood_sub_results = np.concatenate([ood_results[ood_labels == clsid] for clsid in cls_select]) + ood_sub_uncertainties = np.concatenate([ood_uncertainties[ood_labels == clsid] for clsid in cls_select]) + ood_sub_results[ood_sub_uncertainties > thresh] = args.ind_ncls # correctly rejection + ood_sub_labels = np.ones_like(ood_sub_results) * args.ind_ncls + # construct preds and labels + preds = np.concatenate((ind_results, ood_sub_results), axis=0) + labels = np.concatenate((ind_labels, ood_sub_labels), axis=0) + macro_F1_multi[m] = f1_score(labels, preds, average='macro') + macro_F1 = np.mean(macro_F1_multi) + std = np.std(macro_F1_multi) + macro_F1_list.append(macro_F1) + std_list.append(std) + + # draw comparison curves + macro_F1_list = np.array(macro_F1_list) + std_list = np.array(std_list) + plt.plot(openness_list, macro_F1_list * 100, style, linewidth=2) + # plt.fill_between(openness_list, macro_F1_list - std_list, macro_F1_list + std_list, style) + + w_openness = np.array(openness_list) / 100. 
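+        # Openness here follows the standard open-set definition:
+        #     openness = (1 - sqrt(2 * C_known / (2 * C_known + C_novel))) * 100,
+        # e.g. with C_known = 101 (UCF-101) and C_novel = 51 (HMDB-51) the largest openness is
+        # (1 - sqrt(202 / 253)) * 100 ~= 10.6%. The summary below is the openness-weighted mean
+        # of the macro-F1 curve, so operating points at larger openness contribute more.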
+ open_maF1_mean = np.sum(w_openness * macro_F1_list) / np.sum(w_openness) + open_maF1_std = np.sum(w_openness * std_list) / np.sum(w_openness) + print('Open macro-F1 score: %.3f, std=%.3lf'%(open_maF1_mean * 100, open_maF1_std * 100)) + + plt.xlim(0, max(openness_list)) + plt.ylim(60, 80) + plt.xlabel('Openness (%)', fontsize=fontsize) + plt.ylabel('macro F1 (%)', fontsize=fontsize) + plt.grid('on') + # plt.legend(args.baselines) + plt.legend(['MC Dropout BALD', 'BNN SVI BALD', 'DEAR (vanilla)', 'DEAR (alter)', 'DEAR (joint)'], loc='lower left', fontsize=fontsize) + plt.xticks(fontsize=fontsize) + plt.yticks(fontsize=fontsize) + plt.tight_layout() + png_file = os.path.join(result_path, args.result_png) + plt.savefig(png_file) + plt.savefig(png_file[:-4] + '.pdf') + print('Openness curve figure is saved in: %s'%(png_file)) + + +if __name__ == "__main__": + + np.random.seed(123) + args = parse_args() + + main() \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/compare_openness_new.py b/Downstream/Open-Set-Action-Recognition/experiments/compare_openness_new.py new file mode 100644 index 0000000..4a12cbf --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/compare_openness_new.py @@ -0,0 +1,167 @@ +import os +import argparse +from matplotlib.pyplot import axis +import numpy as np +from sklearn.metrics import roc_auc_score, accuracy_score, precision_recall_curve, auc, roc_curve +from terminaltables import AsciiTable + +def parse_args(): + '''Command instruction: + source activate mmaction + python experiments/compare_openness.py + ''' + parser = argparse.ArgumentParser(description='Compare the performance of openness') + # model config + parser.add_argument('--base_model', default='i3d', help='the backbone model name') + parser.add_argument('--ood_data', default='HMDB', help='the name of OOD dataset.') + parser.add_argument('--thresholds', nargs='+', type=float, default=[-1,-1,-1,-1,-1,-1]) + parser.add_argument('--baseline_results', nargs='+', help='the testing results files.') + args = parser.parse_args() + return args + + +def eval_osr(y_true, y_pred): + # open-set auc-roc (binary class) + auroc = roc_auc_score(y_true, y_pred) + + # open-set auc-pr (binary class) + # as an alternative, you may also use `ap = average_precision_score(labels, uncertains)`, which is approximate to aupr. + precision, recall, _ = precision_recall_curve(y_true, y_pred) + aupr = auc(recall, precision) + + # open-set fpr@95 (binary class) + fpr, tpr, _ = roc_curve(y_true, y_pred, pos_label=1) + operation_idx = np.abs(tpr - 0.95).argmin() + fpr95 = fpr[operation_idx] # FPR when TPR at 95% + + return auroc, aupr, fpr95 + + +def parse_results(result_file, method='softmax'): + # Softmax and OpenMax + assert os.path.exists(result_file), "File not found! 
Run baseline_openmax.py first to get softmax testing results!\n%s"%(result_file) + results = np.load(result_file, allow_pickle=True) + # parse results + ind_labels = results['ind_label'] # (N1,) + ood_labels = results['ood_label'] # (N2,) + if method == 'softmax': + ind_softmax = results['ind_softmax'] # (N1, C) + ood_softmax = results['ood_softmax'] # (N2, C) + return ind_softmax, ood_softmax, ind_labels, ood_labels + elif method == 'openmax': + ind_openmax = results['ind_openmax'] # (N1, C+1) + ood_openmax = results['ood_openmax'] # (N2, C+1) + return ind_openmax, ood_openmax, ind_labels, ood_labels + + +def eval_confidence_methods(ind_probs, ood_probs, ind_labels, ood_labels, score='max_prob', ind_ncls=101, threshold=-1): + # close-set accuracy (multi-class) + ind_results = np.argmax(ind_probs, axis=1) + ood_results = np.argmax(ood_probs, axis=1) + acc = accuracy_score(ind_labels, ind_results) + + # open-set evaluation (binary class) + if score == 'binary': + preds = np.concatenate((ind_results, ood_results), axis=0) + idx_pos = preds == ind_ncls + idx_neg = preds != ind_ncls + preds[idx_pos] = 1 # unknown class + preds[idx_neg] = 0 # known class + elif score == 'max_prob': + ind_conf = np.max(ind_probs, axis=1) + ood_conf = np.max(ood_probs, axis=1) + confs = np.concatenate((ind_conf, ood_conf), axis=0) + if threshold > 0: + preds = np.concatenate((ind_results, ood_results), axis=0) + preds[confs < threshold] = 1 # unknown class + preds[confs >= threshold] = 0 # known class + else: + preds = 1 - confs + labels = np.concatenate((np.zeros_like(ind_labels), np.ones_like(ood_labels))) + auroc, aupr, fpr95 = eval_osr(labels, preds) + + return acc, auroc, aupr, fpr95 + + +def eval_uncertainty_methods(result_file, threshold=-1): + assert os.path.exists(result_file), "File not found! 
Run ood_detection first!\n%s"%(result_file) + # load the testing results + results = np.load(result_file, allow_pickle=True) + ind_uncertainties = results['ind_unctt'] # (N1,) + ood_uncertainties = results['ood_unctt'] # (N2,) + ind_results = results['ind_pred'] # (N1,) + ood_results = results['ood_pred'] # (N2,) + ind_labels = results['ind_label'] + ood_labels = results['ood_label'] + + # close-set accuracy (multi-class) + acc = accuracy_score(ind_labels, ind_results) + + # open-set evaluation (binary class) + if threshold > 0: + preds = np.concatenate((ind_results, ood_results), axis=0) + uncertains = np.concatenate((ind_uncertainties, ood_uncertainties), axis=0) + preds[uncertains > threshold] = 1 + preds[uncertains <= threshold] = 0 + else: + preds = np.concatenate((ind_uncertainties, ood_uncertainties), axis=0) + + labels = np.concatenate((np.zeros_like(ind_labels), np.ones_like(ood_labels))) + auroc, aupr, fpr95 = eval_osr(labels, preds) + + return acc, auroc, aupr, fpr95 + + +def main(): + + print(f'\nNew Evaluation Results (open-set data: {args.ood_data}, backbone: {args.base_model})') + display_data = [["Methods", "AUROC (%)", "AUPR (%)", "FPR@95 (%)", "Closed-Set ACC (%)"], + ["OpenMax"], ["MC Dropout"], ["BNN SVI"], ["SoftMax"], ["RPL"], ["DEAR (ours)"]] # table heads and rows + exp_dir = os.path.join('./experiments', args.base_model) + + # OpenMax + result_path = os.path.join(exp_dir, args.baseline_results[0]) + ind_openmax, ood_openmax, ind_labels, ood_labels = parse_results(result_path, method='openmax') + acc, auroc, aupr, fpr95 = eval_confidence_methods(ind_openmax, ood_openmax, ind_labels, ood_labels, score='binary') + display_data[1].extend(["%.3f"%(auroc * 100), "%.3f"%(aupr * 100), "%.3f"%(fpr95 * 100), "%.3f"%(acc * 100)]) + + # MC Dropout + result_path = os.path.join(exp_dir, args.baseline_results[1]) + acc, auroc, aupr, fpr95 = eval_uncertainty_methods(result_path, threshold=args.thresholds[1]) + display_data[2].extend(["%.3f"%(auroc * 100), "%.3f"%(aupr * 100), "%.3f"%(fpr95 * 100), "%.3f"%(acc * 100)]) + + # BNN SVI + result_path = os.path.join(exp_dir, args.baseline_results[2]) + acc, auroc, aupr, fpr95 = eval_uncertainty_methods(result_path, threshold=args.thresholds[2]) + display_data[3].extend(["%.3f"%(auroc * 100), "%.3f"%(aupr * 100), "%.3f"%(fpr95 * 100), "%.3f"%(acc * 100)]) + + # SoftMax + result_path = os.path.join(exp_dir, args.baseline_results[3]) + ind_softmax, ood_softmax, ind_labels, ood_labels = parse_results(result_path, method='softmax') + acc, auroc, aupr, fpr95 = eval_confidence_methods(ind_softmax, ood_softmax, ind_labels, ood_labels, threshold=args.thresholds[3]) + display_data[4].extend(["%.3f"%(auroc * 100), "%.3f"%(aupr * 100), "%.3f"%(fpr95 * 100), "%.3f"%(acc * 100)]) + + # RPL + result_path = os.path.join(exp_dir, args.baseline_results[4]) + ind_softmax, ood_softmax, ind_labels, ood_labels = parse_results(result_path, method='softmax') + acc, auroc, aupr, fpr95 = eval_confidence_methods(ind_softmax, ood_softmax, ind_labels, ood_labels, threshold=args.thresholds[4]) + display_data[5].extend(["%.3f"%(auroc * 100), "%.3f"%(aupr * 100), "%.3f"%(fpr95 * 100), "%.3f"%(acc * 100)]) + + # DEAR (ours) + result_path = os.path.join(exp_dir, args.baseline_results[5]) + acc, auroc, aupr, fpr95 = eval_uncertainty_methods(result_path, threshold=args.thresholds[5]) + display_data[6].extend(["%.3f"%(auroc * 100), "%.3f"%(aupr * 100), "%.3f"%(fpr95 * 100), "%.3f"%(acc * 100)]) + + + table = AsciiTable(display_data) + table.inner_footing_row_border = True 
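+    # The table reports open-set AUROC / AUPR / FPR@95 (binary known-vs-unknown) and closed-set
+    # accuracy for each method. With the default thresholds of -1, the raw uncertainty
+    # (or 1 - max softmax probability) is used directly as the open-set score, so those rows are
+    # threshold-free; OpenMax instead uses its own binary unknown prediction.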
+ table.justify_columns = {0: 'left', 1: 'center', 2: 'center', 3: 'center', 4: 'center'} + print(table.table) + print("\n") + +if __name__ == "__main__": + + np.random.seed(123) + args = parse_args() + + main() \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/csn/baseline_csn_openmax.sh b/Downstream/Open-Set-Action-Recognition/experiments/csn/baseline_csn_openmax.sh new file mode 100644 index 0000000..385f5d4 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/csn/baseline_csn_openmax.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 +OOD_DATASET=$2 +IND_DATA='data/ucf101/ucf101_val_split_1_videos.txt' + +case ${OOD_DATASET} in + HMDB) + # run ood detection on hmdb-51 validation set + OOD_DATA='data/hmdb51/hmdb51_val_split_1_videos.txt' + NUM_CLASSES=51 + ;; + MiT) + # run ood detection on mit-v2 validation set + OOD_DATA='data/mit/mit_val_list_videos.txt' + NUM_CLASSES=305 + ;; + *) + echo "Dataset not supported: "${OOD_DATASET} + exit + ;; +esac +RESULT_DIR='experiments/csn/results' + +CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/baseline_openmax.py \ + --config configs/recognition/csn/inference_csn_dnn.py \ + --checkpoint work_dirs/csn/finetune_ucf101_csn_dnn/latest.pth \ + --cache_mav_dist experiments/csn/results_baselines/openmax/ucf101_mav_dist \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --ood_ncls ${NUM_CLASSES} \ + --result_prefix experiments/csn/results_baselines/openmax/CSN_OpenMax_${OOD_DATASET} + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/csn/baseline_csn_softmax.sh b/Downstream/Open-Set-Action-Recognition/experiments/csn/baseline_csn_softmax.sh new file mode 100644 index 0000000..9dccbe8 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/csn/baseline_csn_softmax.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 +OOD_DATASET=$2 + +case ${OOD_DATASET} in + HMDB) + NUM_CLASSES=51 + ;; + MiT) + NUM_CLASSES=305 + ;; + *) + echo "Dataset not supported: "${OOD_DATASET} + exit + ;; +esac +RESULT_DIR='experiments/csn/results' + +CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/baseline_softmax.py \ + --config configs/recognition/csn/inference_csn_dnn.py \ + --checkpoint work_dirs/csn/finetune_ucf101_csn_dnn/latest.pth \ + --batch_size 1 \ + --ood_ncls ${NUM_CLASSES} \ + --result_prefix experiments/csn/results_baselines/softmax/CSN_SoftMax_${OOD_DATASET} + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/csn/evaluate_csn_dnn_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/csn/evaluate_csn_dnn_ucf101.sh new file mode 100644 index 0000000..9174858 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/csn/evaluate_csn_dnn_ucf101.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/test.py configs/recognition/csn/finetune_ucf101_csn_dnn.py \ + work_dirs/csn/finetune_ucf101_csn_dnn/latest.pth \ + --videos_per_gpu 1 \ + --out work_dirs/csn/test_ucf101_csn_dnn.pkl \ + --eval top_k_accuracy mean_class_accuracy + +cd $pwd_dir +echo "Experiments finished!" 
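+# Usage sketch (run from experiments/csn/ so that `cd ../../` lands at the repo root; the GPU id
+# is illustrative):
+#   bash evaluate_csn_dnn_ucf101.sh 0
+# This evaluates the finetuned CSN (dropout DNN) model on UCF-101 with top-k and mean-class
+# accuracy and writes predictions to work_dirs/csn/test_ucf101_csn_dnn.pkl.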
diff --git a/Downstream/Open-Set-Action-Recognition/experiments/csn/evaluate_csn_edlnokl_avuc_debias_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/csn/evaluate_csn_edlnokl_avuc_debias_ucf101.sh new file mode 100644 index 0000000..e5fd553 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/csn/evaluate_csn_edlnokl_avuc_debias_ucf101.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/test.py configs/recognition/csn/finetune_ucf101_csn_edlnokl_avuc_debias.py \ + work_dirs/csn/finetune_ucf101_csn_edlnokl_avuc_debias/latest.pth \ + --videos_per_gpu 1 \ + --out work_dirs/csn/test_ucf101_csn_edlnokl_avuc_debias.pkl \ + --eval top_k_accuracy mean_class_accuracy + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/csn/finetune_csn_dnn_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/csn/finetune_csn_dnn_ucf101.sh new file mode 100644 index 0000000..8bad83c --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/csn/finetune_csn_dnn_ucf101.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +# --validate +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/csn/finetune_ucf101_csn_dnn.py \ + --work-dir work_dirs/csn/finetune_ucf101_csn_dnn \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 \ + --validate + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/csn/finetune_csn_edlnokl_avuc_debias_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/csn/finetune_csn_edlnokl_avuc_debias_ucf101.sh new file mode 100644 index 0000000..c274fb2 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/csn/finetune_csn_edlnokl_avuc_debias_ucf101.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +# --validate +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/csn/finetune_ucf101_csn_edlnokl_avuc_debias.py \ + --work-dir work_dirs/csn/finetune_ucf101_csn_edlnokl_avuc_debias \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 \ + --validate + +cd $pwd_dir +echo "Experiments finished!" 
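+# Usage sketch (GPU id illustrative, run from experiments/csn/):
+#   bash finetune_csn_edlnokl_avuc_debias_ucf101.sh 0
+# Same pattern as finetune_csn_dnn_ucf101.sh, but trains CSN with the evidential (EDL, no KL term)
+# loss plus AvU calibration and debiasing; --seed 0 --deterministic keeps runs reproducible and
+# checkpoints go to work_dirs/csn/finetune_ucf101_csn_edlnokl_avuc_debias/.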
diff --git a/Downstream/Open-Set-Action-Recognition/experiments/csn/run_get_threshold.sh b/Downstream/Open-Set-Action-Recognition/experiments/csn/run_get_threshold.sh new file mode 100644 index 0000000..f578c1e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/csn/run_get_threshold.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 +MODEL=$2 +BATCHSIZE=$3 +TRAIN_DATA='data/ucf101/ucf101_train_split_1_videos.txt' +RESULT_DIR='experiments/csn/results' + +case ${MODEL} in + dnn) + # get the BALD threshold for csn model trained on UCF-101 + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/get_threshold.py \ + --config configs/recognition/csn/inference_csn_dnn.py \ + --checkpoint work_dirs/csn/finetune_ucf101_csn_dnn/latest.pth \ + --train_data ${TRAIN_DATA} \ + --batch_size ${BATCHSIZE} \ + --uncertainty BALD \ + --result_prefix ${RESULT_DIR}/CSN_DNN_BALD + ;; + edlnokl_avuc_debias) + # get the EDL threshold for CSN_EDL_AvUC_Debias model trained on UCF-101 + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/get_threshold.py \ + --config configs/recognition/csn/inference_csn_enn.py \ + --checkpoint work_dirs/csn/finetune_ucf101_csn_edlnokl_avuc_debias/latest.pth \ + --train_data ${TRAIN_DATA} \ + --batch_size ${BATCHSIZE} \ + --uncertainty EDL \ + --result_prefix ${RESULT_DIR}/CSN_EDLNoKLAvUCDebias_EDL + ;; + *) + echo "Invalid model: "${MODEL} + exit + ;; +esac + + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/csn/run_ood_detection.sh b/Downstream/Open-Set-Action-Recognition/experiments/csn/run_ood_detection.sh new file mode 100644 index 0000000..7ce5f0e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/csn/run_ood_detection.sh @@ -0,0 +1,60 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 +OOD_DATASET=$2 +MODEL=$3 +IND_DATA='data/ucf101/ucf101_val_split_1_videos.txt' + +case ${OOD_DATASET} in + HMDB) + # run ood detection on hmdb-51 validation set + OOD_DATA='data/hmdb51/hmdb51_val_split_1_videos.txt' + ;; + MiT) + # run ood detection on hmdb-51 validation set + OOD_DATA='data/mit/mit_val_list_videos.txt' + ;; + *) + echo "Dataset not supported: "${OOD_DATASET} + exit + ;; +esac +RESULT_DIR='experiments/csn/results' + +case ${MODEL} in + dnn) + # DNN with Dropout model + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/ood_detection.py \ + --config configs/recognition/csn/inference_csn_dnn.py \ + --checkpoint work_dirs/csn/finetune_ucf101_csn_dnn/latest.pth \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --uncertainty BALD \ + --result_prefix ${RESULT_DIR}/CSN_DNN_BALD_${OOD_DATASET} + ;; + edlnokl_avuc_debias) + # Evidential Deep Learning (without KL divergence loss term) with AvU Calibration and Debiasing + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/ood_detection.py \ + --config configs/recognition/csn/inference_csn_enn.py \ + --checkpoint work_dirs/csn/finetune_ucf101_csn_edlnokl_avuc_debias/latest.pth \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --uncertainty EDL \ + --result_prefix ${RESULT_DIR}/CSN_EDLNoKLAvUCDebias_EDL_${OOD_DATASET} + ;; + *) + echo "Invalid model: "${MODEL} + exit + ;; +esac + + +cd $pwd_dir +echo "Experiments finished!" 
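+# Usage sketch (arguments: DEVICE OOD_DATASET MODEL; values illustrative):
+#   bash run_ood_detection.sh 0 HMDB dnn
+#   bash run_ood_detection.sh 0 MiT edlnokl_avuc_debias
+# UCF-101 is always the in-distribution split; the selected model's uncertainty (BALD for the
+# dropout DNN, EDL for the evidential model) is computed on both splits and saved under
+# experiments/csn/results/.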
\ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/csn/run_openness.sh b/Downstream/Open-Set-Action-Recognition/experiments/csn/run_openness.sh new file mode 100644 index 0000000..4630e6a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/csn/run_openness.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +OOD_DATA=$1 # HMDB or MiT +case ${OOD_DATA} in + HMDB) + NUM_CLASSES=51 + ;; + MiT) + NUM_CLASSES=305 + ;; + *) + echo "Invalid OOD Dataset: "${OOD_DATA} + exit + ;; +esac + +# OOD Detection comparison +python experiments/compare_openness.py \ + --base_model csn \ + --baselines CSN_DNN_BALD CSN_EDLNoKLAvUCDebias_EDL \ + --thresholds 0.000089 0.004563 \ + --styles b r \ + --ood_data ${OOD_DATA} \ + --ood_ncls ${NUM_CLASSES} \ + --ind_ncls 101 \ + --result_png F1_openness_compare_${OOD_DATA}.png + + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/csn/run_reliability_evaluation.sh b/Downstream/Open-Set-Action-Recognition/experiments/csn/run_reliability_evaluation.sh new file mode 100644 index 0000000..23745fe --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/csn/run_reliability_evaluation.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +OOD_DATASET=$1 +MODEL=$2 +RESULT_DIR='experiments/csn/results' + +case ${MODEL} in + dnn) + # DNN with Dropout model + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/evaluate_calibration.py \ + --ood_result ${RESULT_DIR}/CSN_DNN_BALD_${OOD_DATASET}_result.npz \ + --save_prefix ${RESULT_DIR}/../results_reliability/CSN_DNN_BALD_${OOD_DATASET}_reliability + ;; + edlnokl_avuc_debias) + # Evidential Deep Learning (without KL divergence loss term) with AvU Calibration and Debiasing + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/evaluate_calibration.py \ + --ood_result ${RESULT_DIR}/CSN_EDLNoKLAvUCDebias_EDL_${OOD_DATASET}_result.npz \ + --save_prefix ${RESULT_DIR}/../results_reliability/CSN_EDLNoKLAvUCDebias_EDL_${OOD_DATASET}_reliability + ;; + *) + echo "Invalid model: "${MODEL} + exit + ;; +esac + + +cd $pwd_dir +echo "Experiments finished!" 
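+# Usage sketch (arguments: OOD_DATASET MODEL; values illustrative):
+#   bash run_reliability_evaluation.sh HMDB dnn
+# Note: DEVICE is referenced above but never read from the command line in this script, so
+# export DEVICE=<gpu_id> beforehand, otherwise CUDA_VISIBLE_DEVICES is set to an empty string
+# and no GPU will be visible to PyTorch.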
\ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/demo.py b/Downstream/Open-Set-Action-Recognition/experiments/demo.py new file mode 100644 index 0000000..56df160 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/demo.py @@ -0,0 +1,134 @@ +import argparse +import os +import torch +from mmaction.apis import init_recognizer +from mmcv.parallel import collate, scatter +from mmaction.datasets.pipelines import Compose +import numpy as np +from tqdm import tqdm + + +def parse_args(): + parser = argparse.ArgumentParser(description='MMAction2 test') + # model config + parser.add_argument('config', help='test config file path') + parser.add_argument('--ckpt_dear', help='checkpoint file/url') + parser.add_argument('--ckpt_nodebias', help='checkpoint file/url') + parser.add_argument('--split_file', help='the split file for evaluation') + parser.add_argument('--video_path', help='the video path for evaluation') + parser.add_argument('--device', type=str, default='cuda:0', help='CPU/CUDA device option') + parser.add_argument('--result_list', help='result file prefix') + args = parser.parse_args() + return args + + +def set_deterministic(seed): + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) # if you are using multi-GPU. + np.random.seed(seed) # Numpy module. + torch.backends.cudnn.benchmark = True + torch.backends.cudnn.deterministic = True + + +def init_inference(config, checkpoint): + # build the recognizer from a config file and checkpoint file/url + model = init_recognizer(config, checkpoint, device=device, use_frames=False) + cfg = model.cfg + cfg.data.test.test_mode = True + cfg.data.test.ann_file = args.split_file + cfg.data.test.data_prefix = args.video_path + evidence = cfg.get('evidence', 'exp') + return model, evidence + + +def parse_listfile(list_file, videos_path): + assert os.path.exists(list_file), 'split file does not exist! %s'%(list_file) + assert os.path.exists(videos_path), 'video path does not exist! %s'%(videos_path) + # parse file list + filelist, labels = [], [] + with open(list_file, 'r') as f: + for line in f.readlines(): + videofile = line.strip().split(' ')[0] + label = int(line.strip().split(' ')[1]) + videofile_full = os.path.join(videos_path, videofile) + assert os.path.exists(videofile_full), 'video file does not exist! %s'%(videofile_full) + filelist.append(videofile_full) + labels.append(label) + return filelist, labels + + +def run_evidence_inference(model, video_path, evidence='exp'): + """Inference a video with the detector. + + Args: + model (nn.Module): The loaded recognizer. + video_path (str): The video file path/url or the rawframes directory + path. If ``use_frames`` is set to True, it should be rawframes + directory path. Otherwise, it should be video file path. 
+ """ + cfg = model.cfg + device = next(model.parameters()).device # model device + # build the data pipeline + test_pipeline = cfg.data.test.pipeline + test_pipeline = Compose(test_pipeline) + # prepare data (by default, we use videodata) + start_index = cfg.data.test.get('start_index', 0) + data = dict(filename=video_path, label=-1, start_index=start_index, modality='RGB') + data = test_pipeline(data) + data = collate([data], samples_per_gpu=1) + if next(model.parameters()).is_cuda: + # scatter to specified GPU + data = scatter(data, [device])[0] + + # get the evidence function + if evidence == 'relu': + from mmaction.models.losses.edl_loss import relu_evidence as get_evidence + elif evidence == 'exp': + from mmaction.models.losses.edl_loss import exp_evidence as get_evidence + elif evidence == 'softplus': + from mmaction.models.losses.edl_loss import softplus_evidence as get_evidence + else: + raise NotImplementedError + num_classes = model.cls_head.num_classes + + # forward the model + with torch.no_grad(): + output = model(return_loss=False, **data)[0] # batchsize = 1 + evidence = get_evidence(torch.from_numpy(output)) + alpha = evidence + 1 + uncertainty = num_classes / torch.sum(alpha, dim=0) + scores = alpha / torch.sum(alpha, dim=0, keepdim=True) + return scores.cpu().numpy(), uncertainty.cpu().numpy() + + +def main(): + model_dear, evidence_dear = init_inference(args.config, args.ckpt_dear) + model_nodebias, evidence_nodebias = init_inference(args.config, args.ckpt_nodebias) + # result file + result_path = os.path.dirname(args.result_list) + if not os.path.exists(result_path): + os.makedirs(result_path) + fid = open(args.result_list, 'w') + # run inference + videofiles, labels = parse_listfile(args.split_file, args.video_path) + for i, (videofile, label) in tqdm(enumerate(zip(videofiles, labels)), total=len(videofiles)): + scores_dear, uncertainty_dear = run_evidence_inference(model_dear, videofile, evidence_dear) # (101,) + scores_nodebias, uncertainty_nodebias = run_evidence_inference(model_nodebias, videofile, evidence_nodebias) # (101,) + # save + pred_dear = int(np.argmax(scores_dear)) + pred_nodebias = int(np.argmax(scores_nodebias)) + if pred_dear == label and pred_nodebias != label: + fid.writelines('%s %d %d %.6lf %d %.6lf\n'%(videofile, label, pred_dear, float(uncertainty_dear), pred_nodebias, float(uncertainty_nodebias))) + fid.close() + + +if __name__ == '__main__': + + args = parse_args() + # assign the desired device. 
+ device = torch.device(args.device) + set_deterministic(1234) + + main() + \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/draw_confusion_matrix.py b/Downstream/Open-Set-Action-Recognition/experiments/draw_confusion_matrix.py new file mode 100644 index 0000000..c0fb1af --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/draw_confusion_matrix.py @@ -0,0 +1,209 @@ +import numpy as np +import argparse +import os +import matplotlib.pyplot as plt +# from mmaction.core.evaluation import confusion_matrix +from mpl_toolkits.axes_grid1 import make_axes_locatable +import matplotlib.ticker as ticker +from matplotlib.colors import LogNorm + + +def confusion_maxtrix(ind_labels, ind_results, ind_uncertainties, + ood_labels, ood_results, ood_uncertainties, + threshold, know_ood_labels=False, normalize=True): + num_indcls = max(ind_labels) + 1 + num_oodcls = max(ood_labels) + 1 + confmat = np.zeros((num_indcls + num_oodcls, num_indcls + num_oodcls), dtype=np.float32) + for rlabel, plabel, uncertain in zip(ind_labels, ind_results, ind_uncertainties): + if uncertain > threshold: + # known --> unknown (bottom-left) + confmat[num_indcls:num_indcls+num_oodcls, rlabel] += 1.0 * num_oodcls + else: + # known --> known (top-left) + confmat[plabel, rlabel] += 1.0 * num_indcls + if know_ood_labels: + for rlable, plabel, uncertain in zip(ood_labels, ood_results, ood_uncertainties): + if uncertain > threshold: + # unknown --> unknown (bottom-right) + confmat[num_indcls:num_indcls+num_oodcls, num_indcls+rlable] += 1.0 + else: + # unknown --> known (top-right) + confmat[plabel, num_indcls+rlable] += 1.0 * num_oodcls + else: + for plabel, uncertain in zip(ood_results, ood_uncertainties): + if uncertain > threshold: + # unknown --> unknown (bottom-right) + confmat[num_indcls:num_indcls+num_oodcls, num_indcls:num_indcls+num_oodcls] += 1.0 + else: + # unknown --> known (top-right) + confmat[plabel, num_indcls:num_indcls+num_oodcls] += 1 * num_oodcls + if normalize: + minval = np.min(confmat[np.nonzero(confmat)]) + maxval = np.max(confmat) + confmat = (confmat - minval) / (maxval - minval + 1e-6) + # confmat = np.nan_to_num(confmat) + return confmat + + + +def confusion_maxtrix_top(ind_labels, ind_results, ind_uncertainties, + ood_labels, ood_results, ood_uncertainties, + threshold, normalize=True): + num_indcls = max(ind_labels) + 1 + num_oodcls = max(ood_labels) + 1 + confmat = np.ones((num_indcls, num_indcls + num_oodcls), dtype=np.float32) # (K, K+C) white + # process in-distribution results + for rlabel, plabel, uncertain in zip(ind_labels, ind_results, ind_uncertainties): + if uncertain < threshold: + # known --> known (top-left) + confmat[plabel, rlabel] -= 1.0 / len(ind_results) # make it darker + # process out-of-distribution results + for rlable, plabel, uncertain in zip(ood_labels, ood_results, ood_uncertainties): + if uncertain < threshold: + # unknown --> known (top-right) + confmat[plabel, num_indcls+rlable] -= 1.0 / len(ood_results) # make it darker + if normalize: + minval = np.min(confmat) + maxval = np.max(confmat) + confmat = (confmat - minval) / (maxval - minval + 1e-6) # normalize to [0, 1] + return confmat + + + +def plot_confmat(confmat, know_ood_labels=False): + fig = plt.figure(figsize=(4,4)) + plt.rcParams["font.family"] = "Arial" # Times New Roman + fontsize = 20 + ax = plt.gca() + confmat_vis = confmat.copy() + im = ax.imshow(confmat_vis, cmap='hot') + plt.xticks(fontsize=fontsize) + plt.yticks(fontsize=fontsize) + divider = 
make_axes_locatable(ax) + cax = divider.append_axes("right", size="5%", pad=0.05) + cbar = plt.colorbar(im, cax=cax) + # cbar.locator = ticker.MaxNLocator(nbins=5) + # # barticks = np.linspace(np.min(confmat) * 1000, np.max(confmat) * 1000, 5).tolist() + # # cbar.set_ticks(barticks) + # cbar.ax.tick_params(labelsize=fontsize) + cbar.set_ticks([]) + cbar.update_ticks() + plt.tight_layout() + save_file = args.save_file[:-4] + '_knownOOD.png' if know_ood_labels else args.save_file + plt.savefig(save_file, bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0) + plt.savefig(save_file[:-4] + '.pdf', bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0) + plt.close() + +def get_topk_2d(arr, topk=5): + col_indices = np.argmax(arr, axis=1) # column indices + vals = [arr[r, c] for r, c in enumerate(col_indices)] + row_indices = np.argsort(vals)[::-1] # decreasing sort + result_inds = np.zeros((topk, 2), dtype=np.int32) + result_vals = [] + for k in range(topk): + result_inds[k, 0] = row_indices[k] + result_inds[k, 1] = col_indices[row_indices[k]] + result_vals.append(vals[row_indices[k]]) + return result_inds, result_vals + +def read_classnames(list_file): + names = [] + with open(list_file, 'r') as f: + for line in f.readlines(): + names.append(line.strip().split(' ')[-1]) + return names + +def plot_top_confmat(confmat): + ind_mappings = 'data/ucf101/annotations/classInd.txt' + ind_cls_names = read_classnames(ind_mappings) + ood_mappings = 'data/hmdb51/annotations/classInd.txt' + ood_cls_names = read_classnames(ood_mappings) + + fig = plt.figure(figsize=(8,4)) + plt.rcParams["font.family"] = "Arial" # Times New Roman + fontsize = 15 + ax = plt.gca() + confmat_vis = confmat.copy() + im = ax.imshow(confmat_vis, cmap='hot') + plt.axvline(num_indcls-1, 0, num_indcls-1, linestyle='--') + # find the top-K mis-classification for unknown + result_inds, result_vals = get_topk_2d(1 - confmat_vis[:, num_indcls+1:]) + ood_ids = np.argmin(confmat_vis[:, num_indcls+1:], axis=1) + text_offset = [[-25, 1], [-25, -3], [-61, 1], [-28, 2], [-32, 1]] + for i, (r, c) in enumerate(result_inds): + hd = plt.Circle((c + num_indcls, r), 5, fill=False) + ax.set_aspect(1) + ax.add_artist(hd ) + off_c, off_r = 6, 1 + if i == 1: + off_c, off_r = -4, -6 + plt.text(c + num_indcls + off_c, r + off_r, ood_cls_names[c], color='blue', fontsize=fontsize) + plt.plot([num_indcls, num_indcls + c], [r, r], 'r--') + plt.text(num_indcls + text_offset[i][0], r+text_offset[i][1], ind_cls_names[r], color='red', fontsize=fontsize) + plt.ylabel('Predicted Classes', fontsize=fontsize) + plt.xlabel('UCF-101 (known) + HMDB-51 (unknown)', fontsize=fontsize) + plt.xticks(fontsize=fontsize) + plt.yticks(fontsize=fontsize) + plt.tight_layout() + plt.savefig(args.save_file[:-4] + '_top.png', bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0) + plt.savefig(args.save_file[:-4] + '_top.pdf', bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0) + plt.close() + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='MMAction2 test') + # model config + parser.add_argument('--ood_result', help='the result file of ood detection') + parser.add_argument('--uncertain_thresh', type=float, default=0.0001, help='the threshold value for prediction') + parser.add_argument('--top_part', action='store_true', help='Whether to show the top part of confmat separately.') + parser.add_argument('--save_file', help='the image file path of generated confusion matrix') + args = parser.parse_args() + + results = np.load(args.ood_result, allow_pickle=True) + 
ind_uncertainties = results['ind_unctt'] # (N1,) + ood_uncertainties = results['ood_unctt'] # (N2,) + ind_results = results['ind_pred'] # (N1,) + ood_results = results['ood_pred'] # (N2,) + ind_labels = results['ind_label'] + ood_labels = results['ood_label'] + + # result path + result_path = os.path.dirname(args.save_file) + if not os.path.exists(result_path): + os.makedirs(result_path) + + # OOD classes are unknown + confmat1 = confusion_maxtrix(ind_labels, ind_results, ind_uncertainties, + ood_labels, ood_results, ood_uncertainties, + args.uncertain_thresh, know_ood_labels=False) + plot_confmat(confmat1, know_ood_labels=False) + + num_indcls = max(ind_labels) + 1 + num_oodcls = max(ood_labels) + 1 + UKC_value = np.mean(confmat1[:num_indcls, num_indcls:]) # unknown --> known (top-right) + UUC_value = np.mean(confmat1[num_indcls:, num_indcls:]) # unknown --> unknown (bottom-right) + KUC_value = np.mean(confmat1[num_indcls:, :num_indcls]) # known --> unknown (bottom-left) + KKC_value = np.mean(np.diag(confmat1[:num_indcls, :num_indcls])) # known --> known (top-left) + print("The average UUC=: %.6lf, UKC=%.6lf, KUC=%.6lf, KKC=%.6lf"%(UUC_value, UKC_value, KUC_value, KKC_value)) + + + + # # OOD classes are known + # confmat2 = confusion_maxtrix(ind_labels, ind_results, ind_uncertainties, + # ood_labels, ood_results, ood_uncertainties, + # args.uncertain_thresh, know_ood_labels=True) + # plot_confmat(confmat2, know_ood_labels=True) + + # # save the confusion matrix for further analysis + # np.savez(args.save_file[:-4], confmat_unknown_ood=confmat1, confmat_known_ood=confmat2) + # votes_ind = np.sum(confmat1[:101, 101:], axis=1) + # print("Top-10 false positive IND classes: ", np.argsort(votes_ind)[-10:]) + + # votes_ood = np.sum(confmat1[101:, :101], axis=1) + # print("Top-10 false negative IND classes: ", np.argsort(votes_ood)[-10:]) + + if args.top_part: + top_confmat = confusion_maxtrix_top(ind_labels, ind_results, ind_uncertainties, + ood_labels, ood_results, ood_uncertainties, + args.uncertain_thresh) + plot_top_confmat(top_confmat) \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/draw_fig7cd.py b/Downstream/Open-Set-Action-Recognition/experiments/draw_fig7cd.py new file mode 100644 index 0000000..b76f343 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/draw_fig7cd.py @@ -0,0 +1,70 @@ +import os +import numpy as np +import matplotlib.pyplot as plt + + +def plot_by_uncertainty(result_file, uncertainty='EDL', auc=80, fontsize=16, result_prefix=''): + assert os.path.exists(result_file), 'result file not exists! 
%s'%(result_file) + results = np.load(result_file, allow_pickle=True) + # ind_confidences = results['ind_conf'] + # ood_confidences = results['ood_conf'] + ind_uncertainties = results['ind_unctt'] # (N1,) + ood_uncertainties = results['ood_unctt'] # (N2,) + ind_results = results['ind_pred'] # (N1,) + ood_results = results['ood_pred'] # (N2,) + ind_labels = results['ind_label'] + ood_labels = results['ood_label'] + + # visualize + ind_uncertainties = np.array(ind_uncertainties) + ind_uncertainties = (ind_uncertainties-np.min(ind_uncertainties)) / (np.max(ind_uncertainties) - np.min(ind_uncertainties)) # normalize + ood_uncertainties = np.array(ood_uncertainties) + ood_uncertainties = (ood_uncertainties-np.min(ood_uncertainties)) / (np.max(ood_uncertainties) - np.min(ood_uncertainties)) # normalize + + fig = plt.figure(figsize=(5,4)) # (w, h) + plt.rcParams["font.family"] = "Arial" # Times New Roman + data_label = 'HMDB-51' if ood_data == 'HMDB' else 'MiT-v2' + counts, bins, bars = plt.hist([ind_uncertainties, ood_uncertainties], 50, + density=True, histtype='bar', color=['blue', 'red'], + label=['in-distribution (%s)'%(ind_data), 'out-of-distribution (%s)'%(data_label)]) + plt.legend(fontsize=fontsize-3) + plt.text(0.6, 6, 'AUC = %.2lf'%(auc), fontsize=fontsize-3) + plt.xlabel('%s uncertainty'%(uncertainty), fontsize=fontsize) + plt.ylabel('Density', fontsize=fontsize) + plt.xticks(fontsize=fontsize) + plt.yticks(fontsize=fontsize) + plt.xlim(0, 1.01) + plt.ylim(0, 10.01) + plt.tight_layout() + + result_dir = os.path.dirname(result_prefix) + if not os.path.exists(result_dir): + os.makedirs(result_dir) + # save the figure + plt.savefig(os.path.join(result_prefix + '_distribution.png'), bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0) + plt.savefig(os.path.join(result_prefix + '_distribution.pdf'), bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0) + return counts, bins, bars + + +if __name__ == '__main__': + + fontsize = 20 + ind_data = 'UCF-101' + ood_data = 'MiT' + # DRIVE (vanilla) + result_file = 'i3d/results/I3D_EDLNoKL_EDL_%s_result.npz'%(ood_data) + counts, bins, bars = plot_by_uncertainty(result_file, uncertainty='EDL', auc=81.43, fontsize=fontsize, result_prefix='temp_rebuttal/I3D_MiT_Vanilla') + counts = counts[:, 3:] + bins = bins[3:] + # mode = np.argsort(counts[1, :])[:5] + mode = np.argmax(counts[1, :]) + print('the most frequent bin:(' + str(bins[mode]) + ',' + str(bins[mode+1]) + ')') + + # DRIVE (full) + result_file = 'i3d/results/I3D_EDLNoKLAvUCCED_EDL_%s_result.npz'%(ood_data) + counts, bins, bars = plot_by_uncertainty(result_file, uncertainty='EDL', auc=81.54, fontsize=fontsize, result_prefix='temp_rebuttal/I3D_MiT_Full') + counts = counts[:, 3:] + bins = bins[3:] + # mode = np.argsort(counts[1, :])[:5] + mode = np.argmax(counts[1, :]) + print('the most frequent bin:(' + str(bins[mode]) + ',' + str(bins[mode+1]) + ')') \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/draw_ood_hist.py b/Downstream/Open-Set-Action-Recognition/experiments/draw_ood_hist.py new file mode 100644 index 0000000..2d6f7dd --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/draw_ood_hist.py @@ -0,0 +1,306 @@ +import argparse +import os +import numpy as np +import matplotlib.pyplot as plt +import matplotlib.ticker as ticker + + +def parse_args(): + parser = argparse.ArgumentParser(description='Draw histogram') + parser.add_argument('--uncertainty', default='EDL', choices=['BALD', 'Entropy', 'EDL'], help='the uncertainty estimation method') 
+ parser.add_argument('--ind_data', default='UCF-101', help='the split file of in-distribution testing data') + parser.add_argument('--ood_data', default='HMDB', choices=['HMDB', 'MiT'], help='the split file of out-of-distribution testing data') + parser.add_argument('--model', default='I3D', choices=['I3D', 'TSM', 'SlowFast', 'TPN'], help='the action recognition model.') + parser.add_argument('--result_prefix', default='temp/temp.png', help='result file prefix') + args = parser.parse_args() + return args + +def plot_by_uncertainty(result_file, uncertainty='EDL', auc=80, fontsize=16): + assert os.path.exists(result_file), 'result file not exists! %s'%(result_file) + results = np.load(result_file, allow_pickle=True) + # ind_confidences = results['ind_conf'] + # ood_confidences = results['ood_conf'] + ind_uncertainties = results['ind_unctt'] # (N1,) + ood_uncertainties = results['ood_unctt'] # (N2,) + ind_results = results['ind_pred'] # (N1,) + ood_results = results['ood_pred'] # (N2,) + ind_labels = results['ind_label'] + ood_labels = results['ood_label'] + + # visualize + ind_uncertainties = np.array(ind_uncertainties) + ind_uncertainties = (ind_uncertainties-np.min(ind_uncertainties)) / (np.max(ind_uncertainties) - np.min(ind_uncertainties)) # normalize + ood_uncertainties = np.array(ood_uncertainties) + ood_uncertainties = (ood_uncertainties-np.min(ood_uncertainties)) / (np.max(ood_uncertainties) - np.min(ood_uncertainties)) # normalize + + fig = plt.figure(figsize=(5,4)) # (w, h) + plt.rcParams["font.family"] = "Arial" # Times New Roman + data_label = 'HMDB-51' if args.ood_data == 'HMDB' else 'MiT-v2' + plt.hist([ind_uncertainties, ood_uncertainties], 50, + density=True, histtype='bar', color=['blue', 'red'], + label=['in-distribution (%s)'%(args.ind_data), 'out-of-distribution (%s)'%(data_label)]) + plt.legend(fontsize=fontsize-3) + plt.text(0.6, 6, 'AUC = %.2lf'%(auc), fontsize=fontsize-3) + plt.xlabel('%s uncertainty'%(uncertainty), fontsize=fontsize) + plt.ylabel('Density', fontsize=fontsize) + plt.xticks(fontsize=fontsize) + plt.yticks(fontsize=fontsize) + plt.xlim(0, 1.01) + plt.ylim(0, 10.01) + plt.tight_layout() + + result_dir = os.path.dirname(args.result_prefix) + if not os.path.exists(result_dir): + os.makedirs(result_dir) + # save the figure + plt.savefig(os.path.join(args.result_prefix + '_distribution.png'), bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0) + plt.savefig(os.path.join(args.result_prefix + '_distribution.pdf'), bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0) + + +def get_confidence(result_file, conf='softmax'): + # only for SoftMax and OpenMax + assert os.path.exists(result_file), 'result file not exists! 
%s'%(result_file) + results = np.load(result_file, allow_pickle=True) + if conf == 'softmax': + ind_score = results['ind_softmax'] # (N1, C) + ood_score = results['ood_softmax'] # (N2, C) + else: + ind_score = results['ind_openmax'] # (N1, C+1) + ood_score = results['ood_openmax'] # (N2, C+1) + ind_conf = np.max(ind_score, axis=1) + ood_conf = np.max(ood_score, axis=1) + return ind_conf, ood_conf + + +def plot_by_confidence(ind_confidence, ood_confidence, auc=80, fontsize=16): + # visualize + ind_conf = ind_confidence.copy() + ind_conf = (ind_conf-np.min(ind_conf)) / (np.max(ind_conf) - np.min(ind_conf) + 1e-6) # normalize + ood_conf = ood_confidence.copy() + ood_conf = (ood_conf-np.min(ood_conf)) / (np.max(ood_conf) - np.min(ood_conf) + 1e-6) # normalize + + fig = plt.figure(figsize=(5,4)) # (w, h) + plt.rcParams["font.family"] = "Arial" # Times New Roman + data_label = 'HMDB-51' if args.ood_data == 'HMDB' else 'MiT-v2' + plt.hist([ind_conf, ood_conf], 50, + density=True, histtype='bar', color=['blue', 'red'], + label=['in-distribution (%s)'%(args.ind_data), 'out-of-distribution (%s)'%(data_label)]) + plt.legend(fontsize=fontsize-3) + plt.text(0.6, 6, 'AUC = %.2lf'%(auc), fontsize=fontsize-3) + plt.xlabel('Confidence', fontsize=fontsize) + plt.ylabel('Density', fontsize=fontsize) + plt.xticks(fontsize=fontsize) + plt.yticks(fontsize=fontsize) + plt.xlim(0, 1.01) + plt.ylim(0, 10.01) + plt.tight_layout() + + result_dir = os.path.dirname(args.result_prefix) + if not os.path.exists(result_dir): + os.makedirs(result_dir) + # save the figure + plt.savefig(os.path.join(args.result_prefix + '_distribution.png'), bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0) + plt.savefig(os.path.join(args.result_prefix + '_distribution.pdf'), bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0) + +def main_i3d(): + # common settings + fontsize = 18 if args.ood_data == 'HMDB' else 20 + + # SoftMax + result_file = 'i3d/results_baselines/openmax/I3D_OpenMax_%s_result.npz'%(args.ood_data) + args.result_prefix = 'i3d/results_baselines/softmax/I3D_SoftMax_Conf_%s'%(args.ood_data) + auc = 75.68 if args.ood_data == 'HMDB' else 79.94 + ind_conf, ood_conf = get_confidence(result_file, conf='softmax') + plot_by_confidence(ind_conf, ood_conf, auc=auc, fontsize=fontsize) + + # OpenMax + result_file = 'i3d/results_baselines/openmax/I3D_OpenMax_%s_result.npz'%(args.ood_data) + args.result_prefix = 'i3d/results_baselines/openmax/I3D_OpenMax_Conf_%s'%(args.ood_data) + auc = 74.34 if args.ood_data == 'HMDB' else 77.76 + ind_conf, ood_conf = get_confidence(result_file, conf='openmax') + plot_by_confidence(ind_conf, ood_conf, auc=auc, fontsize=fontsize) + + # RPL + result_file = 'i3d/results_baselines/rpl/I3D_RPL_%s_result.npz'%(args.ood_data) + args.result_prefix = 'i3d/results_baselines/rpl/I3D_RPL_Conf_%s'%(args.ood_data) + auc = 75.20 if args.ood_data == 'HMDB' else 79.16 + ind_conf, ood_conf = get_confidence(result_file, conf='softmax') + plot_by_confidence(ind_conf, ood_conf, auc=auc, fontsize=fontsize) + + # MC Dropout + result_file = 'i3d/results/I3D_DNN_BALD_%s_result.npz'%(args.ood_data) + args.result_prefix = 'i3d/results/I3D_DNN_BALD_%s'%(args.ood_data) + auc = 75.07 if args.ood_data == 'HMDB' else 79.14 + plot_by_uncertainty(result_file, uncertainty='BALD', auc=auc, fontsize=fontsize) + + # BNN SVI + result_file = 'i3d/results/I3D_BNN_BALD_%s_result.npz'%(args.ood_data) + args.result_prefix = 'i3d/results/I3D_BNN_BALD_%s'%(args.ood_data) + auc = 74.66 if args.ood_data == 'HMDB' else 79.50 + 
plot_by_uncertainty(result_file, uncertainty='BALD', auc=auc, fontsize=fontsize) + + # DRIVE (vanilla) + result_file = 'i3d/results/I3D_EDLNoKL_EDL_%s_result.npz'%(args.ood_data) + args.result_prefix = 'i3d/results/I3D_EDLNoKL_EDL_%s'%(args.ood_data) + auc = 76.41 if args.ood_data == 'HMDB' else 81.43 + plot_by_uncertainty(result_file, uncertainty='EDL', auc=auc, fontsize=fontsize) + + # DRIVE (full) + result_file = 'i3d/results/I3D_EDLNoKLAvUCCED_EDL_%s_result.npz'%(args.ood_data) + args.result_prefix = 'i3d/results/I3D_EDLNoKLAvUCCED_EDL_%s'%(args.ood_data) + auc = 77.08 if args.ood_data == 'HMDB' else 81.54 + plot_by_uncertainty(result_file, uncertainty='EDL', auc=auc, fontsize=fontsize) + + +def main_tsm(): + # common settings + fontsize = 18 if args.ood_data == 'HMDB' else 20 + + # SoftMax + result_file = 'tsm/results_baselines/openmax/TSM_OpenMax_%s_result.npz'%(args.ood_data) + args.result_prefix = 'tsm/results_baselines/softmax/TSM_SoftMax_Conf_%s'%(args.ood_data) + auc = 77.99 if args.ood_data == 'HMDB' else 82.38 + ind_conf, ood_conf = get_confidence(result_file, conf='softmax') + plot_by_confidence(ind_conf, ood_conf, auc=auc, fontsize=fontsize) + + # OpenMax + result_file = 'tsm/results_baselines/openmax/TSM_OpenMax_%s_result.npz'%(args.ood_data) + args.result_prefix = 'tsm/results_baselines/openmax/TSM_OpenMax_Conf_%s'%(args.ood_data) + auc = 77.07 if args.ood_data == 'HMDB' else 83.05 + ind_conf, ood_conf = get_confidence(result_file, conf='openmax') + plot_by_confidence(ind_conf, ood_conf, auc=auc, fontsize=fontsize) + + # RPL + result_file = 'tsm/results_baselines/rpl/TSM_RPL_%s_result.npz'%(args.ood_data) + args.result_prefix = 'tsm/results_baselines/rpl/TSM_RPL_Conf_%s'%(args.ood_data) + auc = 73.62 if args.ood_data == 'HMDB' else 77.28 + ind_conf, ood_conf = get_confidence(result_file, conf='softmax') + plot_by_confidence(ind_conf, ood_conf, auc=auc, fontsize=fontsize) + + # MC Dropout + result_file = 'tsm/results/TSM_DNN_BALD_%s_result.npz'%(args.ood_data) + args.result_prefix = 'tsm/results/TSM_DNN_BALD_%s'%(args.ood_data) + auc = 73.85 if args.ood_data == 'HMDB' else 78.35 + plot_by_uncertainty(result_file, uncertainty='BALD', auc=auc, fontsize=fontsize) + + # BNN SVI + result_file = 'tsm/results/TSM_BNN_BALD_%s_result.npz'%(args.ood_data) + args.result_prefix = 'tsm/results/TSM_BNN_BALD_%s'%(args.ood_data) + auc = 73.42 if args.ood_data == 'HMDB' else 77.39 + plot_by_uncertainty(result_file, uncertainty='BALD', auc=auc, fontsize=fontsize) + + # DRIVE (full) + result_file = 'tsm/results/TSM_EDLNoKLAvUCDebias_EDL_%s_result.npz'%(args.ood_data) + args.result_prefix = 'tsm/results/TSM_EDLNoKLAvUCDebias_EDL_%s'%(args.ood_data) + auc = 78.65 if args.ood_data == 'HMDB' else 83.92 + plot_by_uncertainty(result_file, uncertainty='EDL', auc=auc, fontsize=fontsize) + + +def main_slowfast(): + # common settings + fontsize = 18 if args.ood_data == 'HMDB' else 20 + + # SoftMax + result_file = 'slowfast/results_baselines/openmax/SlowFast_OpenMax_%s_result.npz'%(args.ood_data) + args.result_prefix = 'slowfast/results_baselines/softmax/SlowFast_SoftMax_Conf_%s'%(args.ood_data) + auc = 79.16 if args.ood_data == 'HMDB' else 82.88 + ind_conf, ood_conf = get_confidence(result_file, conf='softmax') + plot_by_confidence(ind_conf, ood_conf, auc=auc, fontsize=fontsize) + + # OpenMax + result_file = 'slowfast/results_baselines/openmax/SlowFast_OpenMax_%s_result.npz'%(args.ood_data) + args.result_prefix = 'slowfast/results_baselines/openmax/SlowFast_OpenMax_Conf_%s'%(args.ood_data) + auc = 
78.76 if args.ood_data == 'HMDB' else 80.62 + ind_conf, ood_conf = get_confidence(result_file, conf='openmax') + plot_by_confidence(ind_conf, ood_conf, auc=auc, fontsize=fontsize) + + # RPL + result_file = 'slowfast/results_baselines/rpl/SlowFast_RPL_%s_result.npz'%(args.ood_data) + args.result_prefix = 'slowfast/results_baselines/rpl/SlowFast_RPL_Conf_%s'%(args.ood_data) + auc = 74.23 if args.ood_data == 'HMDB' else 77.42 + ind_conf, ood_conf = get_confidence(result_file, conf='softmax') + plot_by_confidence(ind_conf, ood_conf, auc=auc, fontsize=fontsize) + + # MC Dropout + result_file = 'slowfast/results/SlowFast_DNN_BALD_%s_result.npz'%(args.ood_data) + args.result_prefix = 'slowfast/results/SlowFast_DNN_BALD_%s'%(args.ood_data) + auc = 75.41 if args.ood_data == 'HMDB' else 78.49 + plot_by_uncertainty(result_file, uncertainty='BALD', auc=auc, fontsize=fontsize) + + # BNN SVI + result_file = 'slowfast/results/SlowFast_BNN_BALD_%s_result.npz'%(args.ood_data) + args.result_prefix = 'slowfast/results/SlowFast_BNN_BALD_%s'%(args.ood_data) + auc = 74.78 if args.ood_data == 'HMDB' else 77.39 + plot_by_uncertainty(result_file, uncertainty='BALD', auc=auc, fontsize=fontsize) + + # DRIVE (full) + result_file = 'slowfast/results/SlowFast_EDLNoKLAvUCDebias_EDL_%s_result.npz'%(args.ood_data) + args.result_prefix = 'slowfast/results/SlowFast_EDLNoKLAvUCDebias_EDL_%s'%(args.ood_data) + auc = 82.94 if args.ood_data == 'HMDB' else 86.99 + plot_by_uncertainty(result_file, uncertainty='EDL', auc=auc, fontsize=fontsize) + + +def main_tpn(): + # common settings + fontsize = 18 if args.ood_data == 'HMDB' else 20 + + # SoftMax + result_file = 'tpn_slowonly/results_baselines/openmax/TPN_OpenMax_%s_result.npz'%(args.ood_data) + args.result_prefix = 'tpn_slowonly/results_baselines/softmax/TPN_SoftMax_Conf_%s'%(args.ood_data) + auc = 77.97 if args.ood_data == 'HMDB' else 81.35 + ind_conf, ood_conf = get_confidence(result_file, conf='softmax') + plot_by_confidence(ind_conf, ood_conf, auc=auc, fontsize=fontsize) + + # OpenMax + result_file = 'tpn_slowonly/results_baselines/openmax/TPN_OpenMax_%s_result.npz'%(args.ood_data) + args.result_prefix = 'tpn_slowonly/results_baselines/openmax/TPN_OpenMax_Conf_%s'%(args.ood_data) + auc = 74.12 if args.ood_data == 'HMDB' else 76.26 + ind_conf, ood_conf = get_confidence(result_file, conf='openmax') + plot_by_confidence(ind_conf, ood_conf, auc=auc, fontsize=fontsize) + + # RPL + result_file = 'tpn_slowonly/results_baselines/rpl/TPN_RPL_%s_result.npz'%(args.ood_data) + args.result_prefix = 'tpn_slowonly/results_baselines/rpl/TPN_RPL_Conf_%s'%(args.ood_data) + auc = 75.32 if args.ood_data == 'HMDB' else 78.21 + ind_conf, ood_conf = get_confidence(result_file, conf='softmax') + plot_by_confidence(ind_conf, ood_conf, auc=auc, fontsize=fontsize) + + # MC Dropout + result_file = 'tpn_slowonly/results/TPN_SlowOnly_Dropout_BALD_%s_result.npz'%(args.ood_data) + args.result_prefix = 'tpn_slowonly/results/TPN_SlowOnly_Dropout_BALD_%s'%(args.ood_data) + auc = 74.13 if args.ood_data == 'HMDB' else 77.76 + plot_by_uncertainty(result_file, uncertainty='BALD', auc=auc, fontsize=fontsize) + + # BNN SVI + result_file = 'tpn_slowonly/results/TPN_SlowOnly_BNN_BALD_%s_result.npz'%(args.ood_data) + args.result_prefix = 'tpn_slowonly/results/TPN_SlowOnly_BNN_BALD_%s'%(args.ood_data) + auc = 72.68 if args.ood_data == 'HMDB' else 75.32 + plot_by_uncertainty(result_file, uncertainty='BALD', auc=auc, fontsize=fontsize) + + # DRIVE (full) + result_file = 
'tpn_slowonly/results/TPN_SlowOnly_EDLlogNoKLAvUCDebias_EDL_%s_result.npz'%(args.ood_data) + args.result_prefix = 'tpn_slowonly/results/TPN_SlowOnly_EDLlogNoKLAvUCDebias_EDL_%s'%(args.ood_data) + auc = 79.23 if args.ood_data == 'HMDB' else 81.80 + plot_by_uncertainty(result_file, uncertainty='EDL', auc=auc, fontsize=fontsize) + + +if __name__ == '__main__': + + args = parse_args() + + if args.model == 'I3D': + # draw results on I3D + main_i3d() + elif args.model == 'TSM': + # draw results on TSM + main_tsm() + elif args.model == 'SlowFast': + # draw results on SlowFast + main_slowfast() + elif args.model == 'TPN': + # draw results on TPN + main_tpn() + else: + raise NotImplementedError + + \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/draw_openness_curves.py b/Downstream/Open-Set-Action-Recognition/experiments/draw_openness_curves.py new file mode 100644 index 0000000..0b7dcca --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/draw_openness_curves.py @@ -0,0 +1,323 @@ +import os, argparse +import numpy as np +import matplotlib.pyplot as plt +from sklearn.metrics import f1_score + + +def softmax_curvepoints(result_file, thresh, ood_ncls, num_rand): + assert os.path.exists(result_file), "File not found! Run baseline_i3d_softmax.py first!" + # load the testing results + results = np.load(result_file, allow_pickle=True) + ind_softmax = results['ind_softmax'] # (N1, C) + ood_softmax = results['ood_softmax'] # (N2, C) + ind_labels = results['ind_label'] # (N1,) + ood_labels = results['ood_label'] # (N2,) + + ind_ncls = ind_softmax.shape[1] + ind_results = np.argmax(ind_softmax, axis=1) + ood_results = np.argmax(ood_softmax, axis=1) + ind_conf = np.max(ind_softmax, axis=1) + ood_conf = np.max(ood_softmax, axis=1) + + ind_results[ind_conf < thresh] = ind_ncls # incorrect rejection + # open set F1 score (multi-class) + macro_F1 = f1_score(ind_labels, ind_results, average='macro') + macro_F1_list = [macro_F1 * 100] + openness_list = [0] + for n in range(ood_ncls): + ncls_novel = n + 1 + openness = (1 - np.sqrt((2 * ind_ncls) / (2 * ind_ncls + ncls_novel))) * 100 + openness_list.append(openness) + # randoml select the subset of ood samples + macro_F1_multi = np.zeros((num_rand), dtype=np.float32) + for m in range(num_rand): + cls_select = np.random.choice(ood_ncls, ncls_novel, replace=False) + ood_sub_results = np.concatenate([ood_results[ood_labels == clsid] for clsid in cls_select]) + ood_sub_labels = np.ones_like(ood_sub_results) * ind_ncls + ood_sub_confs = np.concatenate([ood_conf[ood_labels == clsid] for clsid in cls_select]) + ood_sub_results[ood_sub_confs < thresh] = ind_ncls # correct rejection + # construct preds and labels + preds = np.concatenate((ind_results, ood_sub_results), axis=0) + labels = np.concatenate((ind_labels, ood_sub_labels), axis=0) + macro_F1_multi[m] = f1_score(labels, preds, average='macro') + macro_F1 = np.mean(macro_F1_multi) * 100 + macro_F1_list.append(macro_F1) + return openness_list, macro_F1_list + +def openmax_curvepoints(result_file, ood_ncls, num_rand): + assert os.path.exists(result_file), "File not found! Run baseline_i3d_openmax.py first!" 
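+ # The openness sweep below follows the same protocol as softmax_curvepoints:
+ # for n = 1..ood_ncls novel classes, openness is defined as
+ #     openness = (1 - sqrt(2 * C_known / (2 * C_known + n))) * 100,
+ # and for each openness level `num_rand` random subsets of unknown classes are
+ # drawn; the macro-F1 over the (C_known + 1)-way problem (all known classes plus
+ # a single "unknown" class) is averaged over those draws.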
+ results = np.load(result_file, allow_pickle=True) + ind_openmax = results['ind_openmax'] # (N1, C+1) + ood_openmax = results['ood_openmax'] # (N2, C+1) + ind_labels = results['ind_label'] # (N1,) + ood_labels = results['ood_label'] # (N2,) + ind_results = np.argmax(ind_openmax, axis=1) + ood_results = np.argmax(ood_openmax, axis=1) + ind_ncls = ind_openmax.shape[1] - 1 # (C+1)-1 + + # open set F1 score (multi-class) + macro_F1 = f1_score(ind_labels, ind_results, average='macro') + macro_F1_list = [macro_F1 * 100] + openness_list = [0] + for n in range(ood_ncls): + ncls_novel = n + 1 + openness = (1 - np.sqrt((2 * ind_ncls) / (2 * ind_ncls + ncls_novel))) * 100 + openness_list.append(openness) + # randoml select the subset of ood samples + macro_F1_multi = np.zeros((num_rand), dtype=np.float32) + for m in range(num_rand): + cls_select = np.random.choice(ood_ncls, ncls_novel, replace=False) + ood_sub_results = np.concatenate([ood_results[ood_labels == clsid] for clsid in cls_select]) + ood_sub_labels = np.ones_like(ood_sub_results) * ind_ncls + # construct preds and labels + preds = np.concatenate((ind_results, ood_sub_results), axis=0) + labels = np.concatenate((ind_labels, ood_sub_labels), axis=0) + macro_F1_multi[m] = f1_score(labels, preds, average='macro') + macro_F1 = np.mean(macro_F1_multi) * 100 + macro_F1_list.append(macro_F1) + return openness_list, macro_F1_list + + +def uncertainty_curvepoints(result_file, thresh, ind_ncls, ood_ncls, num_rand): + assert os.path.exists(result_file), "File not found! Run ood_detection first!" + # load the testing results + results = np.load(result_file, allow_pickle=True) + ind_uncertainties = results['ind_unctt'] # (N1,) + ood_uncertainties = results['ood_unctt'] # (N2,) + ind_results = results['ind_pred'] # (N1,) + ood_results = results['ood_pred'] # (N2,) + ind_labels = results['ind_label'] + ood_labels = results['ood_label'] + # open set F1 score (multi-class) + ind_results[ind_uncertainties > thresh] = ind_ncls # falsely rejection + macro_F1 = f1_score(ind_labels, ind_results, average='macro') + macro_F1_list = [macro_F1 * 100] + openness_list = [0] + for n in range(ood_ncls): + ncls_novel = n + 1 + openness = (1 - np.sqrt((2 * ind_ncls) / (2 * ind_ncls + ncls_novel))) * 100 + openness_list.append(openness) + # randoml select the subset of ood samples + macro_F1_multi = np.zeros((num_rand), dtype=np.float32) + for m in range(num_rand): + cls_select = np.random.choice(ood_ncls, ncls_novel, replace=False) + ood_sub_results = np.concatenate([ood_results[ood_labels == clsid] for clsid in cls_select]) + ood_sub_uncertainties = np.concatenate([ood_uncertainties[ood_labels == clsid] for clsid in cls_select]) + ood_sub_results[ood_sub_uncertainties > thresh] = ind_ncls # correctly rejection + ood_sub_labels = np.ones_like(ood_sub_results) * ind_ncls + # construct preds and labels + preds = np.concatenate((ind_results, ood_sub_results), axis=0) + labels = np.concatenate((ind_labels, ood_sub_labels), axis=0) + macro_F1_multi[m] = f1_score(labels, preds, average='macro') + macro_F1 = np.mean(macro_F1_multi) * 100 + macro_F1_list.append(macro_F1) + return openness_list, macro_F1_list + + +def plot_all_curves(openness, values, line_styles, result_prefix, ylim=[60, 80], fontsize=18): + fig = plt.figure(figsize=(8,6)) # (w, h) + plt.rcParams["font.family"] = "Arial" + for k, v in values.items(): + plt.plot(openness, v, line_styles[k], linewidth=2, label=k) + plt.xlim(0, max(openness)) + plt.ylim(ylim) + plt.xlabel('Openness (%)', fontsize=fontsize) + 
plt.ylabel('Open maF1 (%)', fontsize=fontsize) + plt.xticks(fontsize=fontsize) + plt.yticks(np.arange(ylim[0], ylim[1]+1, 5), fontsize=fontsize) + plt.grid('on') + plt.legend(fontsize=fontsize-10, loc='lower center', ncol=3, handletextpad=0.3, columnspacing=0.5) + plt.tight_layout() + result_path = os.path.dirname(result_prefix) + if not os.path.exists(result_path): + os.makedirs(result_path) + plt.savefig(result_prefix + '_%s.png'%(args.ood_data), bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0) + plt.savefig(result_prefix + '_%s.pdf'%(args.ood_data), bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0) + +def main_i3d(): + # SoftMax + print('Compute Open maF1 for SoftMax...') + result_file = 'i3d/results_baselines/openmax/I3D_OpenMax_%s_result.npz'%(args.ood_data) + openness_softmax, maF1_softmax = softmax_curvepoints(result_file, 0.996825, args.ood_ncls, args.num_rand) + + # OpenMax + print('Compute Open maF1 for OpenMax...') + result_file = 'i3d/results_baselines/openmax/I3D_OpenMax_%s_result.npz'%(args.ood_data) + openness_openmax, maF1_openmax = openmax_curvepoints(result_file, args.ood_ncls, args.num_rand) + + # RPL + print('Compute Open maF1 for RPL...') + result_file = 'i3d/results_baselines/rpl/I3D_RPL_%s_result.npz'%(args.ood_data) + openness_rpl, maF1_rpl = softmax_curvepoints(result_file, 0.995178, args.ood_ncls, args.num_rand) + + # MCDropout BALD + print('Compute Open maF1 for MC Dropout BALD...') + result_file = 'i3d/results/I3D_DNN_BALD_%s_result.npz'%(args.ood_data) + openness_dnn, maF1_dnn = uncertainty_curvepoints(result_file, 0.000433, args.ind_ncls, args.ood_ncls, args.num_rand) + + # BNN SVI BALD + print('Compute Open maF1 for BNN SVI BALD...') + result_file = 'i3d/results/I3D_BNN_BALD_%s_result.npz'%(args.ood_data) + openness_bnn, maF1_bnn = uncertainty_curvepoints(result_file, 0.000004, args.ind_ncls, args.ood_ncls, args.num_rand) + + # DEAR (full) + print('Compute Open maF1 for DEAR (full)...') + result_file = 'i3d/results/I3D_EDLNoKLAvUCDebias_EDL_%s_result.npz'%(args.ood_data) + openness_dear, maF1_dear = uncertainty_curvepoints(result_file, 0.004550, args.ind_ncls, args.ood_ncls, args.num_rand) + + # draw F1 curve + line_styles = {'DEAR (full)': 'r-', 'SoftMax': 'b-', 'RPL': 'm-', 'BNN SVI': 'c-', 'MC Dropout': 'y-', 'OpenMax': 'k-'} + values = {'DEAR (full)': maF1_dear, 'SoftMax': maF1_softmax, 'RPL': maF1_rpl, 'BNN SVI': maF1_bnn, 'MC Dropout': maF1_dnn, 'OpenMax': maF1_openmax} + result_prefix = args.result_prefix + '_I3D' + plot_all_curves(openness_dear, values, line_styles, result_prefix, ylim=[60,80], fontsize=30) + + +def main_tsm(): + # SoftMax + print('Compute Open maF1 for SoftMax...') + result_file = 'tsm/results_baselines/openmax/TSM_OpenMax_%s_result.npz'%(args.ood_data) + openness_softmax, maF1_softmax = softmax_curvepoints(result_file, 0.999683, args.ood_ncls, args.num_rand) + + # OpenMax + print('Compute Open maF1 for OpenMax...') + result_file = 'tsm/results_baselines/openmax/TSM_OpenMax_%s_result.npz'%(args.ood_data) + openness_openmax, maF1_openmax = openmax_curvepoints(result_file, args.ood_ncls, args.num_rand) + + # RPL + print('Compute Open maF1 for RPL...') + result_file = 'tsm/results_baselines/rpl/TSM_RPL_%s_result.npz'%(args.ood_data) + openness_rpl, maF1_rpl = softmax_curvepoints(result_file, 0.999167, args.ood_ncls, args.num_rand) + + # MCDropout BALD + print('Compute Open maF1 for MC Dropout BALD...') + result_file = 'tsm/results/TSM_DNN_BALD_%s_result.npz'%(args.ood_data) + openness_dnn, maF1_dnn = 
uncertainty_curvepoints(result_file, 0.000022, args.ind_ncls, args.ood_ncls, args.num_rand) + + # BNN SVI BALD + print('Compute Open maF1 for BNN SVI BALD...') + result_file = 'tsm/results/TSM_BNN_BALD_%s_result.npz'%(args.ood_data) + openness_bnn, maF1_bnn = uncertainty_curvepoints(result_file, 0.000003, args.ind_ncls, args.ood_ncls, args.num_rand) + + # DEAR (full) + print('Compute Open maF1 for DEAR (full)...') + result_file = 'tsm/results/TSM_EDLNoKLAvUCDebias_EDL_%s_result.npz'%(args.ood_data) + openness_dear, maF1_dear = uncertainty_curvepoints(result_file, 0.004549, args.ind_ncls, args.ood_ncls, args.num_rand) + + # draw F1 curve + line_styles = {'DEAR (full)': 'r-', 'SoftMax': 'b-', 'RPL': 'm-', 'BNN SVI': 'c-', 'MC Dropout': 'y-', 'OpenMax': 'k-'} + values = {'DEAR (full)': maF1_dear, 'SoftMax': maF1_softmax, 'RPL': maF1_rpl, 'BNN SVI': maF1_bnn, 'MC Dropout': maF1_dnn, 'OpenMax': maF1_openmax} + result_prefix = args.result_prefix + '_TSM' + ylim = [60, 90] if args.ood_data == 'HMDB' else [55, 90] + plot_all_curves(openness_dear, values, line_styles, result_prefix, ylim=ylim, fontsize=30) + + +def main_slowfast(): + # SoftMax + print('Compute Open maF1 for SoftMax...') + result_file = 'slowfast/results_baselines/openmax/SlowFast_OpenMax_%s_result.npz'%(args.ood_data) + openness_softmax, maF1_softmax = softmax_curvepoints(result_file, 0.997915, args.ood_ncls, args.num_rand) + + # OpenMax + print('Compute Open maF1 for OpenMax...') + result_file = 'slowfast/results_baselines/openmax/SlowFast_OpenMax_%s_result.npz'%(args.ood_data) + openness_openmax, maF1_openmax = openmax_curvepoints(result_file, args.ood_ncls, args.num_rand) + + # RPL + print('Compute Open maF1 for RPL...') + result_file = 'slowfast/results_baselines/rpl/SlowFast_RPL_%s_result.npz'%(args.ood_data) + openness_rpl, maF1_rpl = softmax_curvepoints(result_file, 0.997780, args.ood_ncls, args.num_rand) + + # MCDropout BALD + print('Compute Open maF1 for MC Dropout BALD...') + result_file = 'slowfast/results/SlowFast_DNN_BALD_%s_result.npz'%(args.ood_data) + openness_dnn, maF1_dnn = uncertainty_curvepoints(result_file, 0.000065, args.ind_ncls, args.ood_ncls, args.num_rand) + + # BNN SVI BALD + print('Compute Open maF1 for BNN SVI BALD...') + result_file = 'slowfast/results/SlowFast_BNN_BALD_%s_result.npz'%(args.ood_data) + openness_bnn, maF1_bnn = uncertainty_curvepoints(result_file, 0.000004, args.ind_ncls, args.ood_ncls, args.num_rand) + + # DEAR (full) + print('Compute Open maF1 for DEAR (full)...') + result_file = 'slowfast/results/SlowFast_EDLNoKLAvUCDebias_EDL_%s_result.npz'%(args.ood_data) + openness_dear, maF1_dear = uncertainty_curvepoints(result_file, 0.004552, args.ind_ncls, args.ood_ncls, args.num_rand) + + # draw F1 curve + line_styles = {'DEAR (full)': 'r-', 'SoftMax': 'b-', 'RPL': 'm-', 'BNN SVI': 'c-', 'MC Dropout': 'y-', 'OpenMax': 'k-'} + values = {'DEAR (full)': maF1_dear, 'SoftMax': maF1_softmax, 'RPL': maF1_rpl, 'BNN SVI': maF1_bnn, 'MC Dropout': maF1_dnn, 'OpenMax': maF1_openmax} + result_prefix = args.result_prefix + '_SlowFast' + plot_all_curves(openness_dear, values, line_styles, result_prefix, ylim=[60,90], fontsize=30) + + + +def main_tpn(): + # SoftMax + print('Compute Open maF1 for SoftMax...') + result_file = 'tpn_slowonly/results_baselines/openmax/TPN_OpenMax_%s_result.npz'%(args.ood_data) + openness_softmax, maF1_softmax = softmax_curvepoints(result_file, 0.997623, args.ood_ncls, args.num_rand) + + # OpenMax + print('Compute Open maF1 for OpenMax...') + result_file = 
'tpn_slowonly/results_baselines/openmax/TPN_OpenMax_%s_result.npz'%(args.ood_data) + openness_openmax, maF1_openmax = openmax_curvepoints(result_file, args.ood_ncls, args.num_rand) + + # RPL + print('Compute Open maF1 for RPL...') + result_file = 'tpn_slowonly/results_baselines/rpl/TPN_RPL_%s_result.npz'%(args.ood_data) + openness_rpl, maF1_rpl = softmax_curvepoints(result_file, 0.996931, args.ood_ncls, args.num_rand) + + # MCDropout BALD + print('Compute Open maF1 for MC Dropout BALD...') + result_file = 'tpn_slowonly/results/TPN_SlowOnly_Dropout_BALD_%s_result.npz'%(args.ood_data) + openness_dnn, maF1_dnn = uncertainty_curvepoints(result_file, 0.000096, args.ind_ncls, args.ood_ncls, args.num_rand) + + # BNN SVI BALD + print('Compute Open maF1 for BNN SVI BALD...') + result_file = 'tpn_slowonly/results/TPN_SlowOnly_BNN_BALD_%s_result.npz'%(args.ood_data) + openness_bnn, maF1_bnn = uncertainty_curvepoints(result_file, 0.000007, args.ind_ncls, args.ood_ncls, args.num_rand) + + # DEAR (full) + print('Compute Open maF1 for DEAR (full)...') + result_file = 'tpn_slowonly/results/TPN_SlowOnly_EDLlogNoKLAvUCDebias_EDL_%s_result.npz'%(args.ood_data) + openness_dear, maF1_dear = uncertainty_curvepoints(result_file, 0.004555, args.ind_ncls, args.ood_ncls, args.num_rand) + + # draw F1 curve + line_styles = {'DEAR (full)': 'r-', 'SoftMax': 'b-', 'RPL': 'm-', 'BNN SVI': 'c-', 'MC Dropout': 'y-', 'OpenMax': 'k-'} + values = {'DEAR (full)': maF1_dear, 'SoftMax': maF1_softmax, 'RPL': maF1_rpl, 'BNN SVI': maF1_bnn, 'MC Dropout': maF1_dnn, 'OpenMax': maF1_openmax} + result_prefix = args.result_prefix + '_TPN' + ylim = [50, 85] if args.ood_data == 'HMDB' else [50, 85] + plot_all_curves(openness_dear, values, line_styles, result_prefix, ylim=ylim, fontsize=30) + +def parse_args(): + parser = argparse.ArgumentParser(description='Compare the performance of Open macroF1 against openness') + # model config + parser.add_argument('--ind_ncls', type=int, default=101, help='the number of classes in known dataset') + parser.add_argument('--ood_ncls', type=int, default=51, choices=[51, 305], help='the number of classes in unknwon dataset') + parser.add_argument('--ood_data', default='HMDB', choices=['HMDB', 'MiT'], help='the name of OOD dataset.') + parser.add_argument('--model', default='I3D', choices=['I3D', 'TSM', 'SlowFast', 'TPN'], help='the action recognition model.') + parser.add_argument('--num_rand', type=int, default=10, help='the number of random selection for ood classes') + parser.add_argument('--result_prefix', default='../temp/F1_openness') + args = parser.parse_args() + return args + +if __name__ == '__main__': + """ Example script: + python draw_openness_curves.py --model I3D --ood_data MiT --ood_ncls 305 + """ + np.random.seed(123) + args = parse_args() + + if args.model == 'I3D': + # draw results on I3D + main_i3d() + elif args.model == 'TSM': + # draw results on TSM + main_tsm() + elif args.model == 'SlowFast': + # draw results on SlowFast + main_slowfast() + elif args.model == 'TPN': + # draw results on TPN + main_tpn() + else: + raise NotImplementedError diff --git a/Downstream/Open-Set-Action-Recognition/experiments/draw_performance_gain.py b/Downstream/Open-Set-Action-Recognition/experiments/draw_performance_gain.py new file mode 100644 index 0000000..19cf6a1 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/draw_performance_gain.py @@ -0,0 +1,298 @@ +import matplotlib.pyplot as plt +import numpy as np +from matplotlib.lines import Line2D + + +def draw_curves(): + fig = 
plt.figure(figsize=(8,5)) + plt.rcParams["font.family"] = "Arial" + fontsize = 15 + markersize = 80 + + # I3D + I3D_DNN_HMDB = [94.69, 75.07] # (closed-set ACC, open-set AUC) + I3D_DNN_MiT = [94.69, 79.14] + I3D_DEAR_HMDB = [94.34, 77.08] + I3D_DEAR_MiT = [94.34, 81.54] + # TSM + TSM_DNN_HMDB = [95.11, 73.85] + TSM_DNN_MiT = [95.11, 78.35] + TSM_DEAR_HMDB = [94.45, 78.65] + TSM_DEAR_MiT = [94.45, 83.92] + # TPN + TPN_DNN_HMDB = [95.41, 74.13] + TPN_DNN_MiT = [95.41, 77.76] + TPN_DEAR_HMDB = [96.42, 79.23] + TPN_DEAR_MiT = [96.42, 81.80] + # SlowFast + SlowFast_DNN_HMDB = [96.72, 75.41] + SlowFast_DNN_MiT = [96.72, 78.49] + SlowFast_DEAR_HMDB = [96.66, 82.94] + SlowFast_DEAR_MiT = [96.66, 86.99] + + # Line: DNN for HMDB + plt.plot([I3D_DNN_HMDB[0], TSM_DNN_HMDB[0], TPN_DNN_HMDB[0], SlowFast_DNN_HMDB[0]], + [I3D_DNN_HMDB[1], TSM_DNN_HMDB[1], TPN_DNN_HMDB[1], SlowFast_DNN_HMDB[1]], 'r-', linewidth=2, label='HMDB') + # Line: DEAR for HMDB + plt.plot([I3D_DEAR_HMDB[0], TSM_DEAR_HMDB[0], TPN_DEAR_HMDB[0], SlowFast_DEAR_HMDB[0]], + [I3D_DEAR_HMDB[1], TSM_DEAR_HMDB[1], TPN_DEAR_HMDB[1], SlowFast_DEAR_HMDB[1]], 'r-', linewidth=2) + # Line: DNN for MiT + plt.plot([I3D_DNN_MiT[0], TSM_DNN_MiT[0], TPN_DNN_MiT[0], SlowFast_DNN_MiT[0]], + [I3D_DNN_MiT[1], TSM_DNN_MiT[1], TPN_DNN_MiT[1], SlowFast_DNN_MiT[1]], 'b-', linewidth=2, label='MiT') + # Line: DEAR for MiT + plt.plot([I3D_DEAR_MiT[0], TSM_DEAR_MiT[0], TPN_DEAR_MiT[0], SlowFast_DEAR_MiT[0]], + [I3D_DEAR_MiT[1], TSM_DEAR_MiT[1], TPN_DEAR_MiT[1], SlowFast_DEAR_MiT[1]], 'b-', linewidth=2) + + + # Draw all I3D points + # HMDB + plt.scatter(I3D_DNN_HMDB[0], I3D_DNN_HMDB[1], marker='^', s=markersize, color='r', label='Dropout BALD') + plt.text(I3D_DNN_HMDB[0], I3D_DNN_HMDB[1], 'I3D', fontsize=fontsize) + plt.scatter(I3D_DEAR_HMDB[0], I3D_DEAR_HMDB[1], marker='*', s=markersize, color='r', label='DEAR EU') + plt.text(I3D_DEAR_HMDB[0], I3D_DEAR_HMDB[1], 'I3D', fontsize=fontsize) + plt.plot([I3D_DNN_HMDB[0], I3D_DEAR_HMDB[0]], [I3D_DNN_HMDB[1], I3D_DEAR_HMDB[1]], 'r--', linewidth=0.5) + # plt.arrow(I3D_DNN_HMDB[0]+1, I3D_DNN_HMDB[1], I3D_DEAR_HMDB[0]-I3D_DNN_HMDB[0]-2, I3D_DEAR_HMDB[1]-I3D_DNN_HMDB[1]-1,head_width=0.8, fc='skyblue',ec='skyblue', head_length=0.8) + # # MiT + plt.scatter(I3D_DNN_MiT[0], I3D_DNN_MiT[1], marker='^', s=markersize, color='b') + plt.text(I3D_DNN_MiT[0], I3D_DNN_MiT[1], 'I3D', fontsize=fontsize) + plt.scatter(I3D_DEAR_MiT[0], I3D_DEAR_MiT[1], marker='*', s=markersize, color='b') + plt.text(I3D_DEAR_MiT[0], I3D_DEAR_MiT[1], 'I3D', fontsize=fontsize) + plt.plot([I3D_DNN_MiT[0], I3D_DEAR_MiT[0]], [I3D_DNN_MiT[1], I3D_DEAR_MiT[1]], 'b--', linewidth=0.5) + # plt.arrow(I3D_DNN_MiT[0]+1, I3D_DNN_MiT[1], I3D_DEAR_MiT[0]-I3D_DNN_MiT[0]-3, I3D_DEAR_MiT[1]-I3D_DNN_MiT[1]-2,head_width=0.8, fc='grey',ec='grey', head_length=0.8) + + # Draw all TSM points + # HMDB + plt.scatter(TSM_DNN_HMDB[0], TSM_DNN_HMDB[1], marker='^', s=markersize, color='r') + plt.text(TSM_DNN_HMDB[0], TSM_DNN_HMDB[1], 'TSM', fontsize=fontsize) + plt.scatter(TSM_DEAR_HMDB[0], TSM_DEAR_HMDB[1], marker='*', s=markersize, color='r') + plt.text(TSM_DEAR_HMDB[0], TSM_DEAR_HMDB[1], 'TSM', fontsize=fontsize) + plt.plot([TSM_DNN_HMDB[0], TSM_DEAR_HMDB[0]], [TSM_DNN_HMDB[1], TSM_DEAR_HMDB[1]], 'r--', linewidth=0.5) + # plt.arrow(TSM_DNN_HMDB[0]+1, TSM_DNN_HMDB[1], TSM_DEAR_HMDB[0]-TSM_DNN_HMDB[0]-2, TSM_DEAR_HMDB[1]-TSM_DNN_HMDB[1]-1,head_width=0.8, fc='skyblue',ec='skyblue', head_length=0.8) + # # MiT + plt.scatter(TSM_DNN_MiT[0], TSM_DNN_MiT[1], marker='^', s=markersize, color='b') 
+ plt.text(TSM_DNN_MiT[0], TSM_DNN_MiT[1], 'TSM', fontsize=fontsize) + plt.scatter(TSM_DEAR_MiT[0], TSM_DEAR_MiT[1], marker='*', s=markersize, color='b') + plt.text(TSM_DEAR_MiT[0], TSM_DEAR_MiT[1], 'TSM', fontsize=fontsize) + plt.plot([TSM_DNN_MiT[0], TSM_DEAR_MiT[0]], [TSM_DNN_MiT[1], TSM_DEAR_MiT[1]], 'b--', linewidth=0.5) + + # Draw all TPN points + # HMDB + plt.scatter(TPN_DNN_HMDB[0], TPN_DNN_HMDB[1], marker='^', s=markersize, color='r') + plt.text(TPN_DNN_HMDB[0], TPN_DNN_HMDB[1], 'TPN', fontsize=fontsize) + plt.scatter(TPN_DEAR_HMDB[0], TPN_DEAR_HMDB[1], marker='*', s=markersize, color='r') + plt.text(TPN_DEAR_HMDB[0], TPN_DEAR_HMDB[1], 'TPN', fontsize=fontsize) + plt.plot([TPN_DNN_HMDB[0], TPN_DEAR_HMDB[0]], [TPN_DNN_HMDB[1], TPN_DEAR_HMDB[1]], 'r--', linewidth=0.5) + # plt.arrow(TPN_DNN_HMDB[0]+1, TPN_DNN_HMDB[1], TPN_DEAR_HMDB[0]-TPN_DNN_HMDB[0]-2, TPN_DEAR_HMDB[1]-TPN_DNN_HMDB[1]-1,head_width=0.8, fc='skyblue',ec='skyblue', head_length=0.8) + plt.scatter(TPN_DNN_MiT[0], TPN_DNN_MiT[1], marker='^', s=markersize, color='b') + plt.text(TPN_DNN_MiT[0], TPN_DNN_MiT[1], 'TPN', fontsize=fontsize) + plt.scatter(TPN_DEAR_MiT[0], TPN_DEAR_MiT[1], marker='*', s=markersize, color='b') + plt.text(TPN_DEAR_MiT[0], TPN_DEAR_MiT[1], 'TPN', fontsize=fontsize) + plt.plot([TPN_DNN_MiT[0], TPN_DEAR_MiT[0]], [TPN_DNN_MiT[1], TPN_DEAR_MiT[1]], 'b--', linewidth=0.5) + + # Draw all SlowFast points + # HMDB + plt.scatter(SlowFast_DNN_HMDB[0], SlowFast_DNN_HMDB[1], marker='^', s=markersize, color='r') + plt.text(SlowFast_DNN_HMDB[0], SlowFast_DNN_HMDB[1], 'SlowFast', fontsize=fontsize) + plt.scatter(SlowFast_DEAR_HMDB[0], SlowFast_DEAR_HMDB[1], marker='*', s=markersize, color='r') + plt.text(SlowFast_DEAR_HMDB[0], SlowFast_DEAR_HMDB[1], 'SlowFast', fontsize=fontsize) + plt.plot([SlowFast_DNN_HMDB[0], SlowFast_DEAR_HMDB[0]], [SlowFast_DNN_HMDB[1], SlowFast_DEAR_HMDB[1]], 'r--', linewidth=0.5) + # MiT + plt.scatter(SlowFast_DNN_MiT[0], SlowFast_DNN_MiT[1], marker='^', s=markersize, color='b') + plt.text(SlowFast_DNN_MiT[0], SlowFast_DNN_MiT[1], 'SlowFast', fontsize=fontsize) + plt.scatter(SlowFast_DEAR_MiT[0], SlowFast_DEAR_MiT[1], marker='*', s=markersize, color='b') + plt.text(SlowFast_DEAR_MiT[0], SlowFast_DEAR_MiT[1], 'SlowFast', fontsize=fontsize) + plt.plot([SlowFast_DNN_MiT[0], SlowFast_DEAR_MiT[0]], [SlowFast_DNN_MiT[1], SlowFast_DEAR_MiT[1]], 'b--', linewidth=0.5) + + plt.xlim(94, 97.5) + plt.ylim(65, 90) + plt.xlabel('Closed-Set Accuracy (%)', fontsize=fontsize) + plt.ylabel('Open-Set AUC Score (%)', fontsize=fontsize) + plt.xticks(fontsize=fontsize) + plt.yticks(fontsize=fontsize) + plt.legend(loc='lower left', fontsize=fontsize) + plt.grid('on', linestyle='--') + plt.tight_layout() + + plt.savefig('../temp/compare_gain.png') + plt.savefig('../temp/compare_gain.pdf') + + +def draw_one_curve(data_dict, markers, markercolor='g', markersize=80, fontsize=10, label='I3D', linestyle='g-', add_marker_text=False, text_offset=[0,0]): + + sorted_dict = dict(sorted(data_dict.items(),key=lambda x:x[1][1])) + + x_data, y_data = [], [] + for k, v in sorted_dict.items(): + x_data.append(v[1]) + y_data.append(v[0]) + # Marker: OpenMax + if k == 'DEAR (Ours)': + plt.scatter(v[1], v[0], marker=markers[k], s=markersize*4, color=markercolor) + else: + plt.scatter(v[1], v[0], marker=markers[k], s=markersize, color=markercolor) + if add_marker_text: + # plt.text(v[1] + text_offset[0], v[0]+ text_offset[1], k, fontsize=fontsize) + pass + # Line: I3D for MiT + line_hd, = plt.plot(x_data, y_data, linestyle, 
linewidth=2, label=label, markersize=1) + return line_hd + + +def draw_mit_curves(): + fig, ax = plt.subplots(figsize=(8,6)) + plt.rcParams["font.family"] = "Arial" + fontsize = 25 + markersize = 80 + + # (open maF1, open-set AUC) + # I3D + I3D_OpenMax = [66.22, 77.76] + I3D_Dropout = [68.11, 79.14] + I3D_BNNSVI = [68.65, 79.50] + I3D_SoftMax = [68.84, 79.94] + I3D_RPL = [68.11, 79.16] + I3D_DEAR = [69.98, 81.54] + # TSM + TSM_OpenMax = [71.81, 83.05] + TSM_Dropout = [65.32, 78.35] + TSM_BNNSVI = [64.28, 77.39] + TSM_SoftMax = [71.68, 82.38] + TSM_RPL = [63.92, 77.28] + TSM_DEAR = [70.15, 83.92] + # TPN + TPN_OpenMax = [64.80, 76.26] + TPN_Dropout = [65.77, 77.76] + TPN_BNNSVI = [61.40, 75.32] + TPN_SoftMax = [70.82, 81.35] + TPN_RPL = [66.21, 78.21] + TPN_DEAR = [71.18, 81.80] + # SlowFast + SlowFast_OpenMax = [72.48, 80.62] + SlowFast_Dropout = [67.53, 78.49] + SlowFast_BNNSVI = [65.22, 77.39] + SlowFast_SoftMax = [74.42, 82.88] + SlowFast_RPL = [66.33, 77.42] + SlowFast_DEAR = [77.28, 86.99] + + markers = {'DEAR (Ours)': '*', 'SoftMax': 'o', 'OpenMax': '^', 'RPL': 'd', 'MC Dropout': 's', 'BNN SVI': 'P'} + + # Line: I3D for MiT + data_dict = {'OpenMax': I3D_OpenMax, 'MC Dropout': I3D_Dropout, 'BNN SVI': I3D_BNNSVI, 'SoftMax': I3D_SoftMax, 'RPL': I3D_RPL, 'DEAR (Ours)': I3D_DEAR} + line1_hd = draw_one_curve(data_dict, markers=markers, markercolor='g', markersize=markersize, fontsize=fontsize, label='I3D', linestyle='g-') + + data_dict = {'OpenMax': TSM_OpenMax, 'MC Dropout': TSM_Dropout, 'BNN SVI': TSM_BNNSVI, 'SoftMax': TSM_SoftMax, 'RPL': TSM_RPL, 'DEAR (Ours)': TSM_DEAR} + line2_hd = draw_one_curve(data_dict, markers=markers, markercolor='k', markersize=markersize, fontsize=fontsize, label='TSM', linestyle='k-') + + data_dict = {'OpenMax': TPN_OpenMax, 'MC Dropout': TPN_Dropout, 'BNN SVI': TPN_BNNSVI, 'SoftMax': TPN_SoftMax, 'RPL': TPN_RPL, 'DEAR (Ours)': TPN_DEAR} + line3_hd = draw_one_curve(data_dict, markers=markers, markercolor='b', markersize=markersize, fontsize=fontsize, label='TPN', linestyle='b-') + + data_dict = {'OpenMax': SlowFast_OpenMax, 'MC Dropout': SlowFast_Dropout, 'BNN SVI': SlowFast_BNNSVI, 'SoftMax': SlowFast_SoftMax, 'RPL': SlowFast_RPL, 'DEAR (Ours)': SlowFast_DEAR} + line4_hd = draw_one_curve(data_dict, markers=markers, markercolor='r', markersize=markersize, fontsize=fontsize, label='SlowFast', linestyle='r-', + add_marker_text=True, text_offset=[-2.2, -0.2]) + + marker_elements = [] + for k, v in markers.items(): + msize = 18 if k == 'DEAR (Ours)' else 12 + elem = Line2D([0], [0], marker=v, label=k, markersize=msize, linestyle="None") + marker_elements.append(elem) + marker_legend = ax.legend(handles=marker_elements, fontsize=fontsize-3, loc='lower right', ncol=1, handletextpad=0.05, columnspacing=0.05, borderaxespad=0.1) + ax.add_artist(marker_legend) + + plt.ylim(60, 78) + plt.xlim(75, 88) + plt.ylabel('Open maF1 (%)', fontsize=fontsize) + plt.xlabel('Open-Set AUC Score (%)', fontsize=fontsize) + plt.xticks(np.arange(75, 89, 4), fontsize=fontsize) + plt.yticks(np.arange(60, 79, 4), fontsize=fontsize) + plt.legend(handles=[line1_hd, line2_hd, line3_hd, line4_hd], loc='upper left', fontsize=fontsize-3, handletextpad=0.5, borderaxespad=0.1) + plt.title('MiT-v2 as Unknown', fontsize=fontsize) + plt.grid('on', linestyle='--') + plt.tight_layout() + plt.savefig('../temp/compare_gain_mit.png', bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0) + plt.savefig('../temp/compare_gain_mit.pdf', bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0) + + + +def 
draw_hmdb_curves(): + fig, ax = plt.subplots(figsize=(8,6)) + plt.rcParams["font.family"] = "Arial" + fontsize = 25 + markersize = 80 + + # (open maF1, open-set AUC) + # I3D + I3D_OpenMax = [67.85, 74.34] + I3D_Dropout = [71.13, 75.07] + I3D_BNNSVI = [71.57, 74.66] + I3D_SoftMax = [73.19, 75.68] + I3D_RPL = [71.48, 75.20] + I3D_DEAR = [77.24, 77.08] + # TSM + TSM_OpenMax = [74.17, 77.07] + TSM_Dropout = [71.52, 73.85] + TSM_BNNSVI = [69.11, 73.42] + TSM_SoftMax = [78.27, 77.99] + TSM_RPL = [69.34, 73.62] + TSM_DEAR = [84.69, 78.65] + # TPN + TPN_OpenMax = [65.27, 74.12] + TPN_Dropout = [68.45, 74.13] + TPN_BNNSVI = [63.81, 72.68] + TPN_SoftMax = [76.23, 77.97] + TPN_RPL = [70.31, 75.32] + TPN_DEAR = [81.79, 79.23] + # SlowFast + SlowFast_OpenMax = [73.57, 78.76] + SlowFast_Dropout = [70.55, 75.41] + SlowFast_BNNSVI = [69.19, 74.78] + SlowFast_SoftMax = [78.04, 79.16] + SlowFast_RPL = [68.32, 74.23] + SlowFast_DEAR = [85.48, 82.94] + + markers = {'DEAR (Ours)': '*', 'SoftMax': 'o', 'OpenMax': '^', 'RPL': 'd', 'MC Dropout': 's', 'BNN SVI': 'P'} + + # Line: I3D for HMDB + data_dict = {'OpenMax': I3D_OpenMax, 'MC Dropout': I3D_Dropout, 'BNN SVI': I3D_BNNSVI, 'SoftMax': I3D_SoftMax, 'RPL': I3D_RPL, 'DEAR (Ours)': I3D_DEAR} + line1_hd = draw_one_curve(data_dict, markers=markers, markercolor='g', markersize=markersize, fontsize=fontsize, label='I3D', linestyle='g-') + + data_dict = {'OpenMax': TSM_OpenMax, 'MC Dropout': TSM_Dropout, 'BNN SVI': TSM_BNNSVI, 'SoftMax': TSM_SoftMax, 'RPL': TSM_RPL, 'DEAR (Ours)': TSM_DEAR} + line2_hd = draw_one_curve(data_dict, markers=markers, markercolor='k', markersize=markersize, fontsize=fontsize, label='TSM', linestyle='k-') + + data_dict = {'OpenMax': TPN_OpenMax, 'MC Dropout': TPN_Dropout, 'BNN SVI': TPN_BNNSVI, 'SoftMax': TPN_SoftMax, 'RPL': TPN_RPL, 'DEAR (Ours)': TPN_DEAR} + line3_hd = draw_one_curve(data_dict, markers=markers, markercolor='b', markersize=markersize, fontsize=fontsize, label='TPN', linestyle='b-') + + data_dict = {'OpenMax': SlowFast_OpenMax, 'MC Dropout': SlowFast_Dropout, 'BNN SVI': SlowFast_BNNSVI, 'SoftMax': SlowFast_SoftMax, 'RPL': SlowFast_RPL, 'DEAR (Ours)': SlowFast_DEAR} + line4_hd = draw_one_curve(data_dict, markers=markers, markercolor='r', markersize=markersize, fontsize=fontsize, label='SlowFast', linestyle='r-', + add_marker_text=True, text_offset=[0.2, -1.5]) + + + marker_elements = [] + for k, v in markers.items(): + msize = 18 if k == 'DEAR (Ours)' else 12 + elem = Line2D([0], [0], marker=v, label=k, markersize=msize, linestyle="None") + marker_elements.append(elem) + marker_legend = ax.legend(handles=marker_elements, fontsize=fontsize-3, loc='lower right', ncol=1, handletextpad=0.3, columnspacing=0.2, borderaxespad=0.1) + ax.add_artist(marker_legend) + + plt.ylim(60, 88) + plt.xlim(72, 85) + plt.ylabel('Open maF1 (%)', fontsize=fontsize) + plt.xlabel('Open-Set AUC Score (%)', fontsize=fontsize) + plt.xticks(fontsize=fontsize) + plt.yticks(fontsize=fontsize) + plt.legend(handles=[line1_hd, line2_hd, line3_hd, line4_hd], loc='upper left', fontsize=fontsize-3, handletextpad=0.5, borderaxespad=0.1) + plt.grid('on', linestyle='--') + plt.title('HMDB-51 as Unknown', fontsize=fontsize) + plt.tight_layout() + plt.savefig('../temp/compare_gain_hmdb.png', bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0) + plt.savefig('../temp/compare_gain_hmdb.pdf', bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0) + + +if __name__ == '__main__': + + models = ['I3D', 'TPN', 'TSM', 'SlowFast'] + + # draw_curves() + + draw_mit_curves() + 
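+ # draw_hmdb_curves() below renders the HMDB-51 counterpart of the MiT-v2 figure;
+ # both figures are written to ../temp/compare_gain_{mit,hmdb}.{png,pdf}.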
draw_hmdb_curves() diff --git a/Downstream/Open-Set-Action-Recognition/experiments/eval_debias.py b/Downstream/Open-Set-Action-Recognition/experiments/eval_debias.py new file mode 100644 index 0000000..1aaeffb --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/eval_debias.py @@ -0,0 +1,129 @@ +import argparse +import os +import torch +import mmcv +from mmaction.apis import init_recognizer +from mmaction.datasets import build_dataloader, build_dataset +from mmaction.core.evaluation import top_k_accuracy +from mmcv.parallel import MMDataParallel +import numpy as np +import torch.multiprocessing +torch.multiprocessing.set_sharing_strategy('file_system') + + + +def parse_args(): + parser = argparse.ArgumentParser(description='MMAction2 test') + # model config + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file/url') + parser.add_argument('--split_file', help='the split file for evaluation') + parser.add_argument('--video_path', help='the video path for evaluation') + parser.add_argument('--device', type=str, default='cuda:0', help='CPU/CUDA device option') + parser.add_argument('--result_prefix', help='result file prefix') + args = parser.parse_args() + return args + + +def set_deterministic(seed): + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) # if you are using multi-GPU. + np.random.seed(seed) # Numpy module. + torch.backends.cudnn.benchmark = True + torch.backends.cudnn.deterministic = True + + +def init_inference(): + # build the recognizer from a config file and checkpoint file/url + model = init_recognizer(args.config, args.checkpoint, device=device, use_frames=False) + cfg = model.cfg + cfg.data.test.test_mode = True + cfg.data.test.ann_file = args.split_file + cfg.data.test.data_prefix = args.video_path + evidence = cfg.get('evidence', 'exp') + # build the dataloader + dataset = build_dataset(cfg.data.test, dict(test_mode=True)) + dataloader_setting = dict( + videos_per_gpu=cfg.data.get('videos_per_gpu', 1), + workers_per_gpu=cfg.data.get('workers_per_gpu', 1), + dist=False, + shuffle=False, + pin_memory=False) + dataloader_setting = dict(dataloader_setting, **cfg.data.get('test_dataloader', {})) + data_loader = build_dataloader(dataset, **dataloader_setting) + return model, data_loader, evidence + + +def run_evidence_inference(model, data_loader, evidence='exp'): + # get the evidence function + if evidence == 'relu': + from mmaction.models.losses.edl_loss import relu_evidence as get_evidence + elif evidence == 'exp': + from mmaction.models.losses.edl_loss import exp_evidence as get_evidence + elif evidence == 'softplus': + from mmaction.models.losses.edl_loss import softplus_evidence as get_evidence + else: + raise NotImplementedError + num_classes = model.cls_head.num_classes + + # run inference + model = MMDataParallel(model, device_ids=[0]) + all_scores, all_uncertainties, all_labels = [], [], [] + prog_bar = mmcv.ProgressBar(len(data_loader.dataset)) + for i, data in enumerate(data_loader): + with torch.no_grad(): + output = model(return_loss=False, **data) + evidence = get_evidence(torch.from_numpy(output)) + alpha = evidence + 1 + uncertainty = num_classes / torch.sum(alpha, dim=1) + scores = alpha / torch.sum(alpha, dim=1, keepdim=True) + all_uncertainties.append(uncertainty.numpy()) + all_scores.append(scores.numpy()) + labels = data['label'].numpy() + all_labels.append(labels) + + # use the first key as main key to calculate the batch size + 
batch_size = len(next(iter(data.values()))) + for _ in range(batch_size): + prog_bar.update() + all_scores = np.concatenate(all_scores, axis=0) + all_uncertainties = np.concatenate(all_uncertainties, axis=0) + all_labels = np.concatenate(all_labels, axis=0) + + return all_scores, all_uncertainties, all_labels + + +def main(): + + result_file = os.path.join(args.result_prefix + '_result.npz') + if not os.path.exists(result_file): + # prepare result path + result_dir = os.path.dirname(result_file) + if not os.path.exists(result_dir): + os.makedirs(result_dir) + # init + model, data_loader, evidence = init_inference() + # run inference + all_scores, all_uncertainties, all_labels = run_evidence_inference(model, data_loader, evidence) + # save + np.savez(result_file[:-4], score=all_scores, uncertainty=all_uncertainties, label=all_labels) + else: + results = np.load(result_file, allow_pickle=True) + all_scores = results['score'] + all_uncertainties = results['uncertainty'] + all_labels = results['label'] # (N2,) + + # evaluation on bias/unbiased closed set + top_k_acc = top_k_accuracy(all_scores, all_labels, (1, 5)) + print('Evaluation Results:\ntop1_acc: %.5lf, top5_acc: %.5lf\n'%(top_k_acc[0], top_k_acc[1])) + + +if __name__ == '__main__': + + args = parse_args() + # assign the desired device. + device = torch.device(args.device) + set_deterministic(1234) + + main() diff --git a/Downstream/Open-Set-Action-Recognition/experiments/evaluate_calibration.py b/Downstream/Open-Set-Action-Recognition/experiments/evaluate_calibration.py new file mode 100644 index 0000000..f9b8c8c --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/evaluate_calibration.py @@ -0,0 +1,158 @@ +import argparse, os +import numpy as np +import matplotlib.pyplot as plt + + +def eval_calibration(predictions, confidences, labels, M=15): + """ + M: number of bins for confidence scores + """ + num_Bm = np.zeros((M,), dtype=np.int32) + accs = np.zeros((M,), dtype=np.float32) + confs = np.zeros((M,), dtype=np.float32) + for m in range(M): + interval = [m / M, (m+1) / M] + Bm = np.where((confidences > interval[0]) & (confidences <= interval[1]))[0] + if len(Bm) > 0: + acc_bin = np.sum(predictions[Bm] == labels[Bm]) / len(Bm) + conf_bin = np.mean(confidences[Bm]) + # gather results + num_Bm[m] = len(Bm) + accs[m] = acc_bin + confs[m] = conf_bin + conf_intervals = np.arange(0, 1, 1/M) + return accs, confs, num_Bm, conf_intervals + +def add_identity(axes, *line_args, **line_kwargs): + identity, = axes.plot([], [], *line_args, **line_kwargs) + def callback(axes): + low_x, high_x = axes.get_xlim() + low_y, high_y = axes.get_ylim() + low = max(low_x, low_y) + high = min(high_x, high_y) + identity.set_data([low, high], [low, high]) + callback(axes) + axes.callbacks.connect('xlim_changed', callback) + axes.callbacks.connect('ylim_changed', callback) + return axes + + +def compute_eavuc(preds, labels, confs, uncertainties): + eavuc = 0 + inds_accurate = np.where(preds == labels)[0] + eavuc += -np.sum(confs[inds_accurate] * np.log(1 - uncertainties[inds_accurate])) + inds_inaccurate = np.where(preds != labels)[0] + eavuc += -np.sum((1 - confs[inds_inaccurate]) * np.log(uncertainties[inds_inaccurate])) + return eavuc + + +def closedset_multicls(ind_results, ind_labels, ind_confidences, ind_uncertainties): + ind_preds = ind_results.copy() + accs, confs, num_Bm, conf_intervals = eval_calibration(ind_preds, 1-ind_uncertainties, ind_labels, M=args.M) + # compute the EAvUC + eavuc = compute_eavuc(ind_preds, ind_labels, 
ind_confidences, ind_uncertainties) + # compute ECE + ece = np.sum(np.abs(accs - confs) * num_Bm / np.sum(num_Bm)) + return ece, eavuc, conf_intervals, accs + + +def openset_multicls(ind_results, ood_results, ind_labels, ood_labels, ind_confidences, ood_confidences, ind_uncertainties, ood_uncertainties): + ind_preds = ind_results.copy() + ood_preds = ood_results.copy() + ind_preds[ind_uncertainties > args.threshold] = args.ind_ncls + ood_preds[ood_uncertainties > args.threshold] = args.ind_ncls + preds = np.concatenate((ind_preds, ood_preds), axis=0) + labels = np.concatenate((ind_labels, np.ones_like(ood_labels) * args.ind_ncls), axis=0) + confs = np.concatenate((ind_confidences, ood_confidences), axis=0) + unctns = np.concatenate((ind_uncertainties, ood_uncertainties), axis=0) + # compute the EAvUC + eavuc = compute_eavuc(preds, labels, confs, unctns) + # compute ECE + accs, confs, num_Bm, conf_intervals = eval_calibration(preds, 1-unctns, labels, M=args.M) + ece = np.sum(np.abs(accs - confs) * num_Bm / np.sum(num_Bm)) + return ece, eavuc, conf_intervals, accs + + +def openset_bincls(ind_results, ood_results, ind_labels, ood_labels, ind_confidences, ood_confidences, ind_uncertainties, ood_uncertainties): + ind_preds = ind_results.copy() + ood_preds = ood_results.copy() + ind_preds[ind_uncertainties > args.threshold] = 1 + ind_preds[ind_uncertainties <= args.threshold] = 0 + ood_preds[ood_uncertainties > args.threshold] = 1 + ood_preds[ood_uncertainties < args.threshold] = 0 + preds = np.concatenate((ind_preds, ood_preds), axis=0) + labels = np.concatenate((np.zeros_like(ind_labels), np.ones_like(ood_labels)), axis=0) + confs = np.concatenate((ind_confidences, ood_confidences), axis=0) + unctns = np.concatenate((ind_uncertainties, ood_uncertainties), axis=0) + # compute the EAvUC + eavuc = compute_eavuc(preds, labels, confs, unctns) + # compute ECE + accs, confs, num_Bm, conf_intervals = eval_calibration(preds, 1-unctns, labels, M=args.M) + ece = np.sum(np.abs(accs - confs) * num_Bm / np.sum(num_Bm)) + return ece, eavuc, conf_intervals, accs + + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='MMAction2 test') + # model config + parser.add_argument('--ood_result', help='the result file of ood detection') + parser.add_argument('--M', type=int, default=15, help='The number of bins') + parser.add_argument('--ind_ncls', type=int, default=101, help='the number classes for in-distribution data') + parser.add_argument('--threshold', type=float, help='the threshold to decide if it is an OOD') + parser.add_argument('--save_prefix', help='the image file path of generated calibration figure') + parser.add_argument('--draw_diagram', action='store_true', help='if to draw reliability diagram.') + args = parser.parse_args() + + results = np.load(args.ood_result, allow_pickle=True) + ind_uncertainties = results['ind_unctt'] # (N1,) + ood_uncertainties = results['ood_unctt'] # (N2,) + ind_results = results['ind_pred'] # (N1,) + ood_results = results['ood_pred'] # (N2,) + ind_labels = results['ind_label'] + ood_labels = results['ood_label'] + if 'ind_conf' not in results: + ind_confidences = 1 - ind_uncertainties + ood_confidences = 1 - ood_uncertainties + else: + ind_confidences = results['ind_conf'] + ood_confidences = results['ood_conf'] + + # result path + result_path = os.path.dirname(args.save_prefix) + if not os.path.exists(result_path): + os.makedirs(result_path) + + # Closed Set: (K class) + ece, eavuc, conf_intervals, accs = closedset_multicls(ind_results, ind_labels, 
ind_confidences, ind_uncertainties) + print('The Closed Set (K class) ECE=%.3lf, EAvUC=%.3lf'%(ece, eavuc)) + + # Open Set: K+1 class + ece, eavuc, conf_intervals, accs = openset_multicls(ind_results, ood_results, ind_labels, ood_labels, ind_confidences, ood_confidences, ind_uncertainties, ood_uncertainties) + print('The Open Set (K+1 class) ECE=%.3lf, EAvUC=%.3lf'%(ece, eavuc)) + + # Open Set: 2 class + ece, eavuc, conf_intervals, accs = openset_bincls(ind_results, ood_results, ind_labels, ood_labels, ind_confidences, ood_confidences, ind_uncertainties, ood_uncertainties) + print('The Open Set (2-class) ECE=%.3lf, EAvUC=%.3lf'%(ece, eavuc)) + + if args.draw_diagram: + # plot the ECE figure + fig, ax = plt.subplots(figsize=(4,4)) + plt.rcParams["font.family"] = "Arial" # Times New Roman + fontsize = 15 + plt.bar(conf_intervals, accs, width=1/args.M, linewidth=1, edgecolor='k', align='edge', label='Outputs') + plt.bar(conf_intervals, np.maximum(0, conf_intervals - accs), bottom=accs, color='y', width=1/args.M, linewidth=1, edgecolor='k', align='edge', label='Gap') + plt.text(0.1, 0.6, 'ECE=%.4f'%(ece), fontsize=fontsize) + add_identity(ax, color='r', ls='--') + plt.xlim(0, 1) + plt.ylim(0, 1) + plt.xticks(fontsize=fontsize) + plt.yticks(fontsize=fontsize) + plt.xlabel('confidence', fontsize=fontsize) + plt.ylabel('accuracy', fontsize=fontsize) + plt.legend(fontsize=fontsize) + ax.set_aspect('equal', 'box') + plt.tight_layout() + plt.savefig(args.save_prefix + '_ind.png') + plt.savefig(args.save_prefix + '_ind.pdf') + diff --git a/Downstream/Open-Set-Action-Recognition/experiments/get_threshold.py b/Downstream/Open-Set-Action-Recognition/experiments/get_threshold.py new file mode 100644 index 0000000..2138dc6 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/get_threshold.py @@ -0,0 +1,183 @@ +import argparse +import os +import os.path as osp +import torch +import mmcv +from mmaction.apis import init_recognizer +from mmcv.parallel import collate, scatter +from operator import itemgetter +from mmaction.datasets.pipelines import Compose +from mmaction.datasets import build_dataloader, build_dataset +from mmcv.parallel import MMDataParallel +import numpy as np +from scipy.special import xlogy +from tqdm import tqdm + + +def parse_args(): + parser = argparse.ArgumentParser(description='MMAction2 test') + # model and data config + parser.add_argument('--config', help='test config file path') + parser.add_argument('--checkpoint', help='checkpoint file/url') + parser.add_argument('--uncertainty', default='BALD', choices=['BALD', 'Entropy', 'EDL'], help='the uncertainty estimation method') + parser.add_argument('--train_data', help='the split file of in-distribution training data') + parser.add_argument('--forward_pass', type=int, default=10, help='the number of forward passes') + parser.add_argument('--batch_size', type=int, default=8, help='the testing batch size') + # env config + parser.add_argument('--device', type=str, default='cuda:0', help='CPU/CUDA device option') + parser.add_argument('--result_prefix', help='result file prefix') + args = parser.parse_args() + return args + +def apply_dropout(m): + if type(m) == torch.nn.Dropout: + m.train() + +def update_seed(seed): + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + +def compute_uncertainty(predictions, method='BALD'): + """Compute the entropy + scores: (B x C x T) + """ + expected_p = np.mean(predictions, axis=-1) # mean of all forward passes (C,) + entropy_expected_p = - np.sum(xlogy(expected_p, expected_p), 
axis=1) # the entropy of expect_p (across classes) + if method == 'Entropy': + uncertain_score = entropy_expected_p + elif method == 'BALD': + expected_entropy = - np.mean(np.sum(xlogy(predictions, predictions), axis=1), axis=-1) # mean of entropies (across classes), (scalar) + uncertain_score = entropy_expected_p - expected_entropy + else: + raise NotImplementedError + if not np.all(np.isfinite(uncertain_score)): + uncertain_score[~np.isfinite] = 9999 + return uncertain_score + +def run_stochastic_inference(model, data_loader, forward_passes): + model = MMDataParallel(model, device_ids=[0]) + all_uncertainties = [] + prog_bar = mmcv.ProgressBar(len(data_loader.dataset)) + for i, data in enumerate(data_loader): + all_scores = [] + with torch.no_grad(): + for n in range(forward_passes): + # set new random seed + update_seed(n * 1234) + scores = model(return_loss=False, **data) + # gather results + all_scores.append(np.expand_dims(scores, axis=-1)) + all_scores = np.concatenate(all_scores, axis=-1) # (B, C, T) + # compute the uncertainty + uncertainty = compute_uncertainty(all_scores, method=args.uncertainty) + all_uncertainties.append(uncertainty) + + # use the first key as main key to calculate the batch size + batch_size = len(next(iter(data.values()))) + for _ in range(batch_size): + prog_bar.update() + all_uncertainties = np.concatenate(all_uncertainties, axis=0) + + return all_uncertainties + + +def run_evidence_inference(model, data_loader, evidence): + + # get the evidence function + if evidence == 'relu': + from mmaction.models.losses.edl_loss import relu_evidence as get_evidence + elif evidence == 'exp': + from mmaction.models.losses.edl_loss import exp_evidence as get_evidence + elif evidence == 'softplus': + from mmaction.models.losses.edl_loss import softplus_evidence as get_evidence + else: + raise NotImplementedError + num_classes = model.cls_head.num_classes + + model = MMDataParallel(model, device_ids=[0]) + all_uncertainties = [] + prog_bar = mmcv.ProgressBar(len(data_loader.dataset)) + for i, data in enumerate(data_loader): + with torch.no_grad(): + output = model(return_loss=False, **data) + evidence = get_evidence(torch.from_numpy(output)) + alpha = evidence + 1 + uncertainty = num_classes / torch.sum(alpha, dim=1) + scores = alpha / torch.sum(alpha, dim=1, keepdim=True) + all_uncertainties.append(uncertainty.numpy()) + + # use the first key as main key to calculate the batch size + batch_size = len(next(iter(data.values()))) + for _ in range(batch_size): + prog_bar.update() + all_uncertainties = np.concatenate(all_uncertainties, axis=0) + + return all_uncertainties + + +def run_inference(): + + # build the recognizer from a config file and checkpoint file/url + model = init_recognizer( + args.config, + args.checkpoint, + device=device, + use_frames=False) + cfg = model.cfg + + if not args.uncertainty == 'EDL': + # use dropout in testing stage + if 'dnn' in args.config: + model.apply(apply_dropout) + if 'bnn' in args.config: + model.test_cfg.npass = 1 + # set cudnn benchmark + torch.backends.cudnn.benchmark = True + cfg.data.test.test_mode = True + cfg.data.videos_per_gpu = args.batch_size + evidence = cfg.get('evidence', 'exp') + + # We use training data to obtain the threshold + cfg.data.test.ann_file = args.train_data + cfg.data.test.data_prefix = os.path.join(os.path.dirname(args.train_data), 'videos') + # build the dataloader + dataset = build_dataset(cfg.data.test, dict(test_mode=True)) + dataloader_setting = dict( + videos_per_gpu=cfg.data.get('videos_per_gpu', 
1), + workers_per_gpu=cfg.data.get('workers_per_gpu', 1), + dist=False, + shuffle=False, + pin_memory=False) + dataloader_setting = dict(dataloader_setting, **cfg.data.get('test_dataloader', {})) + data_loader = build_dataloader(dataset, **dataloader_setting) + + # run inference + if not args.uncertainty == 'EDL': + all_uncertainties = run_stochastic_inference(model, data_loader, args.forward_pass) + else: + all_uncertainties = run_evidence_inference(model, data_loader, evidence) + return all_uncertainties + + +if __name__ == '__main__': + + args = parse_args() + # assign the desired device. + device = torch.device(args.device) + + result_file = os.path.join(args.result_prefix + '_trainset_uncertainties.npz') + if not os.path.exists(result_file): + # run the inference on the entire training set (takes long time) + all_uncertainties = run_inference() + np.savez(result_file[:-4], uncertainty=all_uncertainties) + else: + result = np.load(result_file) + all_uncertainties = result['uncertainty'] + + # evaluation by macro-F1 within (C1 + 1) classes + uncertain_sort = np.sort(all_uncertainties)[::-1] # sort the uncertainties with descending order + N = all_uncertainties.shape[0] + topK = N - int(N * 0.95) + threshold = uncertain_sort[topK-1] + + print('The model %s uncertainty threshold on UCF-101 train set: %lf'%(args.result_prefix.split('/')[-1], threshold)) \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/get_threshold_dist.py b/Downstream/Open-Set-Action-Recognition/experiments/get_threshold_dist.py new file mode 100644 index 0000000..1b18b9c --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/get_threshold_dist.py @@ -0,0 +1,280 @@ +import argparse +import os +import os.path as osp +import torch +import mmcv +from mmaction.apis import init_recognizer +from mmcv.parallel import collate, scatter +from operator import itemgetter +from mmaction.datasets.pipelines import Compose +from mmaction.datasets import build_dataloader, build_dataset +from mmcv.parallel import MMDataParallel, MMDistributedDataParallel +import numpy as np +from scipy.special import xlogy +from tqdm import tqdm +import pdb +from mmaction.apis import collect_results_cpu +from mmcv.runner import get_dist_info, init_dist, load_checkpoint +#from mmcv.runner import init_dist, set_random_seed +from mmcv import Config, DictAction +from mmaction.models import build_model + +def parse_args(): + parser = argparse.ArgumentParser(description='MMAction2 test') + # model and data config + parser.add_argument('--config', help='test config file path') + parser.add_argument('--checkpoint', help='checkpoint file/url') + parser.add_argument('--uncertainty', default='BALD', choices=['BALD', 'Entropy', 'EDL'], help='the uncertainty estimation method') + parser.add_argument('--train_data', help='the split file of in-distribution training data') + parser.add_argument('--forward_pass', type=int, default=10, help='the number of forward passes') + parser.add_argument('--batch_size', type=int, default=8, help='the testing batch size') + # env config + parser.add_argument('--device', type=str, default='cuda:0', help='CPU/CUDA device option') + parser.add_argument('--result_prefix', help='result file prefix') + parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + parser.add_argument('--cfg-options', nargs='+', action=DictAction, default={}, + help='override some settings 
in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. For example, ' + "'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'") + parser.add_argument('--fuse-conv-bn', action='store_true', help='Whether to fuse conv and bn, this will slightly increase''the inference speed') + args = parser.parse_args() + return args + +def apply_dropout(m): + if type(m) == torch.nn.Dropout: + m.train() + +def update_seed(seed): + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + +def compute_uncertainty(predictions, method='BALD'): + """Compute the entropy + scores: (B x C x T) + """ + expected_p = np.mean(predictions, axis=-1) # mean of all forward passes (C,) + entropy_expected_p = - np.sum(xlogy(expected_p, expected_p), axis=1) # the entropy of expect_p (across classes) + if method == 'Entropy': + uncertain_score = entropy_expected_p + elif method == 'BALD': + expected_entropy = - np.mean(np.sum(xlogy(predictions, predictions), axis=1), axis=-1) # mean of entropies (across classes), (scalar) + uncertain_score = entropy_expected_p - expected_entropy + else: + raise NotImplementedError + if not np.all(np.isfinite(uncertain_score)): + uncertain_score[~np.isfinite] = 9999 + return uncertain_score + +def run_stochastic_inference(model, data_loader, forward_passes): + # model = MMDataParallel(model, device_ids=[0]) + model.eval() + all_uncertainties = [] + # prog_bar = mmcv.ProgressBar(len(data_loader.dataset)) + rank, world_size = get_dist_info() + if rank == 0: + prog_bar = mmcv.ProgressBar(len(data_loader.dataset)) + + for i, data in enumerate(data_loader): + all_scores = [] + with torch.no_grad(): + for n in range(forward_passes): + # set new random seed + update_seed(n * 1234) + # original code 69-76 + # scores = model(return_loss=False, **data) + # # gather results + # all_scores.append(np.expand_dims(scores, axis=-1)) + # all_scores = np.concatenate(all_scores, axis=-1) # (B, C, T) + # # compute the uncertainty + # uncertainty = compute_uncertainty(all_scores, method=args.uncertainty) + # all_uncertainties.append(uncertainty) + #---vae--------------------------------- + # pdb.set_trace() + scores, recon = model(return_loss=False, **data) + uncertainty = recon + all_scores.append(np.expand_dims(scores, axis=-1)) + all_scores = np.concatenate(all_scores, axis=-1) # (B, C, T) + all_uncertainties.append(uncertainty) + #----------------------------------------------- + + # use the first key as main key to calculate the batch size + # batch_size = len(next(iter(data.values()))) + # for _ in range(batch_size): + # prog_bar.update() + + if rank == 0: + # use the first key as main key to calculate the batch size + batch_size = len(next(iter(data.values()))) + for _ in range(batch_size * world_size): + prog_bar.update() + + all_uncertainties = collect_results_cpu(all_uncertainties, len(data_loader.dataset), tmpdir=None) + rank, _ = get_dist_info() + if rank == 0: + all_uncertainties = np.concatenate(all_uncertainties, axis=0) + + return all_uncertainties + +def run_evidence_inference(model, data_loader, evidence): + + # get the evidence function + if evidence == 'relu': + from mmaction.models.losses.edl_loss import relu_evidence as get_evidence + elif evidence == 'exp': + from mmaction.models.losses.edl_loss import exp_evidence as get_evidence + elif evidence == 'softplus': + from mmaction.models.losses.edl_loss import softplus_evidence as get_evidence + else: + raise NotImplementedError + num_classes = 101 + model.eval() + + # model = 
MMDataParallel(model, device_ids=[0]) + all_uncertainties = [] + # prog_bar = mmcv.ProgressBar(len(data_loader.dataset)) + rank, world_size = get_dist_info() + if rank == 0: + prog_bar = mmcv.ProgressBar(len(data_loader.dataset)) + + for i, data in enumerate(data_loader): + with torch.no_grad(): + # output = model(return_loss=False, **data) + output = model(return_loss=False, **data) + evidence = get_evidence(torch.from_numpy(output)) + alpha = evidence + 1 + uncertainty = num_classes / torch.sum(alpha, dim=1) + scores = alpha / torch.sum(alpha, dim=1, keepdim=True) + all_uncertainties.append(uncertainty.numpy()) + #---vae--------------------------------- + # output, recon = model(return_loss=False, **data) + # uncertainty = recon + # all_uncertainties.append(uncertainty) + #------------------------------------------ + # pdb.set_trace() + # use the first key as main key to calculate the batch size + # batch_size = len(next(iter(data.values()))) + # for _ in range(batch_size): + # prog_bar.update() + if rank == 0: + # use the first key as main key to calculate the batch size + batch_size = len(next(iter(data.values()))) + for _ in range(batch_size * world_size): + prog_bar.update() + all_uncertainties = collect_results_cpu(all_uncertainties, len(data_loader.dataset), tmpdir=None) + rank, _ = get_dist_info() + if rank == 0: + all_uncertainties = np.concatenate(all_uncertainties, axis=0) + + return all_uncertainties + + +def run_inference(): + + # build the recognizer from a config file and checkpoint file/url + # model = init_recognizer( + # args.config, + # args.checkpoint, + # device=device, + # use_frames=False) + # cfg = model.cfg + + model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg) + fp16_cfg = cfg.get('fp16', None) + if fp16_cfg is not None: + wrap_fp16_model(model) + load_checkpoint(model, args.checkpoint, map_location='cpu') + print('cpk is loaded:', args.checkpoint) + + if args.fuse_conv_bn: + model = fuse_conv_bn(model) + + + model = MMDistributedDataParallel( + model.cuda(), + device_ids=[torch.cuda.current_device()], + broadcast_buffers=False) + + # if not args.uncertainty == 'EDL': + # # use dropout in testing stage + # if 'dnn' in args.config: + # model.apply(apply_dropout) + # if 'bnn' in args.config: + # model.test_cfg.npass = 1 + + # set cudnn benchmark + torch.backends.cudnn.benchmark = True + cfg.data.test.test_mode = True + cfg.data.videos_per_gpu = args.batch_size + evidence = cfg.get('evidence', 'exp') + + # We use training data to obtain the threshold + #pdb.set_trace() + cfg.data.test.ann_file = args.train_data + cfg.data.test.data_prefix = os.path.join(os.path.dirname(args.train_data), 'videos') + # build the dataloader + dataset = build_dataset(cfg.data.test, dict(test_mode=True)) + dataloader_setting = dict( + videos_per_gpu=cfg.data.get('videos_per_gpu', 1), + workers_per_gpu=cfg.data.get('workers_per_gpu', 1), + dist=distributed, + shuffle=False, + pin_memory=False) + dataloader_setting = dict(dataloader_setting, **cfg.data.get('test_dataloader', {})) + #pdb.set_trace() + data_loader = build_dataloader(dataset, **dataloader_setting) + + # run inference + if not args.uncertainty == 'EDL': + all_uncertainties = run_stochastic_inference(model, data_loader, args.forward_pass) + else: + all_uncertainties = run_evidence_inference(model, data_loader, evidence) + return all_uncertainties + + +if __name__ == '__main__': + + # args = parse_args() + # # assign the desired device. 
+ # device = torch.device(args.device) + + # build the recognizer from a config file and checkpoint file/url + args = parse_args() + cfg = Config.fromfile(args.config) + cfg.merge_from_dict(args.cfg_options) + + # init distributed env first, since logger depends on the dist info. + if args.launcher == 'none': + distributed = False + else: + distributed = True + init_dist(args.launcher, **cfg.dist_params) + + # pdb.set_trace() + result_file = os.path.join(args.result_prefix + '_trainset_uncertainties.npz') + if not os.path.exists(result_file): + # run the inference on the entire training set (takes long time) + #pdb.set_trace() + all_uncertainties = run_inference() + np.savez(result_file[:-4], uncertainty=all_uncertainties) + else: + result = np.load(result_file) + all_uncertainties = result['uncertainty'] + + # evaluation by macro-F1 within (C1 + 1) classes + # pdb.set_trace() + rank, _ = get_dist_info() + if rank == 0: + # uncertain_sort = np.sort(all_uncertainties)[::-1] # sort the uncertainties with descending order + uncertain_sort = np.sort(all_uncertainties.squeeze()) + #uncertain_sort = (uncertain_sort - np.min(uncertain_sort)) / (np.max(uncertain_sort) - np.min(uncertain_sort)) # normalize + + N = all_uncertainties.shape[0] + topK = N - int(N * 0.95) + print(uncertain_sort) + print(uncertain_sort.min()) + print(uncertain_sort.max()) + print(len(uncertain_sort)) + threshold = uncertain_sort[topK-1] + + print('The model %s uncertainty threshold on UCF-101 train set: %lf'%(args.result_prefix.split('/')[-1], threshold)) diff --git a/Downstream/Open-Set-Action-Recognition/experiments/i3d/baseline_i3d_openmax.sh b/Downstream/Open-Set-Action-Recognition/experiments/i3d/baseline_i3d_openmax.sh new file mode 100644 index 0000000..c2118c9 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/i3d/baseline_i3d_openmax.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 +OOD_DATASET=$2 +IND_DATA='data/ucf101/ucf101_val_split_1_videos.txt' + +case ${OOD_DATASET} in + HMDB) + # run ood detection on hmdb-51 validation set + OOD_DATA='data/hmdb51/hmdb51_val_split_1_videos.txt' + NUM_CLASSES=51 + ;; + MiT) + # run ood detection on mit-v2 validation set + OOD_DATA='data/mit/mit_val_list_videos.txt' + NUM_CLASSES=305 + ;; + *) + echo "Dataset not supported: "${OOD_DATASET} + exit + ;; +esac +RESULT_DIR='experiments/i3d/results' + +CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/baseline_openmax.py \ + --config configs/recognition/i3d/inference_i3d_dnn.py \ + --checkpoint work_dirs/i3d/finetune_ucf101_i3d_dnn/latest.pth \ + --cache_mav_dist experiments/i3d/results_baselines/openmax/ucf101_mav_dist \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --ood_ncls ${NUM_CLASSES} \ + --result_prefix experiments/i3d/results_baselines/openmax/I3D_OpenMax_${OOD_DATASET} + +cd $pwd_dir +echo "Experiments finished!" 
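
For readers who want the uncertainty measures used by the `get_threshold` scripts above in isolation: the BALD and Entropy scores reduce to a few lines of NumPy over the stack of stochastic forward passes, and the threshold is simply the value below which 95% of the training-set uncertainties fall (the non-distributed script sorts in descending order and indexes the top 5%). The sketch below mirrors `compute_uncertainty` and that percentile step; the random scores and the `rng` generator are purely illustrative.

```python
import numpy as np
from scipy.special import xlogy

def compute_uncertainty(predictions, method='BALD'):
    """predictions: (B, C, T) class probabilities from T stochastic forward passes."""
    expected_p = np.mean(predictions, axis=-1)                           # (B, C) mean over passes
    entropy_expected_p = -np.sum(xlogy(expected_p, expected_p), axis=1)  # predictive entropy, (B,)
    if method == 'Entropy':
        return entropy_expected_p
    # BALD = predictive entropy - expected per-pass entropy (mutual information)
    expected_entropy = -np.mean(np.sum(xlogy(predictions, predictions), axis=1), axis=-1)
    return entropy_expected_p - expected_entropy

# toy example: 8 clips, 101 classes, 10 forward passes
rng = np.random.default_rng(0)
scores = rng.dirichlet(np.ones(101), size=(8, 10)).transpose(0, 2, 1)   # (B, C, T)
uncertainty = compute_uncertainty(scores, method='BALD')

# threshold = the uncertainty value that 95% of in-distribution training clips stay below
uncertain_sort = np.sort(uncertainty)[::-1]                 # descending, as in get_threshold.py
topK = len(uncertainty) - int(len(uncertainty) * 0.95)
threshold = uncertain_sort[topK - 1]
```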
\ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/i3d/baseline_i3d_rpl.sh b/Downstream/Open-Set-Action-Recognition/experiments/i3d/baseline_i3d_rpl.sh new file mode 100644 index 0000000..554bc6f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/i3d/baseline_i3d_rpl.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 +OOD_DATASET=$2 +IND_DATA='data/ucf101/ucf101_val_split_1_videos.txt' + +case ${OOD_DATASET} in + HMDB) + # run ood detection on hmdb-51 validation set + OOD_DATA='data/hmdb51/hmdb51_val_split_1_videos.txt' + NUM_CLASSES=51 + ;; + MiT) + # run ood detection on mit-v2 validation set + OOD_DATA='data/mit/mit_val_list_videos.txt' + NUM_CLASSES=305 + ;; + *) + echo "Dataset not supported: "${OOD_DATASET} + exit + ;; +esac + +CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/baseline_rpl.py \ + --config configs/recognition/i3d/inference_i3d_rpl.py \ + --checkpoint work_dirs/i3d/finetune_ucf101_i3d_rpl/latest.pth \ + --train_data data/ucf101/ucf101_train_split_1_videos.txt \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --ood_ncls ${NUM_CLASSES} \ + --ood_dataname ${OOD_DATASET} \ + --result_prefix experiments/i3d/results_baselines/rpl/RPL + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/i3d/baseline_i3d_softmax.sh b/Downstream/Open-Set-Action-Recognition/experiments/i3d/baseline_i3d_softmax.sh new file mode 100644 index 0000000..f88f6b4 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/i3d/baseline_i3d_softmax.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 +OOD_DATASET=$2 + +case ${OOD_DATASET} in + HMDB) + NUM_CLASSES=51 + ;; + MiT) + NUM_CLASSES=305 + ;; + *) + echo "Dataset not supported: "${OOD_DATASET} + exit + ;; +esac +RESULT_DIR='experiments/i3d/results' + +CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/baseline_softmax.py \ + --config configs/recognition/i3d/inference_i3d_dnn.py \ + --checkpoint work_dirs/i3d/finetune_ucf101_i3d_dnn/latest.pth \ + --batch_size 4 \ + --ood_ncls ${NUM_CLASSES} \ + --result_prefix experiments/i3d/results_baselines/softmax/I3D_SoftMax_${OOD_DATASET} + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/i3d/evaluate_i3d_bnn_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/i3d/evaluate_i3d_bnn_ucf101.sh new file mode 100644 index 0000000..827fd26 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/i3d/evaluate_i3d_bnn_ucf101.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/test.py configs/recognition/i3d/finetune_ucf101_i3d_bnn.py \ + work_dirs/i3d/finetune_ucf101_i3d_bnn/latest.pth \ + --out work_dirs/i3d/test_ucf101_i3d_bnn.pkl \ + --eval top_k_accuracy mean_class_accuracy + +cd $pwd_dir +echo "Experiments finished!" 
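
The EDL branch (`run_evidence_inference` above) needs no repeated forward passes: a single logit vector is mapped to Dirichlet evidence, and the uncertainty is the number of classes divided by the total Dirichlet strength. A minimal sketch of that mapping, using the `exp` evidence function that the config falls back to by default; the logits below are made up for illustration.

```python
import torch

num_classes = 101
logits = torch.randn(4, num_classes)         # one deterministic forward pass, batch of 4

evidence = torch.exp(logits)                 # 'exp' evidence; 'relu' and 'softplus' are the other options
alpha = evidence + 1                         # Dirichlet concentration parameters
strength = alpha.sum(dim=1, keepdim=True)    # total evidence S

uncertainty = num_classes / strength.squeeze(1)   # u = K / S: more total evidence -> lower uncertainty
probs = alpha / strength                          # expected class probabilities under the Dirichlet
```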
diff --git a/Downstream/Open-Set-Action-Recognition/experiments/i3d/evaluate_i3d_dnn_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/i3d/evaluate_i3d_dnn_ucf101.sh new file mode 100644 index 0000000..61d3542 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/i3d/evaluate_i3d_dnn_ucf101.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/test.py configs/recognition/i3d/finetune_ucf101_i3d_dnn.py \ + work_dirs/i3d/finetune_ucf101_i3d_dnn/latest.pth \ + --out work_dirs/i3d/test_ucf101_i3d_dnn.pkl \ + --eval top_k_accuracy mean_class_accuracy + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/i3d/evaluate_i3d_edlnokl_avuc_debias_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/i3d/evaluate_i3d_edlnokl_avuc_debias_ucf101.sh new file mode 100644 index 0000000..ba296d8 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/i3d/evaluate_i3d_edlnokl_avuc_debias_ucf101.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/test.py configs/recognition/i3d/finetune_ucf101_i3d_edlnokl_avuc_debias.py \ + work_dirs/i3d/finetune_ucf101_i3d_edlnokl_avuc_debias/latest.pth \ + --out work_dirs/i3d/test_ucf101_i3d_edlnokl_avuc_debias.pkl \ + --eval top_k_accuracy mean_class_accuracy + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/i3d/evaluate_i3d_edlnokl_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/i3d/evaluate_i3d_edlnokl_ucf101.sh new file mode 100644 index 0000000..7ceed5f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/i3d/evaluate_i3d_edlnokl_ucf101.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/test.py configs/recognition/i3d/finetune_ucf101_i3d_edlnokl.py \ + work_dirs/i3d/finetune_ucf101_i3d_edlnokl/latest.pth \ + --out work_dirs/i3d/test_ucf101_i3d_edlnokl.pkl \ + --eval top_k_accuracy mean_class_accuracy + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/i3d/evaluate_i3d_rpl_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/i3d/evaluate_i3d_rpl_ucf101.sh new file mode 100644 index 0000000..ac86139 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/i3d/evaluate_i3d_rpl_ucf101.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/test.py configs/recognition/i3d/finetune_ucf101_i3d_rpl.py \ + work_dirs/i3d/finetune_ucf101_i3d_rpl/latest.pth \ + --out work_dirs/i3d/test_ucf101_i3d_rpl.pkl \ + --eval top_k_accuracy mean_class_accuracy + +cd $pwd_dir +echo "Experiments finished!" 
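
The `--eval top_k_accuracy mean_class_accuracy` options passed by these test scripts name two simple metrics over the per-clip score matrix. For orientation, here is a minimal NumPy rendering of both; the function names and toy inputs are illustrative, not mmaction's actual evaluation code.

```python
import numpy as np

def top_k_accuracy(scores, labels, k=1):
    """scores: (N, C) class scores; labels: (N,) ground-truth class indices."""
    topk = np.argsort(scores, axis=1)[:, -k:]            # indices of the k highest scores per clip
    hits = np.any(topk == labels[:, None], axis=1)
    return hits.mean()

def mean_class_accuracy(scores, labels):
    preds = scores.argmax(axis=1)
    per_class = [np.mean(preds[labels == c] == c) for c in np.unique(labels)]
    return float(np.mean(per_class))

scores = np.random.rand(6, 101)   # toy scores for 6 clips over the 101 UCF-101 classes
labels = np.array([0, 0, 1, 2, 2, 3])
print(top_k_accuracy(scores, labels, k=5), mean_class_accuracy(scores, labels))
```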
diff --git a/Downstream/Open-Set-Action-Recognition/experiments/i3d/finetune_i3d_bnn_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/i3d/finetune_i3d_bnn_ucf101.sh new file mode 100644 index 0000000..63b5353 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/i3d/finetune_i3d_bnn_ucf101.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/i3d/finetune_ucf101_i3d_bnn.py \ + --work-dir work_dirs/i3d/finetune_ucf101_i3d_bnn \ + --validate \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/i3d/finetune_i3d_dnn_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/i3d/finetune_i3d_dnn_ucf101.sh new file mode 100644 index 0000000..74617b9 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/i3d/finetune_i3d_dnn_ucf101.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/i3d/finetune_ucf101_i3d_dnn.py \ + --work-dir work_dirs/i3d/finetune_ucf101_i3d_dnn \ + --validate \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/i3d/finetune_i3d_edlnokl_avuc_debias_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/i3d/finetune_i3d_edlnokl_avuc_debias_ucf101.sh new file mode 100644 index 0000000..00af2d0 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/i3d/finetune_i3d_edlnokl_avuc_debias_ucf101.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +# --validate +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/i3d/finetune_ucf101_i3d_edlnokl_avuc_debias.py \ + --work-dir work_dirs/i3d/finetune_ucf101_i3d_edlnokl_avuc_debias \ + --validate \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/i3d/finetune_i3d_edlnokl_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/i3d/finetune_i3d_edlnokl_ucf101.sh new file mode 100644 index 0000000..9494db9 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/i3d/finetune_i3d_edlnokl_ucf101.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +# --validate +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/i3d/finetune_ucf101_i3d_edlnokl.py \ + --work-dir work_dirs/i3d/finetune_ucf101_i3d_edlnokl \ + --validate \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/i3d/finetune_i3d_rpl_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/i3d/finetune_i3d_rpl_ucf101.sh new file mode 100644 index 0000000..7a16c6b --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/i3d/finetune_i3d_rpl_ucf101.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/i3d/finetune_ucf101_i3d_rpl.py \ + --work-dir work_dirs/i3d/finetune_ucf101_i3d_rpl \ + --validate \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 + +cd $pwd_dir +echo "Experiments finished!" 
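
All of the fine-tuning scripts above pass `--seed 0 --deterministic` to `tools/train.py`. In plain PyTorch terms this amounts to seeding every RNG and switching cuDNN to its deterministic, non-benchmarking mode. The snippet below is a generic sketch of that setup, not a copy of mmaction's internal seeding helper.

```python
import random

import numpy as np
import torch

def set_seed(seed: int, deterministic: bool = True) -> None:
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    if deterministic:
        # trade some speed for run-to-run reproducibility
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

set_seed(0)
```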
diff --git a/Downstream/Open-Set-Action-Recognition/experiments/i3d/run_draw_confmat.sh b/Downstream/Open-Set-Action-Recognition/experiments/i3d/run_draw_confmat.sh new file mode 100644 index 0000000..f938880 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/i3d/run_draw_confmat.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +OOD_DATA=$1 # HMDB or MiT +RESULT_PATH="experiments/i3d/results" + +# Confusion Matrix comparison +python experiments/draw_confusion_matrix.py \ + --ood_result ${RESULT_PATH}/I3D_EDLNoKLAvUCDebias_EDL_${OOD_DATA}_result.npz \ + --uncertain_thresh 0.004550 \ + --save_file ${RESULT_PATH}/../results_confmat/I3D_DEAR_${OOD_DATA}_ConfMat.png + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/i3d/run_get_threshold.sh b/Downstream/Open-Set-Action-Recognition/experiments/i3d/run_get_threshold.sh new file mode 100644 index 0000000..d2ffd22 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/i3d/run_get_threshold.sh @@ -0,0 +1,65 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 +MODEL=$2 +BATCHSIZE=$3 +TRAIN_DATA='data/ucf101/ucf101_train_split_1_videos.txt' +RESULT_DIR='experiments/i3d/results' + +case ${MODEL} in + dnn) + # get the BALD threshold for i3d model trained on UCF-101 + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/get_threshold.py \ + --config configs/recognition/i3d/inference_i3d_dnn.py \ + --checkpoint work_dirs/i3d/finetune_ucf101_i3d_dnn/latest.pth \ + --train_data ${TRAIN_DATA} \ + --batch_size ${BATCHSIZE} \ + --uncertainty BALD \ + --result_prefix ${RESULT_DIR}/I3D_DNN_BALD + ;; + bnn) + # get the BALD threshold for I3D_BNN model trained on UCF-101 + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/get_threshold.py \ + --config configs/recognition/i3d/inference_i3d_bnn.py \ + --checkpoint work_dirs/i3d/finetune_ucf101_i3d_bnn/latest.pth \ + --train_data ${TRAIN_DATA} \ + --batch_size ${BATCHSIZE} \ + --uncertainty BALD \ + --result_prefix ${RESULT_DIR}/I3D_BNN_BALD + ;; + edlnokl) + # get the EDL threshold for I3D_EDL model trained on UCF-101 + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/get_threshold.py \ + --config configs/recognition/i3d/inference_i3d_enn.py \ + --checkpoint work_dirs/i3d/finetune_ucf101_i3d_edlnokl/latest.pth \ + --train_data ${TRAIN_DATA} \ + --batch_size ${BATCHSIZE} \ + --uncertainty EDL \ + --result_prefix ${RESULT_DIR}/I3D_EDLNoKL_EDL + ;; + edlnokl_avuc_debias) + # get the EDL threshold for I3D_EDL_AvUC_Debias model trained on UCF-101 + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/get_threshold.py \ + --config configs/recognition/i3d/inference_i3d_enn.py \ + --checkpoint work_dirs/i3d/finetune_ucf101_i3d_edlnokl_avuc_debias/latest.pth \ + --train_data ${TRAIN_DATA} \ + --batch_size ${BATCHSIZE} \ + --uncertainty EDL \ + --result_prefix ${RESULT_DIR}/I3D_EDLNoKLAvUCDebias_EDL + ;; + *) + echo "Invalid model: "${MODEL} + exit + ;; +esac + + +cd $pwd_dir +echo "Experiments finished!" 
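
The thresholds produced by `run_get_threshold.sh` (e.g. the 0.004550 value hard-coded for the DEAR model in `run_draw_confmat.sh`) are used to fold overly uncertain predictions into a single "unknown" class before drawing the (C+1)-way confusion matrix and computing macro-F1. The decision rule below is a sketch inferred from those scripts; `open_set_predict` and its inputs are hypothetical names.

```python
import numpy as np

def open_set_predict(scores, uncertainty, threshold, num_known=101):
    """Return class indices in [0, num_known]; index num_known means 'unknown'."""
    preds = scores.argmax(axis=1)
    preds[uncertainty > threshold] = num_known   # too uncertain -> reject as unknown
    return preds

scores = np.random.rand(4, 101)
uncertainty = np.array([0.001, 0.02, 0.004, 0.4])
print(open_set_predict(scores, uncertainty, threshold=0.004550))
```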
\ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/i3d/run_ood_detection.sh b/Downstream/Open-Set-Action-Recognition/experiments/i3d/run_ood_detection.sh new file mode 100644 index 0000000..37e3196 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/i3d/run_ood_detection.sh @@ -0,0 +1,80 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ +# DEVICE=$1 +OOD_DATASET=$1 +MODEL=$2 +GPUS=$3 +PORT=${PORT:-29500} +IND_DATA='data/ucf101/ucf101_val_split_1_videos.txt' + +case ${OOD_DATASET} in + HMDB) + # run ood detection on hmdb-51 validation set + OOD_DATA='data/hmdb51/hmdb51_val_split_1_videos.txt' + ;; + MiT) + # run ood detection on hmdb-51 validation set + OOD_DATA='data/mit/mit_val_list_videos.txt' + ;; + *) + echo "Dataset not supported: "${OOD_DATASET} + exit + ;; +esac +RESULT_DIR='experiments/i3d/results' + +case ${MODEL} in + dnn) + # DNN with Dropout model + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/ood_detection.py \ + --config configs/recognition/i3d/inference_i3d_dnn.py \ + --checkpoint work_dirs/i3d/finetune_ucf101_i3d_dnn/latest.pth \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --uncertainty BALD \ + --result_prefix ${RESULT_DIR}/I3D_DNN_BALD_${OOD_DATASET} + ;; + bnn) + # Evidential Deep Learning (without KL divergence loss term) + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/ood_detection.py \ + --config configs/recognition/i3d/inference_i3d_bnn.py \ + --checkpoint work_dirs/i3d/finetune_ucf101_i3d_bnn/latest.pth \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --uncertainty BALD \ + --result_prefix ${RESULT_DIR}/I3D_BNN_BALD_${OOD_DATASET} + ;; + edlnokl) + # Evidential Deep Learning (without KL divergence loss term) + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/ood_detection.py \ + --config configs/recognition/i3d/inference_i3d_enn.py \ + --checkpoint work_dirs/i3d/finetune_ucf101_i3d_edlnokl/latest.pth \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --uncertainty EDL \ + --result_prefix ${RESULT_DIR}/I3D_EDLNoKL_EDL_${OOD_DATASET} + ;; + edlnokl_avuc_debias) + # Evidential Deep Learning (without KL divergence loss term) with AvU Calibration and Debiasing + python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT experiments/ood_detection_dist.py \ + --config configs/recognition/i3d/inference_i3d_enn.py \ + --checkpoint work_dirs/i3d/finetune_ucf101_i3d_edlnokl_avuc_ced/latest.pth \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --uncertainty EDL \ + --result_prefix ${RESULT_DIR}/I3D_EDLNoKLAvUCCED_EDL_${OOD_DATASET} \ + --launcher pytorch + ;; + *) + echo "Invalid model: "${MODEL} + exit + ;; +esac + + +cd $pwd_dir +echo "Experiments finished!" 
\ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/i3d/run_openness.sh b/Downstream/Open-Set-Action-Recognition/experiments/i3d/run_openness.sh new file mode 100644 index 0000000..54552ad --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/i3d/run_openness.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +OOD_DATA=$1 # HMDB or MiT +case ${OOD_DATA} in + HMDB) + NUM_CLASSES=51 + ;; + MiT) + NUM_CLASSES=305 + ;; + *) + echo "Invalid OOD Dataset: "${OOD_DATA} + exit + ;; +esac + +# OOD Detection comparison +python experiments/compare_openness.py \ + --base_model i3d \ + --baselines I3D_DNN_BALD I3D_BNN_BALD I3D_EDLNoKL_EDL I3D_EDLNoKLAvUCCED_EDL I3D_EDLNoKLAvUCDebias_EDL \ + --thresholds 0.000433 0.000004 0.004547 0.004550 0.004550 \ + --styles b k y c r\ + --ood_data ${OOD_DATA} \ + --ood_ncls ${NUM_CLASSES} \ + --ind_ncls 101 \ + --result_png F1_openness_compare_${OOD_DATA}.png + + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/i3d/run_openness_new.sh b/Downstream/Open-Set-Action-Recognition/experiments/i3d/run_openness_new.sh new file mode 100644 index 0000000..87da539 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/i3d/run_openness_new.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +OOD_DATA=$1 # HMDB or MiT +RESULT_FILES=("results_baselines/openmax/I3D_OpenMax_${OOD_DATA}_result.npz" \ # openmax + "results/I3D_DNN_BALD_${OOD_DATA}_result.npz" \ # mc dropout + "results/I3D_BNN_BALD_${OOD_DATA}_result.npz" \ # bnn svi + "results_baselines/openmax/I3D_OpenMax_${OOD_DATA}_result.npz" \ # softmax + "results_baselines/rpl/I3D_RPL_${OOD_DATA}_result.npz" \ # rpl + "results/I3D_EDLNoKLAvUCDebias_EDL_${OOD_DATA}_result.npz") # dear (ours) +THRESHOLDS=(-1 \ + 0.000433 \ + 0.000004 \ + 0.996825 \ + 0.995178 \ + 0.004550) + +# OOD Detection comparison +# The folders `results/` and `results_baselines` are in the `experiments/i3d/` folder. +python experiments/compare_openness_new.py \ + --base_model i3d \ + --ood_data ${OOD_DATA} \ + --baseline_results ${RESULT_FILES[@]} + +# OOD Detection comparison by using thresholds +echo 'Results by using thresholds:' +python experiments/compare_openness_new.py \ + --base_model i3d \ + --ood_data ${OOD_DATA} \ + --thresholds ${THRESHOLDS[@]} \ + --baseline_results ${RESULT_FILES[@]} + +cd $pwd_dir +echo "Experiments finished!" 
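
`compare_openness.py` sweeps over increasing numbers of unknown classes and reports macro-F1 at each resulting openness level. For orientation, the sketch below uses the openness measure commonly attributed to Scheirer et al. together with a macro-F1 over the known classes plus one "unknown" bucket; whether the project's script uses exactly this formula is an assumption, and the label arrays are toy data.

```python
import numpy as np
from sklearn.metrics import f1_score

def openness(n_train_classes, n_test_classes):
    # common open-set definition: O = 1 - sqrt(2 * C_train / (C_train + C_test))
    return 1.0 - np.sqrt(2.0 * n_train_classes / (n_train_classes + n_test_classes))

# macro-F1 over known classes 0..100 plus the 'unknown' label 101
y_true = np.array([0, 0, 1, 1, 2, 2, 101, 101])
y_pred = np.array([0, 0, 1, 2, 2, 2, 101, 1])
print(openness(101, 101 + 51), f1_score(y_true, y_pred, average='macro'))
```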
\ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/i3d/run_reliability_evaluation.sh b/Downstream/Open-Set-Action-Recognition/experiments/i3d/run_reliability_evaluation.sh new file mode 100644 index 0000000..85ef69d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/i3d/run_reliability_evaluation.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +OOD_DATASET=$1 +MODEL=$2 +RESULT_DIR='experiments/i3d/results' + +case ${MODEL} in + dnn) + # DNN with Dropout model + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/evaluate_calibration.py \ + --ood_result ${RESULT_DIR}/I3D_DNN_BALD_${OOD_DATASET}_result.npz \ + --save_prefix ${RESULT_DIR}/../results_reliability/I3D_DNN_BALD_${OOD_DATASET}_reliability + ;; + bnn) + # Evidential Deep Learning (without KL divergence loss term) + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/evaluate_calibration.py \ + --ood_result ${RESULT_DIR}/I3D_BNN_BALD_${OOD_DATASET}_result.npz \ + --save_prefix ${RESULT_DIR}/../results_reliability/I3D_BNN_BALD_${OOD_DATASET}_reliability + ;; + edlnokl) + # Evidential Deep Learning (without KL divergence loss term) + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/evaluate_calibration.py \ + --ood_result ${RESULT_DIR}/I3D_EDLNoKL_EDL_${OOD_DATASET}_result.npz \ + --save_prefix ${RESULT_DIR}/../results_reliability/I3D_EDLNoKL_EDL_${OOD_DATASET}_reliability + ;; + edlnokl_avuc_debias) + # Evidential Deep Learning (without KL divergence loss term) with AvU Calibration and Debiasing + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/evaluate_calibration.py \ + --ood_result ${RESULT_DIR}/I3D_EDLNoKLAvUCCED_EDL_${OOD_DATASET}_result.npz \ + --save_prefix ${RESULT_DIR}/../results_reliability/I3D_EDLNoKLAvUCCED_EDL_${OOD_DATASET}_reliability + ;; + *) + echo "Invalid model: "${MODEL} + exit + ;; +esac + + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/i3d/train_i3d_DEAR_kinetics10.sh b/Downstream/Open-Set-Action-Recognition/experiments/i3d/train_i3d_DEAR_kinetics10.sh new file mode 100644 index 0000000..95eaaa0 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/i3d/train_i3d_DEAR_kinetics10.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +# --validate +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/i3d/train_kinetics10_i3d_DEAR.py \ + --work-dir work_dirs/i3d/train_kinetics10_i3d_DEAR \ + --validate \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/i3d/train_i3d_DEAR_noDebias_kinetics10.sh b/Downstream/Open-Set-Action-Recognition/experiments/i3d/train_i3d_DEAR_noDebias_kinetics10.sh new file mode 100644 index 0000000..caa6748 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/i3d/train_i3d_DEAR_noDebias_kinetics10.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +# --validate +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/i3d/train_kinetics10_i3d_DEAR_noDebias.py \ + --work-dir work_dirs/i3d/train_kinetics10_i3d_DEAR_noDebias \ + --validate \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 + +cd $pwd_dir +echo "Experiments finished!" 
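
`run_reliability_evaluation.sh` above feeds the saved OOD result files to `evaluate_calibration.py` to produce reliability diagrams. The underlying idea is standard confidence binning: group predictions by confidence, then compare each bin's accuracy with its mean confidence; the size-weighted gap is the expected calibration error. The following is a generic sketch of that computation under toy inputs, not the project's exact implementation.

```python
import numpy as np

def expected_calibration_error(confidences, correct, n_bins=10):
    """confidences: (N,) max predicted probability; correct: (N,) 0/1 hits."""
    bins = np.linspace(0.0, 1.0, n_bins + 1)
    ece = 0.0
    for lo, hi in zip(bins[:-1], bins[1:]):
        mask = (confidences > lo) & (confidences <= hi)
        if mask.any():
            gap = abs(correct[mask].mean() - confidences[mask].mean())
            ece += mask.mean() * gap        # weight the gap by the fraction of samples in the bin
    return ece

conf = np.random.rand(1000)
hits = (np.random.rand(1000) < conf).astype(float)   # toy, roughly calibrated predictions
print(expected_calibration_error(conf, hits))
```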
diff --git a/Downstream/Open-Set-Action-Recognition/experiments/libMR/COPYRIGHT_Libmr.txt b/Downstream/Open-Set-Action-Recognition/experiments/libMR/COPYRIGHT_Libmr.txt new file mode 100644 index 0000000..084f881 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/libMR/COPYRIGHT_Libmr.txt @@ -0,0 +1,98 @@ +SOURCE CODE LICENSE AGREEMENT +PREAMBLE + +This SOFTWARE implements concepts of statistical Meta-recognition for which Securics/Univ. of Colorado have a pending patent (CU TTO File CU2338C). Securics and University of Colorado, hereafter The Owners, have joint interest in the invention and the software. Securics currently holds the excusive license commercial to both the patent and the code. Securcs, hereafter the Licensor, is offering a non-exclusive right to use for non-commecial use. +This license agreement allows you to use the source code for personal or non profit purposes. This includes any use that does not involve making money, and does not include uses like: +• deploying the software for use by a for-profit organization +• providing a service to a paying customer +For-profit companies may not use this source code. If you work for a for-profit company, you may only use this software as an individual, for your personal use. + +This is a right to use license. It does not not include the right to redistribute copies. Non-profit users can only use the copies obtained from authroized sourcehs which include: securics.com, metarecognition.com or vast.uccs.edu. + +This license agreement also allows you to create derivative products for your own use, but does not permit re-distribute +of modified code in any form. You may choose to destribute patch files, which can be applied to officially distributed code. Any the derivative products, must be distributed under the same conditions as specified in this agreement unless a separate commercial license is obtained from Securics Inc or its designates. + +As a condition of using this source code, you agree not to assert any patents or copyrights against the owners or any of the Owners’ licensees for use of derivative products. Any derivative products must include a copy of license and instructions for accessing the orignal source. You must also include attribution to the authors in any publication that results from the use of this code or data derived from the code. Any papers/research/report based on results that uses this software must cite: + +@article{Scheirer_2011_TPAMI, +author = {Walter J. Scheirer and Anderson Rocha and Ross Michaels and Terrance E. Boult}, +title = {Meta-Recognition: The Theory and Practice of Recognition Score Analysis}, +journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence (PAMI)}, +volume = {33}, +issue = {8}, +pages = {1689–1695}, +year = {2011} +} +Parts of this technology are subject to SBIR data rights and as described in DFARS 252.227-7018 (June 1995) SBIR Data Rights which apply to Contract Number: N00014-11-C-0243 and STTR N00014-07-M-0421 to Securics Inc, 1870 Austin Bluffs Parkway, Colorado Springs, CO 80918 + +The Government’s rights to use, modify, reproduce, release, perform, display, or disclose technical data or computer software marked with this legend are restricted during the period shown as provided in paragraph (b)(4) of the Rights in Noncommercial Technical Data and Computer Software-Small Business Innovative Research (SBIR) Program clause contained in the above identified contract. 
Expiration of SBIR Data Rights: Expires four years after completion of the above cited project work for this or any other follow-on SBIR contract, whichever is later. + +No restrictions on government use apply after the expiration date shown above. Any reproduction of technical data, computer software, or portions thereof marked with this legend must also reproduce the markings. + +This license includes other conditions that should be read carefully. This SOFWARE usage agreement (the “Agreement”) applies to the libMR and is between YOU and the Licensor + +1. DEFINITIONS + +“Software” means all or any portion of the human-readable source code files of the software programs including without limitation, associated flow charts, algorithms, comments and other written instructions and technical documentation, and all corrections, updates, and new versions incorporated into such programs. + +“Derivative Work” means a work based upon the Software, such as a revision, modification, translation, abridgement, condensation, expansion, collection, compilation, or any other form in which the Software may be recast, transformed, adapted, or distributed as a part of a larger work and which, if prepared without proper authorization would constitute a copyright infringement. If identifiable sections of that work are not derived from the Software, and can be reasonably considered independent and separate works in themselves, then they are not considered Derivative Work. + +“Personal Use” means use of Software and/or Derivative Work by an individual solely for his or her personal, private and non-commercial use. An individual’s use in his or her capacity as an officer, employee, member, independent contractor or agent of a corporation, business or organization does not qualify as Personal Use. + +“You” or “Your” means an individual or a legal entity exercising rights under this License. For legal entities, “You” or “Your” includes any non-profit entity which controls, is controlled by, or is under common control with, You, where “control” means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of fifty percent (50%) or more of the beneficial ownership of such entity. + +2. GRANT OF LICENSE: + +WHEREAS, the Licensor, desires to aid the academic and non-commercial research community and raise awareness of the PATENTED INVENTION and thereby agrees to grant a limited copyright license to the SOFTWARE for research and non-commercial purposes only, with the Owners retaining all ownership rights in the PATENTED INVENTION and the SOFTWARE; + +THEREFORE: +The Licensor grants, and You accept, a personal, nonexclusive, nontransferable license: + +a) to use Software, at no charge, in accordance with the terms herein, solely for (i) Personal Use, or (ii) academic or non-commercial research, development and deployment; and + +b) to develop Derivative Works that may be used solely for (i) Personal Use or (ii) academic or non-commercial research, development and deployment; and + +c) to copy, distribute and sublicense Software and Derivative Works solely in accordance with the terms herein. 
Any Software or Derivative Works distributed shall be pursuant to a license agreement that contains all of the terms herein; and shall contain prominent notices stating how the Software, Derivative Works, or documentation was changed, the author and date of any such change and require acknowledgement of the orginal software/publicaitons by any users of the Derivative Works. + +d) You acknowledge that the Software is a valuable, proprietary asset of The Owners. You shall not market or sell the Software or Derivative Works. + +3. LICENSE EXCLUSIONS + +a) EXCEPT AS EXPRESSLY PROVIDED HEREIN, YOU SHALL MAKE NO OTHER USE OF THE SOFTWARE. + +b) You must obtain permission from The Licensor before receiving payment for distribution of or services using the Software or Derivative Works. + +c) You shall not allege or enjoin infringement or misappropriation by The Licensor in any Derivative Works, or by any third party obtaining Derivative Works, prepared by The Licensor and under license from The Licensor. +4. TITLE AND PROTECTION OF SOFTWARE + +a) The Owners retains all title, right and interest to the Software and the underlying patents. + +b) Except for the Software, You retain all title, right and interest to the Derivative Works, subject to the terms of this Agreement. + +5. NO REPRESENTATIONS + +THE OWNERS DISCLAIMS ALL OTHER REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. +6. ATTRIBUTION + +a) You agree to retain and reproduce in all copies of Software the copyright and other proprietary notices and disclaimers as they appear in the Software, and keep intact all notices in the Software that refer to this License. + +b) You agree to provide attribution to the authors of this Software in any article based on research performed using Software or Derivative Works or with any distribution of Software or Derivative works. + +7. DEFAULT + +If YOU fail to perform any of its obligations under this Agreement, The Licensor, in addition to any other rights available to it under law or equity, may terminate this Agreement and the licenses granted hereunder by written notice to You. Unless otherwise provided in this Agreement, remedies shall be cumulative and there shall be no obligation to exercise a particular remedy. + +8. TERMINATION + +a) In the event that this Agreement is terminated, any sublicenses granted or Derivative Works distributed by Licensee shall remain in full force and effect. + +b) Within thirty (30) days of termination, You shall return to The Licensor or certify in writing to The Licensor that all copies or partial copies of Software in Your possession or control have been destroyed. c) In addition to this section, the sections entitled “Title and Protection of Software “No Representations” and “Limitation of Liability” shall survive termination of this Agreement. + +9. GENERAL +a) No agency, partnership or employment is created by this Agreement. + +b) You may not use any of The Owners’ names, the terms in Meta-Recognition, or W-score in any advertising, public relations or media release without the prior written consent of the Owner. + +c) This Agreement shall be governed by the laws of the State of Colorado. Venue for any action or proceeding shall be Denver, Colorado. This Agreement constitutes the entire agreement between the parties and may only be modified by a written instrument signed by each parties authorized officers. 
+ +If you accept this license please opt-in for and you will receive email with instructions. The email will also be used for update emails on future changes to the code. diff --git a/Downstream/Open-Set-Action-Recognition/experiments/libMR/Makefile b/Downstream/Open-Set-Action-Recognition/experiments/libMR/Makefile new file mode 100644 index 0000000..8de7513 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/libMR/Makefile @@ -0,0 +1,8 @@ +CXX= g++ +SRC= MetaRecognition.cpp weibull.c + +libmr: $(SRC) weibull.h malloc.h MetaRecognition.h + $(CXX) -o libmr $(SRC) -I. + +clean: + rm -f *~ *.o libmr \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/libMR/MetaRecognition.cpp b/Downstream/Open-Set-Action-Recognition/experiments/libMR/MetaRecognition.cpp new file mode 100644 index 0000000..b423be0 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/libMR/MetaRecognition.cpp @@ -0,0 +1,463 @@ +/* + * MetaRecognition.cpp + * Copyright 2011, Securics Inc. + See accompanying LICENSE agrement for details on rights. + +Parts of this technology are subject to SBIR data rights and as described in DFARS 252.227-7018 (June 1995) SBIR Data Rights which apply to Contract Number: N00014-11-C-0243 and STTR N00014-07-M-0421 to Securics Inc, 1870 Austin Bluffs Parkway, Colorado Springs, CO 80918 + +The Government's rights to use, modify, reproduce, release, perform, display, or disclose technical data or computer software marked with this legend are restricted during the period shown as provided in paragraph (b)(4) of the Rights in Noncommercial Technical Data and Computer Software-Small Business Innovative Research (SBIR) Program clause contained in the above identified contract. Expiration of SBIR Data Rights: Expires four years after completion of the above cited project work for this or any other follow-on SBIR contract, whichever is later. + +No restrictions on government use apply after the expiration date shown above. Any reproduction of technical data, computer software, or portions thereof marked with this legend must also reproduce the markings. + * +*/ + +/** \mainpage + + + This library provides support for meta-recognition, i.e. recognizing when a recognition system is working well and when it is not and using that self-knowledge to improve the system. It can be used for prediction of failure, fusion, score renormalization, SVM renormalization and converting SVM or recognition scores into statistially well supported probility estimtes. The analysis is based on an analysis of the recognition system scores. + + +The fundamental ideas are described in + +"Meta-Recognition: The Theory and Practice of Recognition Score Analysis," +Walter J. Scheirer, Anderson Rocha, Ross Micheals, Terrance E. Boult, +IEEE Transactions on Pattern Analysis and Machine Intelligence (T-PAMI), +33(8), pp 1689--1695, Aug, 2011. + +and SVM support as described in + +"Multi-Attribute Spaces: Calibration for Attribute Fusion and Similarity Search," +Walter J. Scheirer, Neeraj Kumar, Peter N. Belhumeur, Terrance E. Boult, +Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), +June 2012. + + +The underlying extream value theory provide stong theortical basis for the computations, but to make it useful one must transform the data into the proper frame. 
The C++ version provides objects that can compute and store information about the transform and then provide for prediction, w-score values (probability estimates), or renormalizatoin of a vector of data. + + The library also contains a "C" interface functions for very basic weilbull usage for Meta-Recognition. + The C-based library has a number of STRONG assumptions you must follow as we cannot test for all of them. + 1) All fitting and testing are presuming "larger is better", If you are fitting something where smaller is better you need to transform it before fitting. + 2) All data is positive (okay we can and do test for that, but better to know upfront what you are doing) + 3) There must be sufficient range in your data to actually fit the weilbull. If all the data is the same, or nearly the same, it may fal to converge and will report errors. + + While free for non-commercial use this library is subject to the license restrictions, see LICENSE.TXT for details. + +*/ + +#include "MetaRecognition.h" +#include +//#include +#include + +#ifdef __cplusplus +extern "C" { +#endif +extern int weibull_fit_verbose_debug; +#ifdef __cplusplus +} +#endif +MetaRecognition::MetaRecognition(int scores_to_dropx, int fitting_sizex, bool verb, double alphax, int translate_amountx): + scores_to_drop(scores_to_dropx),verbose(verb),fitting_size(fitting_sizex),alpha(alphax),translate_amount(translate_amountx) +{ + memset(parmhat,0,sizeof(parmhat)); + memset(parmci,0,sizeof(parmci)); + sign = 1; + ftype = complement_reject; + small_score=0; + isvalid=false; + if(verb) weibull_fit_verbose_debug=1; + else weibull_fit_verbose_debug=0; +} + +MetaRecognition::~MetaRecognition() +{ + // free(parmhat); + // free(parmci); +} + +bool MetaRecognition::is_valid(){ + return isvalid; +} + +void MetaRecognition::set_translate(double t){ + translate_amount = t; + isvalid=false; +}; + + +void MetaRecognition::Reset(){ + memset(parmhat,0,sizeof(parmhat)); + memset(parmci,0,sizeof(parmci)); + sign = 1; + scores_to_drop = 0; + small_score=0; + isvalid=false; +} + + +int compare_sort_decending (const void * a, const void * b) +{ + const double *da = (const double *) a; + const double *db = (const double *) b; + return (*da < *db) - (*da > *db); +} + +int compare_sort_assending (const void * a, const void * b) +{ + const double *da = (const double *) a; + const double *db = (const double *) b; + return (*da > *db) - (*da < *db); +} + +inline const char * const BoolToString(bool b) +{ + return b ? "true" : "false"; +} + +inline int const BoolToInt(bool b) +{ + return b ? 1 : 0; +} + +inline const bool IntToBool(const char * s) +{ + int val= atoi(s); + if(val) return true; + else return false; +} + +//Wraps calls to real weibull_inv and weibull_cdf functions and handles properly translating the data passed +//May eventually be a good idea to move real implementations of the functions here +//IF we do away with the C implementation. For now this allows for backward compantiblity with +//older code +// Inv computes the scores of the inverse CDF, i.e. 
returns y such that CDF(y) = +double MetaRecognition::Inv(double x) +{ + if(!isvalid) return -9999.0; + double score = weibull_inv(x, parmhat[0], parmhat[1]); + return (score - translate_amount + small_score)*sign; +} + +double MetaRecognition::CDF(double x) +{ + if(!isvalid) return -9999.0; + double translated_x = x*sign + translate_amount - small_score; + double wscore=weibull_cdf(translated_x, parmhat[0], parmhat[1]); + if(ftype==complement_model || ftype==positive_model) return 1-wscore; + return wscore; +}; + +double MetaRecognition::W_score(double x){ + return CDF(x); +}; + +bool MetaRecognition::Predict_Match(double x, double threshold){ + double score = Inv(threshold); + if(sign <0) return (x < score); + return (x > score); +}; + +int MetaRecognition::ReNormalize(double *invec, double *outvec, int length) +{ + if(!isvalid) return -9997.0; + int rval=1; + for(int i=0; i< length; i++){ + outvec[i] = W_score(invec[i]); + } + return rval; +} + + +//used by weibull__evt_low and weibull__evt_high, which sets the desired sign(low -1, high 1) +//before passing to generic +int MetaRecognition::EvtGeneric(double* inputData, int inputDataSize, int inward, double x) +{ + double * inputDataCopy = (double *) malloc(sizeof(double) * inputDataSize); + + double * dataPtr = NULL; + int icnt=0; + if(!inward && (sign > 0) ) { + icnt = inputDataSize; + memcpy(inputDataCopy,inputData, inputDataSize*sizeof(double)); + } + if(!inward && (sign < 0) ){ + for(int i=0; i < inputDataSize; i++) inputDataCopy[i] = (inputData[i]*sign); //doing extremes just flip sign if needed + icnt = inputDataSize; + } + else if(inward && (sign < 0)) { /* this is fit above x but approaching x */ + for(int i=0; i < inputDataSize; i++) { + if(inputData[i] > x) { + inputDataCopy[icnt++] = (inputData[i]*sign); //copy what is above x, and flip signs (so biggest is important) + } + } + } else if(inward && (sign > 0)) { /* this is fit below x but approaching x */ + for(int i=0; i < inputDataSize; i++) { + if(inputData[i] < x) { + inputDataCopy[icnt++] = (inputData[i]); //copy only what is above x. + } + } + } + + //sort data and get smallest score + qsort(inputDataCopy, icnt , sizeof(double), compare_sort_decending); + + //Want only the top fitting_size scores but als noneed to adap if dropping top score + if(scores_to_drop>0){ + dataPtr=inputDataCopy+scores_to_drop; + } else { + dataPtr=inputDataCopy; + } + + small_score = dataPtr[fitting_size-1]; + + for(int i=0; i < fitting_size; i++) + { + //translate and subtract small score + dataPtr[i] = dataPtr[i] + translate_amount - small_score; + } + + + int rval = weibull_fit(parmhat, parmci, dataPtr, alpha, fitting_size); + isvalid= true; + if(rval != 1) Reset(); + free(inputDataCopy); + return rval; +} + +//Wrapper fitting functions EvtLow and EvtHigh to make it simpler for new users of the library. 
+int MetaRecognition::FitLow(double* inputData, int inputDataSize, int fsize) +{ + if(fsize>0) fitting_size=fsize; + sign = -1; + return EvtGeneric(inputData, inputDataSize); +} + +int MetaRecognition::FitHigh(double* inputData, int inputDataSize, int fsize) +{ + if(fsize>0) fitting_size=fsize; + sign = 1; + return EvtGeneric(inputData, inputDataSize); +} + +int MetaRecognition::FitSVM(svm_node_libsvm* SVMdata, int inputDataSize, int label_of_interest, bool label_has_positive_score, int fit_type, int fit_size ) +{ + + Reset(); + ftype = (MR_fitting_type)fit_type; + fitting_size = fit_size; + double * inputDataCopy = (double *) malloc(sizeof(double) * inputDataSize); + int sign_of_label_of_interest=0; + double * dataPtr = NULL; + int sign_of_expected_score=-1; + if(label_has_positive_score) sign_of_expected_score=1; + + int icnt=0; + bool rejection=(ftype==complement_reject || ftype == positive_reject); + if(rejection) { // default we fit on the complement class and then do rejection to determine probability + for(int i=0; i < inputDataSize; i++) { + if(SVMdata[i].index != label_of_interest) inputDataCopy[icnt++] = (SVMdata[i].value); //doing extremes just flip sign if needed + else { + if(SVMdata[i].value >0) sign_of_label_of_interest++; + else sign_of_label_of_interest--; + } + } + } else { + for(int i=0; i < inputDataSize; i++) { + if(SVMdata[i].index == label_of_interest) inputDataCopy[icnt++] = (SVMdata[i].value); //doing extremes just flip sign if needed + else { + if(SVMdata[i].value >0) sign_of_label_of_interest++; + else sign_of_label_of_interest--; + } + } + } + if (verbose && sign_of_label_of_interest * sign_of_expected_score > 0){ + printf("In MetaRecognition::FitSVM, warning: possible inconsistency average of the non-matching data has score %d, but expected sign is %d\n", + sign_of_label_of_interest, -sign_of_expected_score); + } + + + /* expected sign combines with reject_complement to determine if we have to flip or not. 
+ We flip if positives scores, with smaller is better, is the goal, + we flip if sign_of_expected_score >0 and !force_rejection + we flip if sign_of_expected_score <0 and force_rejection */ + + if((!label_has_positive_score && rejection) + || (label_has_positive_score && !rejection)) { + sign = -1; + for(int i=0; i < icnt; i++) { + inputDataCopy[i] *= -1; //doing extremes just flip sign if needed + } + } else sign=1; + + //sort data and get smallest score + qsort(inputDataCopy, icnt , sizeof(double), compare_sort_decending); + + //Want only the top fitting_size scores but als noneed to adap if dropping top score + if(scores_to_drop){ + dataPtr=inputDataCopy+scores_to_drop; + } else { + dataPtr=inputDataCopy; + } + + small_score = dataPtr[fitting_size - 1]; + + for(int i=0; i < fitting_size; i++) + { + //translate and subtract small score + dataPtr[i] = dataPtr[i] + translate_amount - small_score; + } + + int rval = weibull_fit(parmhat, parmci, dataPtr, alpha, fitting_size); + + isvalid= true; + if(rval != 1) Reset(); + free(inputDataCopy); + printf("Completed weibull fitting\n"); + return rval; +}; + +void MetaRecognition::Save(std::ostream &outputStream) const +{ + if(outputStream.good() && isvalid) + { + try { + outputStream.precision(21); + outputStream.setf(std::ios::scientific,std::ios::floatfield); + outputStream << parmhat[0] << " " << parmhat[1] << " " + << parmci[0] << " " << parmci[1] << " " + << parmci[2] << " " << parmci[3] << " " + << sign << " " + << alpha << " " + << (int) ftype << " " + << fitting_size << " " + << translate_amount << " " + << small_score<< " " + << scores_to_drop + << std::endl; + } catch(std::bad_alloc& e) { + std::cout << "Could not allocate the required memory, failed with error: '" << e.what() << "'" << std::endl; + } + } +} + +std::ostream& operator<< ( std::ostream& os, const MetaRecognition& mr ) + { + mr.Save(os); + return os; + } + +std::istream& operator>> ( std::istream& is, MetaRecognition& mr ) + { + mr.Load(is); + return is; + } + + +void MetaRecognition::Load(std::istream &inputStream) +{ + isvalid=false; + int temp; + if(inputStream.good()) + { + int iftype; + inputStream >> parmhat[0] >> parmhat[1] + >> parmci[0] >> parmci[1] + >> parmci[2] >> parmci[3] + >> sign + >> alpha + >> iftype + >> fitting_size + >> translate_amount + >> small_score + >> scores_to_drop; + isvalid=true; + ftype = (MR_fitting_type) iftype; + } +} + +void MetaRecognition::Save(FILE *outputFile) const +{ + if((outputFile != NULL) && !feof(outputFile)) + { + fprintf(outputFile, + "%21.18g %21.18g " //parmaht + "%21.18g %21.18g " //parmci + "%21.18g %21.18g " + "%d %f %d %d " //sign, alpha, fitting size + "%d %21.18g %d\n", //translate, small_score, scores_to_drop + parmhat[0], parmhat[1], + parmci[0],parmci[1], + parmci[2],parmci[3], + sign, alpha, (int) ftype,fitting_size, + translate_amount, small_score, scores_to_drop); + } +} + +void MetaRecognition::Load(FILE *inputFile) +{ + int temp, iftype; + int retcode=0; + isvalid=false; + if((inputFile != NULL) && !feof(inputFile)) + { + + retcode = fscanf(inputFile, + "%lf %lf " //parmaht + "%lf %lf " //parmci + "%lf %lf " + "%d %lf %d %d " //sign, alpha, fitting size + "%d %lf %d ", //translate, small_score, scores_to_drop, + parmhat, parmhat+1, + parmci,parmci+1, + parmci+2,parmci+3, + &sign, &alpha, &iftype, &fitting_size, + &translate_amount, &small_score, &scores_to_drop); + isvalid=true; + ftype = (MR_fitting_type) iftype; + } +} + + +void MetaRecognition::Save(char* filename) const +{ + FILE* fp = 
fopen(filename,"w"); + if(fp) { + Save(fp); + fclose(fp); + } else if(strlen(filename)>0) + fprintf(stderr,"SaveWeibull could not open file |%s|\n",filename); + else fprintf(stderr,"SaveWeibull called with null filename\n"); +} + +void MetaRecognition::Load(char* filename){ + FILE* fp = fopen(filename,"r"); + isvalid=false; + if(fp) { + Load(fp); + isvalid=true; + fclose(fp); + } else if(strlen(filename)>0) + fprintf(stderr,"LoadWeibull could not open file |%s|\n",filename); + else fprintf(stderr,"LoadWeibull called with null filename\n"); + +} + +std::string MetaRecognition::to_string() { + std::stringstream oss; + this->Save(oss); + return oss.str(); +} +void MetaRecognition::from_string(std::string input) { + std::stringstream iss(input); + this->Load(iss); +} + + +int MetaRecognition::set_fitting_size(int nsize){ isvalid=false; return fitting_size=nsize;} +int MetaRecognition::get_fitting_size(){ return fitting_size;} +int MetaRecognition::get_translate_amount(){ return translate_amount;} +int MetaRecognition::set_translate_amount(int ntrans) {isvalid=false; return translate_amount=ntrans;} +double MetaRecognition::get_small_score(){return small_score;} +double MetaRecognition::set_small_score(double nscore){isvalid=false; return small_score=nscore;} +int MetaRecognition::get_sign(){return sign;} +int MetaRecognition::set_sign(int nsign){return sign=nsign;} diff --git a/Downstream/Open-Set-Action-Recognition/experiments/libMR/MetaRecognition.h b/Downstream/Open-Set-Action-Recognition/experiments/libMR/MetaRecognition.h new file mode 100644 index 0000000..76088ec --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/libMR/MetaRecognition.h @@ -0,0 +1,148 @@ +/** + * MetaRecognition.h: + + * @Author Terry Boult tboult at securics com + * @Author Vijay Iyer viyer at securics com + + * + * Copyright 2010, 2011, Securics Inc. + + * Copyright 2011, Securics Inc. + See accompanying LICENSE agrement for details on rights. + +Parts of this technology are subject to SBIR data rights and as described in DFARS 252.227-7018 (June 1995) SBIR Data Rights which apply to Contract Number: N00014-11-C-0243 and STTR N00014-07-M-0421 to Securics Inc, 1870 Austin Bluffs Parkway, Colorado Springs, CO 80918 + +The Government's rights to use, modify, reproduce, release, perform, display, or disclose technical data or computer software marked with this legend are restricted during the period shown as provided in paragraph (b)(4) of the Rights in Noncommercial Technical Data and Computer Software-Small Business Innovative Research (SBIR) Program clause contained in the above identified contract. Expiration of SBIR Data Rights: Expires four years after completion of the above cited project work for this or any other follow-on SBIR contract, whichever is later. + +No restrictions on government use apply after the expiration date shown above. Any reproduction of technical data, computer software, or portions thereof marked with this legend must also reproduce the markings. 
+ * +*/ + +#pragma once +#ifndef MetaRecognition_H +#define MetaRecognition_H + + +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif + +#include +#include +#include +#include +#include +#include + + +#include "weibull.h" + +#ifdef _WIN32 +#define DLLEXPORT _declspec(dllexport) +#else +#define DLLEXPORT +#endif + +#define MAX_LINE 256 + +/// structure for svm data used by libSVM, used to allow easy MetaRecognition for SVM results (used as an argument for MetaRecogniton::fitSVM) +struct svm_node_libsvm +{ + int index; //!< class label, classic is -1 for negative class add +1 for positive class, but can be general its for multi-class + double value;//!< the SVM decision score +}; + +/**! + Class MetaRecognition provides a object-based interface for Meta-Recognition. The object can be ... + + TBD + + +*/ +class DLLEXPORT MetaRecognition //! Primary object/methods for tranforming and computing needed for any Meta recogntion task +{ +public: + +/** Ctor, can call with no arguments (uses default arguments for construciton). + All space is on the stack. + Object will exist but is not valid until some fitting fucntion is called +*/ + + MetaRecognition( int scores_to_drop=0, //!< is this object for prediction, if so how many top scores to drop when fitting + int fitting_size=9, //!< tail size for fitting. With small data the defaults are fine.. if you have millions make it larger for better predictions + bool verbose = false, //!< is the code chatty on errors during fitting, useful for debugging + double alpha=5.0, //!< band for confidence interfals + int translate_amount=10000 //!< shifting data to ensure all is positive.. if data is very broad and you want some probabilities for all points you can make it larger.. + ); + + ~MetaRecognition(); + + bool is_valid(); //!< is this object valid..i.e. has data been properly fit to determine parameters. + void set_translate(double t); //!< Change translate_amount to x, invalidates object + + void Reset(); //!< reset to "invalid" state + + bool Predict_Match(double x, double threshold = .9999999); //!< Is X from the "match" distribution (i.e. we reject null hypothesis of non-match), + double W_score(double x); //!< This is the commonly used function.. after fitting, it returns the probability of the given score being "correct". It is the same as CDF + double CDF(double x); //!< This is the cummumlative probablity of match being corrrect (or more precisely the probility the score (after transform) being an outlier for the distribution, which given the transforms applied, so bigger is better, this is the probablity the score is correct. + double Inv(double p); //!< This is score for which one would obtain CDF probability p (i.e. x such that p = CDF(x)) + + int ReNormalize(double *invec, double *outvec, int length); //!< W-score Renormalize the vecotor invec[0:length-1] into outvec (in and out can be same) return is 1 for success, <0 for error code + + + /// Use FitHight if your data is such that is larger is better. The code will still transform, and keep parmeters to keep small data away from zero. + // If you get scores that are complain about it being negative, make a MR object with different (larger) translate amount + /// returns 1 for success, <0 for error code + int FitHigh(double* inputData, int inputDataSize, int fit_size=-1); + + ///Use FitLow if your data is such that smaller scores are better.. we'll transform it for you and keep the + ///transform parameters in the class so later calls to W_score or CDF do the right thing. 
+  /// returns 1 for success, <0 for error code
+  int FitLow(double* inputData, int inputDataSize, int fit_size=-1);//
+
+  /// the types of fitting supported for SVM modeling
+  typedef enum {complement_reject=1, positive_reject=2, complement_model=3, positive_model=4} MR_fitting_type;
+
+  /// The function to use if you have SVM data; it separates out the data for the label of interest (or for rejecting
+  /// the complement of that label, which is the default) and uses that for fitting.
+  /// Returns 1 if it worked, <0 for error codes.
+  int FitSVM(svm_node_libsvm* SVMdata, int inputDataSize, int label_of_interest =1,  bool label_has_positive_score=true, int fit_type = 1, int fit_size=9 );
+
+
+  friend std::ostream& operator<<( std::ostream&, const MetaRecognition& ); //!< various I/O functions
+  friend std::istream& operator>>( std::istream&, MetaRecognition& ); //!< various I/O functions
+
+  void Save(std::ostream &outputStream) const; //!< various I/O functions
+  void Load(std::istream &inputStream); //!< various I/O functions
+  void Save(FILE *outputFile) const; //!< various I/O functions
+  void Load(FILE *inputFile); //!< various I/O functions
+  void Save(char* filename) const; //!< various I/O functions
+  void Load(char* filename); //!< various I/O functions
+  int get_fitting_size(); //!< get fitting_size (aka tail size)
+  int set_fitting_size(int nsize); //!< reset object and define a new fitting size
+  int get_translate_amount(); //!< get the internal translation amount (you probably don't need this, but just in case)
+  int set_translate_amount(int ntrans); //!< reset object and define a new translate amount; if you get errors because of negative data, increase this
+  int get_sign(); //!< get the internal sign variable (you probably don't need this, but just in case)
+  int set_sign(int nsign); //!< reset object and set sign (you probably don't need this, but just in case)
+  double get_small_score(); //!< get the internal small-score translation amount (you probably don't need this, but just in case)
+  double set_small_score(double nscore); //!< reset object and reset the internal small-score translation amount (you probably don't need this, but just in case)
+  bool verbose; //!< do we print internal/debugging stuff. Default is false. (you probably don't need this, but just in case)
+  std::string to_string(); //!< convert this object to a C++ string
+  void from_string(std::string in); //!< convert this object from a C++ string
+
+protected:
+  int EvtGeneric(double* inputData, int inputDataSize, int fit_inward=0, double x=0);
+  double parmhat[2]; //!< parameters of the Weibull: scale then shape
+  double parmci[4]; //!< confidence interval for the parameters: scale high, scale low, shape high, shape low
+  double alpha; //!< parameter for estimating the size of the confidence interval
+  int sign; //!< sign is positive if larger is better; negative means originally smaller was better (we transformed it for fitting)
+  MR_fitting_type ftype; //!< type of fitting used for SVM; default is reject complement
+  int fitting_size; //!< tail size for fitting in any of the FitXX functions
+  int translate_amount; //!< we transform the data so all fitting data is positive and bigger is better; this predefined constant helps ensure more of the end-user data is non-negative
+  double small_score; //!< the smallest score, so all fitting data is consistently positive; part of our transform
+  int scores_to_drop; //!< when fitting for recognition prediction, how many top scores are hypothesized to be a match, so we can fit on non-match data. Only used for fitting, no impact on transform.
+  bool isvalid; //!< are the parameters in the object valid. private:
+
+};
+
+#endif
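The class above is what the `libmr` Cython extension added later in this diff wraps for Python. As a quick orientation, here is a minimal sketch of the fit-then-W-score workflow from the Python side; the `libmr.MR()` constructor and the `fit_high` / `w_score_vector` / `inv` method names are taken from `estimate_wscores.py` below, while the array values and variable names are purely illustrative.

```python
import numpy as np
import libmr  # the Cython extension built from libmr.pyx / MetaRecognition.cpp / weibull.c

# Illustrative tail of scores to fit, and some scores to evaluate.
tail_scores = np.array([0.245, 0.2632, 0.3233, 0.3573, 0.4014, 0.4055, 0.4212, 0.5677])
test_scores = np.array([0.05, 0.25, 0.75, 1.5])

mr = libmr.MR()                                 # wraps a MetaRecognition object with default parameters
mr.fit_high(tail_scores, tail_scores.shape[0])  # Weibull fit on the high tail (cf. FitHigh above)
wscores = mr.w_score_vector(test_scores)        # per-score Weibull CDF values (cf. W_score/CDF above)
print(wscores)                                  # one value in [0, 1] per test score
print(mr.inv(0.5))                              # score at which the CDF reaches 0.5 (cf. Inv above)
```

FitLow, Predict_Match, and the Save/Load serialization are part of the same C++ interface; whether they are exposed under analogous snake_case names in the binding is not shown in this hunk.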
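The example script that follows notes that the w-scores are the quantities to plug into an expression of the form s_i * (1 - rho_i). A purely illustrative sketch of that recalibration step is given here; the variable names and numbers are hypothetical, and the actual recalibration used by the open-set recognition experiments is defined elsewhere in the repository.

```python
import numpy as np

def recalibrate(s, rho):
    """Down-weight raw scores s_i by their outlier probability rho_i (the w-score)."""
    return s * (1.0 - rho)

s = np.array([0.9, 0.6, 0.3])      # illustrative raw scores s_i
rho = np.array([0.05, 0.5, 0.95])  # illustrative w-scores rho_i obtained from libmr
print(recalibrate(s, rho))         # -> [0.855, 0.3, 0.015]
```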
diff --git a/Downstream/Open-Set-Action-Recognition/experiments/libMR/estimate_wscores.py b/Downstream/Open-Set-Action-Recognition/experiments/libMR/estimate_wscores.py
new file mode 100644
index 0000000..944786b
--- /dev/null
+++ b/Downstream/Open-Set-Action-Recognition/experiments/libMR/estimate_wscores.py
@@ -0,0 +1,26 @@
+import os, sys
+import scipy as sp
+import libmr
+
+def main():
+
+    posscores = sp.asarray([0.245 , 0.2632, 0.3233, 0.3573, 0.4014, 0.4055, 0.4212, 0.5677])
+    test_distances = sp.asarray([ 0.05, 0.1 , 0.25, 0.4 , 0.75, 1. , 1.5 , 2.])
+
+    mr = libmr.MR()
+    # since higher is worse and we want to fit the higher tail,
+    # use fit_high()
+    mr.fit_high(posscores, posscores.shape[0])
+    wscores = mr.w_score_vector(test_distances)
+    for i in range(wscores.shape[0]):
+        print("%.2f %.2f %.2f" % (test_distances[i], wscores[i], mr.inv(wscores[i])))
+    # wscores are the ones to be used in the equation
+    # s_i * (1 - rho_i)
+    print("Low wscore --> low probability that the score is an outlier, i.e. the sample IS NOT an outlier")
+    print("High wscore --> high probability that the score is an outlier, i.e. the sample IS an outlier")
+    print("posscores: ", posscores)
+    print("test_distances: ", test_distances)
+    print("wscores: ", wscores)
+
+if __name__ == "__main__":
+    main()
diff --git a/Downstream/Open-Set-Action-Recognition/experiments/libMR/libmr.c b/Downstream/Open-Set-Action-Recognition/experiments/libMR/libmr.c
new file mode 100644
index 0000000..06f2230
--- /dev/null
+++ b/Downstream/Open-Set-Action-Recognition/experiments/libMR/libmr.c
@@ -0,0 +1 @@
+#error Do not use this file, it is the result of a failed Cython compilation.
diff --git a/Downstream/Open-Set-Action-Recognition/experiments/libMR/libmr.cpp b/Downstream/Open-Set-Action-Recognition/experiments/libMR/libmr.cpp
new file mode 100644
index 0000000..bc6a2d8
--- /dev/null
+++ b/Downstream/Open-Set-Action-Recognition/experiments/libMR/libmr.cpp
@@ -0,0 +1,26539 @@
+/* Generated by Cython 0.29.22 */
+
+/* BEGIN: Cython Metadata
+{
+    "distutils": {
+        "depends": [
+            "/home/wentao/anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/core/include/numpy/arrayobject.h",
+            "/home/wentao/anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/core/include/numpy/ufuncobject.h",
+            "MetaRecognition.h"
+        ],
+        "include_dirs": [
+            ".",
+            "/home/wentao/anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/core/include"
+        ],
+        "language": "c++",
+        "name": "libmr",
+        "sources": [
+            "libmr.pyx",
+            "MetaRecognition.cpp",
+            "weibull.c"
+        ]
+    },
+    "module_name": "libmr"
+}
+END: Cython Metadata */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#ifndef Py_PYTHON_H
+    #error Python headers needed to compile C extensions, please install development version of Python.
+#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
+    #error Cython requires Python 2.6+ or Python 3.3+.
+#else +#define CYTHON_ABI "0_29_22" +#define CYTHON_HEX_VERSION 0x001D16F0 +#define CYTHON_FUTURE_DIVISION 0 +#include +#ifndef offsetof + #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) +#endif +#if !defined(WIN32) && !defined(MS_WINDOWS) + #ifndef __stdcall + #define __stdcall + #endif + #ifndef __cdecl + #define __cdecl + #endif + #ifndef __fastcall + #define __fastcall + #endif +#endif +#ifndef DL_IMPORT + #define DL_IMPORT(t) t +#endif +#ifndef DL_EXPORT + #define DL_EXPORT(t) t +#endif +#define __PYX_COMMA , +#ifndef HAVE_LONG_LONG + #if PY_VERSION_HEX >= 0x02070000 + #define HAVE_LONG_LONG + #endif +#endif +#ifndef PY_LONG_LONG + #define PY_LONG_LONG LONG_LONG +#endif +#ifndef Py_HUGE_VAL + #define Py_HUGE_VAL HUGE_VAL +#endif +#ifdef PYPY_VERSION + #define CYTHON_COMPILING_IN_PYPY 1 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #undef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 1 + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 +#elif defined(PYSTON_VERSION) + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 1 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 +#else + #define CYTHON_COMPILING_IN_PYPY 0 + 
#define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 1 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) + #define CYTHON_USE_PYTYPE_LOOKUP 1 + #endif + #if PY_MAJOR_VERSION < 3 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #elif !defined(CYTHON_USE_PYLONG_INTERNALS) + #define CYTHON_USE_PYLONG_INTERNALS 1 + #endif + #ifndef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 1 + #endif + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #if PY_VERSION_HEX < 0x030300F0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #elif !defined(CYTHON_USE_UNICODE_WRITER) + #define CYTHON_USE_UNICODE_WRITER 1 + #endif + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #ifndef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 1 + #endif + #ifndef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 1 + #endif + #ifndef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) + #endif + #ifndef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) + #endif + #ifndef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) + #endif + #ifndef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) + #endif +#endif +#if !defined(CYTHON_FAST_PYCCALL) +#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) +#endif +#if CYTHON_USE_PYLONG_INTERNALS + #include "longintrepr.h" + #undef SHIFT + #undef BASE + #undef MASK + #ifdef SIZEOF_VOID_P + enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; + #endif +#endif +#ifndef __has_attribute + #define __has_attribute(x) 0 +#endif +#ifndef __has_cpp_attribute + #define __has_cpp_attribute(x) 0 +#endif +#ifndef CYTHON_RESTRICT + #if defined(__GNUC__) + #define CYTHON_RESTRICT __restrict__ + #elif defined(_MSC_VER) && _MSC_VER >= 1400 + #define CYTHON_RESTRICT __restrict + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_RESTRICT restrict + #else + #define CYTHON_RESTRICT + #endif +#endif +#ifndef CYTHON_UNUSED +# if defined(__GNUC__) +# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +#endif +#ifndef CYTHON_MAYBE_UNUSED_VAR +# if defined(__cplusplus) + template void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } +# else +# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) +# endif +#endif +#ifndef CYTHON_NCP_UNUSED +# if CYTHON_COMPILING_IN_CPYTHON +# define CYTHON_NCP_UNUSED +# else +# define CYTHON_NCP_UNUSED CYTHON_UNUSED +# endif +#endif +#define 
__Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) +#ifdef _MSC_VER + #ifndef _MSC_STDINT_H_ + #if _MSC_VER < 1300 + typedef unsigned char uint8_t; + typedef unsigned int uint32_t; + #else + typedef unsigned __int8 uint8_t; + typedef unsigned __int32 uint32_t; + #endif + #endif +#else + #include +#endif +#ifndef CYTHON_FALLTHROUGH + #if defined(__cplusplus) && __cplusplus >= 201103L + #if __has_cpp_attribute(fallthrough) + #define CYTHON_FALLTHROUGH [[fallthrough]] + #elif __has_cpp_attribute(clang::fallthrough) + #define CYTHON_FALLTHROUGH [[clang::fallthrough]] + #elif __has_cpp_attribute(gnu::fallthrough) + #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] + #endif + #endif + #ifndef CYTHON_FALLTHROUGH + #if __has_attribute(fallthrough) + #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) + #else + #define CYTHON_FALLTHROUGH + #endif + #endif + #if defined(__clang__ ) && defined(__apple_build_version__) + #if __apple_build_version__ < 7000000 + #undef CYTHON_FALLTHROUGH + #define CYTHON_FALLTHROUGH + #endif + #endif +#endif + +#ifndef __cplusplus + #error "Cython files generated with the C++ option must be compiled with a C++ compiler." +#endif +#ifndef CYTHON_INLINE + #if defined(__clang__) + #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) + #else + #define CYTHON_INLINE inline + #endif +#endif +template +void __Pyx_call_destructor(T& x) { + x.~T(); +} +template +class __Pyx_FakeReference { + public: + __Pyx_FakeReference() : ptr(NULL) { } + __Pyx_FakeReference(const T& ref) : ptr(const_cast(&ref)) { } + T *operator->() { return ptr; } + T *operator&() { return ptr; } + operator T&() { return *ptr; } + template bool operator ==(U other) { return *ptr == other; } + template bool operator !=(U other) { return *ptr != other; } + private: + T *ptr; +}; + +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) + #define Py_OptimizeFlag 0 +#endif +#define __PYX_BUILD_PY_SSIZE_T "n" +#define CYTHON_FORMAT_SSIZE_T "z" +#if PY_MAJOR_VERSION < 3 + #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) + #define __Pyx_DefaultClassType PyClass_Type +#else + #define __Pyx_BUILTIN_MODULE_NAME "builtins" +#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#else + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#endif + #define __Pyx_DefaultClassType PyType_Type +#endif +#ifndef Py_TPFLAGS_CHECKTYPES + #define Py_TPFLAGS_CHECKTYPES 0 +#endif +#ifndef Py_TPFLAGS_HAVE_INDEX + #define Py_TPFLAGS_HAVE_INDEX 0 +#endif +#ifndef Py_TPFLAGS_HAVE_NEWBUFFER + #define Py_TPFLAGS_HAVE_NEWBUFFER 0 +#endif +#ifndef Py_TPFLAGS_HAVE_FINALIZE + #define Py_TPFLAGS_HAVE_FINALIZE 0 +#endif +#ifndef METH_STACKLESS + #define METH_STACKLESS 0 +#endif +#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) + #ifndef METH_FASTCALL + #define METH_FASTCALL 0x80 + #endif + typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); + typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, + Py_ssize_t nargs, 
PyObject *kwnames); +#else + #define __Pyx_PyCFunctionFast _PyCFunctionFast + #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords +#endif +#if CYTHON_FAST_PYCCALL +#define __Pyx_PyFastCFunction_Check(func)\ + ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) +#else +#define __Pyx_PyFastCFunction_Check(func) 0 +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) + #define PyObject_Malloc(s) PyMem_Malloc(s) + #define PyObject_Free(p) PyMem_Free(p) + #define PyObject_Realloc(p) PyMem_Realloc(p) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 + #define PyMem_RawMalloc(n) PyMem_Malloc(n) + #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) + #define PyMem_RawFree(p) PyMem_Free(p) +#endif +#if CYTHON_COMPILING_IN_PYSTON + #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) +#else + #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) +#endif +#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#elif PY_VERSION_HEX >= 0x03060000 + #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() +#elif PY_VERSION_HEX >= 0x03000000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#else + #define __Pyx_PyThreadState_Current _PyThreadState_Current +#endif +#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) +#include "pythread.h" +#define Py_tss_NEEDS_INIT 0 +typedef int Py_tss_t; +static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { + *key = PyThread_create_key(); + return 0; +} +static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { + Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); + *key = Py_tss_NEEDS_INIT; + return key; +} +static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { + PyObject_Free(key); +} +static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { + return *key != Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { + PyThread_delete_key(*key); + *key = Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { + return PyThread_set_key_value(*key, value); +} +static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { + return PyThread_get_key_value(*key); +} +#endif +#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) +#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) +#else +#define __Pyx_PyDict_NewPresized(n) PyDict_New() +#endif +#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION + #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) +#else + #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS +#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) +#else +#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) +#endif +#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) + #define CYTHON_PEP393_ENABLED 1 + #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ + 0 : _PyUnicode_Ready((PyObject *)(op))) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) + #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) + #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) + #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) + #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) + #else + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) + #endif +#else + #define CYTHON_PEP393_ENABLED 0 + #define PyUnicode_1BYTE_KIND 1 + #define PyUnicode_2BYTE_KIND 2 + #define PyUnicode_4BYTE_KIND 4 + #define __Pyx_PyUnicode_READY(op) (0) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) + #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) + #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) + #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) +#endif +#if CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) +#else + #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ + PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) + #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) + #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) + #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) +#endif +#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) +#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) +#else + #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) +#endif +#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) + #define PyObject_ASCII(o) PyObject_Repr(o) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBaseString_Type PyUnicode_Type + #define PyStringObject PyUnicodeObject + #define PyString_Type PyUnicode_Type + #define PyString_Check PyUnicode_Check + #define PyString_CheckExact PyUnicode_CheckExact +#ifndef PyObject_Unicode + #define PyObject_Unicode PyObject_Str +#endif +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) + #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) +#else + #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) + #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) +#endif +#ifndef PySet_CheckExact + #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) +#endif +#if PY_VERSION_HEX >= 0x030900A4 + #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) + #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) +#else + #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) + #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) +#endif +#if CYTHON_ASSUME_SAFE_MACROS + #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) +#else + #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyIntObject PyLongObject + #define PyInt_Type PyLong_Type + #define PyInt_Check(op) PyLong_Check(op) + #define PyInt_CheckExact(op) PyLong_CheckExact(op) + #define PyInt_FromString PyLong_FromString + #define PyInt_FromUnicode PyLong_FromUnicode + #define PyInt_FromLong PyLong_FromLong + #define PyInt_FromSize_t PyLong_FromSize_t + #define PyInt_FromSsize_t PyLong_FromSsize_t + #define PyInt_AsLong PyLong_AsLong + #define PyInt_AS_LONG PyLong_AS_LONG + #define PyInt_AsSsize_t PyLong_AsSsize_t + #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask + #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask + #define PyNumber_Int PyNumber_Long +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBoolObject PyLongObject +#endif +#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY + #ifndef PyUnicode_InternFromString + #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) + #endif +#endif +#if PY_VERSION_HEX < 0x030200A4 + typedef long Py_hash_t; + #define __Pyx_PyInt_FromHash_t PyInt_FromLong + #define __Pyx_PyInt_AsHash_t PyInt_AsLong +#else + #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t + #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) +#else + #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) +#endif +#if CYTHON_USE_ASYNC_SLOTS + #if PY_VERSION_HEX >= 0x030500B1 + #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods + #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) + #else + #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) + #endif +#else + #define __Pyx_PyType_AsAsync(obj) NULL +#endif +#ifndef __Pyx_PyAsyncMethodsStruct + typedef struct { + unaryfunc am_await; + unaryfunc am_aiter; + unaryfunc am_anext; + } __Pyx_PyAsyncMethodsStruct; +#endif + +#if defined(WIN32) || defined(MS_WINDOWS) + #define _USE_MATH_DEFINES +#endif +#include +#ifdef NAN +#define __PYX_NAN() ((float) NAN) +#else +static CYTHON_INLINE float __PYX_NAN() { + float value; + memset(&value, 0xFF, sizeof(value)); + return value; +} +#endif +#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) +#define __Pyx_truncl trunc +#else +#define __Pyx_truncl truncl +#endif + +#define __PYX_MARK_ERR_POS(f_index, lineno) \ + { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } +#define __PYX_ERR(f_index, lineno, Ln_error) \ + { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } + +#ifndef __PYX_EXTERN_C + #ifdef __cplusplus + #define __PYX_EXTERN_C extern "C" + #else + #define __PYX_EXTERN_C extern + #endif +#endif + +#define __PYX_HAVE__libmr +#define __PYX_HAVE_API__libmr +/* Early includes */ +#include "MetaRecognition.h" +#include +#include +#include "ios" +#include "new" +#include "stdexcept" +#include "typeinfo" +#include +#include +#include "numpy/arrayobject.h" +#include "numpy/ufuncobject.h" + + /* NumPy API declarations from "numpy/__init__.pxd" */ + +#include "pythread.h" +#include "pystate.h" +#ifdef _OPENMP +#include +#endif /* _OPENMP */ + +#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) +#define CYTHON_WITHOUT_ASSERTIONS +#endif + +typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; + const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; + +#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) +#define __PYX_DEFAULT_STRING_ENCODING "" +#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString +#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#define __Pyx_uchar_cast(c) ((unsigned char)c) +#define __Pyx_long_cast(x) ((long)x) +#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ + (sizeof(type) < sizeof(Py_ssize_t)) ||\ + (sizeof(type) > sizeof(Py_ssize_t) &&\ + likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX) &&\ + (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ + v == (type)PY_SSIZE_T_MIN))) ||\ + (sizeof(type) == sizeof(Py_ssize_t) &&\ + (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX))) ) +static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { + return (size_t) i < (size_t) limit; +} +#if defined (__cplusplus) && __cplusplus >= 201103L + #include + #define __Pyx_sst_abs(value) std::abs(value) +#elif SIZEOF_INT >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) abs(value) +#elif SIZEOF_LONG >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) labs(value) +#elif defined 
(_MSC_VER) + #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) +#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define __Pyx_sst_abs(value) llabs(value) +#elif defined (__GNUC__) + #define __Pyx_sst_abs(value) __builtin_llabs(value) +#else + #define __Pyx_sst_abs(value) ((value<0) ? -value : value) +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); +#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) +#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) +#define __Pyx_PyBytes_FromString PyBytes_FromString +#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#else + #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize +#endif +#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) +#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) +#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) +#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) +#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) +static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { + const Py_UNICODE *u_end = u; + while (*u_end++) ; + return (size_t)(u_end - u - 1); +} +#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) +#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode +#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode +#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) +#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); +#define __Pyx_PySequence_Tuple(obj)\ + (likely(PyTuple_CheckExact(obj)) ? 
__Pyx_NewRef(obj) : PySequence_Tuple(obj)) +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); +#if CYTHON_ASSUME_SAFE_MACROS +#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) +#else +#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) +#endif +#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) +#else +#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) +#endif +#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII +static int __Pyx_sys_getdefaultencoding_not_ascii; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + PyObject* ascii_chars_u = NULL; + PyObject* ascii_chars_b = NULL; + const char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + if (strcmp(default_encoding_c, "ascii") == 0) { + __Pyx_sys_getdefaultencoding_not_ascii = 0; + } else { + char ascii_chars[128]; + int c; + for (c = 0; c < 128; c++) { + ascii_chars[c] = c; + } + __Pyx_sys_getdefaultencoding_not_ascii = 1; + ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); + if (!ascii_chars_u) goto bad; + ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); + if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { + PyErr_Format( + PyExc_ValueError, + "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", + default_encoding_c); + goto bad; + } + Py_DECREF(ascii_chars_u); + Py_DECREF(ascii_chars_b); + } + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + Py_XDECREF(ascii_chars_u); + Py_XDECREF(ascii_chars_b); + return -1; +} +#endif +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) +#else +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +static char* __PYX_DEFAULT_STRING_ENCODING; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); + if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; + strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + return -1; +} +#endif +#endif + + +/* Test for GCC > 2.95 */ +#if defined(__GNUC__) 
&& (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) + #define likely(x) __builtin_expect(!!(x), 1) + #define unlikely(x) __builtin_expect(!!(x), 0) +#else /* !__GNUC__ or GCC < 2.95 */ + #define likely(x) (x) + #define unlikely(x) (x) +#endif /* __GNUC__ */ +static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } + +static PyObject *__pyx_m = NULL; +static PyObject *__pyx_d; +static PyObject *__pyx_b; +static PyObject *__pyx_cython_runtime = NULL; +static PyObject *__pyx_empty_tuple; +static PyObject *__pyx_empty_bytes; +static PyObject *__pyx_empty_unicode; +static int __pyx_lineno; +static int __pyx_clineno = 0; +static const char * __pyx_cfilenm= __FILE__; +static const char *__pyx_filename; + +/* Header.proto */ +#if !defined(CYTHON_CCOMPLEX) + #if defined(__cplusplus) + #define CYTHON_CCOMPLEX 1 + #elif defined(_Complex_I) + #define CYTHON_CCOMPLEX 1 + #else + #define CYTHON_CCOMPLEX 0 + #endif +#endif +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + #include + #else + #include + #endif +#endif +#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) + #undef _Complex_I + #define _Complex_I 1.0fj +#endif + + +static const char *__pyx_f[] = { + "libmr.pyx", + "stringsource", + "__init__.pxd", + "type.pxd", +}; +/* BufferFormatStructs.proto */ +#define IS_UNSIGNED(type) (((type) -1) > 0) +struct __Pyx_StructField_; +#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) +typedef struct { + const char* name; + struct __Pyx_StructField_* fields; + size_t size; + size_t arraysize[8]; + int ndim; + char typegroup; + char is_unsigned; + int flags; +} __Pyx_TypeInfo; +typedef struct __Pyx_StructField_ { + __Pyx_TypeInfo* type; + const char* name; + size_t offset; +} __Pyx_StructField; +typedef struct { + __Pyx_StructField* field; + size_t parent_offset; +} __Pyx_BufFmt_StackElem; +typedef struct { + __Pyx_StructField root; + __Pyx_BufFmt_StackElem* head; + size_t fmt_offset; + size_t new_count, enc_count; + size_t struct_alignment; + int is_complex; + char enc_type; + char new_packmode; + char enc_packmode; + char is_valid_array; +} __Pyx_BufFmt_Context; + +/* MemviewSliceStruct.proto */ +struct __pyx_memoryview_obj; +typedef struct { + struct __pyx_memoryview_obj *memview; + char *data; + Py_ssize_t shape[8]; + Py_ssize_t strides[8]; + Py_ssize_t suboffsets[8]; +} __Pyx_memviewslice; +#define __Pyx_MemoryView_Len(m) (m.shape[0]) + +/* Atomics.proto */ +#include +#ifndef CYTHON_ATOMICS + #define CYTHON_ATOMICS 1 +#endif +#define __pyx_atomic_int_type int +#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\ + (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\ + !defined(__i386__) + #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1) + #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1) + #ifdef __PYX_DEBUG_ATOMICS + #warning "Using GNU atomics" + #endif +#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0 + #include + #undef __pyx_atomic_int_type + #define __pyx_atomic_int_type LONG + #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value) + #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value) + #ifdef __PYX_DEBUG_ATOMICS + #pragma message ("Using MSVC atomics") + #endif +#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0 + #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value) + #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value) + #ifdef __PYX_DEBUG_ATOMICS + #warning "Using 
Intel atomics" + #endif +#else + #undef CYTHON_ATOMICS + #define CYTHON_ATOMICS 0 + #ifdef __PYX_DEBUG_ATOMICS + #warning "Not using atomics" + #endif +#endif +typedef volatile __pyx_atomic_int_type __pyx_atomic_int; +#if CYTHON_ATOMICS + #define __pyx_add_acquisition_count(memview)\ + __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) + #define __pyx_sub_acquisition_count(memview)\ + __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) +#else + #define __pyx_add_acquisition_count(memview)\ + __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) + #define __pyx_sub_acquisition_count(memview)\ + __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) +#endif + +/* ForceInitThreads.proto */ +#ifndef __PYX_FORCE_INIT_THREADS + #define __PYX_FORCE_INIT_THREADS 0 +#endif + +/* NoFastGil.proto */ +#define __Pyx_PyGILState_Ensure PyGILState_Ensure +#define __Pyx_PyGILState_Release PyGILState_Release +#define __Pyx_FastGIL_Remember() +#define __Pyx_FastGIL_Forget() +#define __Pyx_FastGilFuncInit() + + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":689 + * # in Cython to enable them only on the right systems. + * + * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t + */ +typedef npy_int8 __pyx_t_5numpy_int8_t; + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":690 + * + * ctypedef npy_int8 int8_t + * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< + * ctypedef npy_int32 int32_t + * ctypedef npy_int64 int64_t + */ +typedef npy_int16 __pyx_t_5numpy_int16_t; + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":691 + * ctypedef npy_int8 int8_t + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< + * ctypedef npy_int64 int64_t + * #ctypedef npy_int96 int96_t + */ +typedef npy_int32 __pyx_t_5numpy_int32_t; + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":692 + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t + * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< + * #ctypedef npy_int96 int96_t + * #ctypedef npy_int128 int128_t + */ +typedef npy_int64 __pyx_t_5numpy_int64_t; + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":696 + * #ctypedef npy_int128 int128_t + * + * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t + */ +typedef npy_uint8 __pyx_t_5numpy_uint8_t; + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":697 + * + * ctypedef npy_uint8 uint8_t + * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< + * ctypedef npy_uint32 uint32_t + * ctypedef npy_uint64 uint64_t + */ +typedef npy_uint16 __pyx_t_5numpy_uint16_t; + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":698 + * ctypedef npy_uint8 uint8_t + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< + * ctypedef npy_uint64 uint64_t + * #ctypedef npy_uint96 uint96_t + */ +typedef npy_uint32 __pyx_t_5numpy_uint32_t; + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":699 + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t + * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< + * #ctypedef npy_uint96 uint96_t + * #ctypedef npy_uint128 
uint128_t + */ +typedef npy_uint64 __pyx_t_5numpy_uint64_t; + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":703 + * #ctypedef npy_uint128 uint128_t + * + * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< + * ctypedef npy_float64 float64_t + * #ctypedef npy_float80 float80_t + */ +typedef npy_float32 __pyx_t_5numpy_float32_t; + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":704 + * + * ctypedef npy_float32 float32_t + * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< + * #ctypedef npy_float80 float80_t + * #ctypedef npy_float128 float128_t + */ +typedef npy_float64 __pyx_t_5numpy_float64_t; + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":713 + * # The int types are mapped a bit surprising -- + * # numpy.int corresponds to 'l' and numpy.long to 'q' + * ctypedef npy_long int_t # <<<<<<<<<<<<<< + * ctypedef npy_longlong long_t + * ctypedef npy_longlong longlong_t + */ +typedef npy_long __pyx_t_5numpy_int_t; + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":714 + * # numpy.int corresponds to 'l' and numpy.long to 'q' + * ctypedef npy_long int_t + * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< + * ctypedef npy_longlong longlong_t + * + */ +typedef npy_longlong __pyx_t_5numpy_long_t; + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":715 + * ctypedef npy_long int_t + * ctypedef npy_longlong long_t + * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< + * + * ctypedef npy_ulong uint_t + */ +typedef npy_longlong __pyx_t_5numpy_longlong_t; + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":717 + * ctypedef npy_longlong longlong_t + * + * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< + * ctypedef npy_ulonglong ulong_t + * ctypedef npy_ulonglong ulonglong_t + */ +typedef npy_ulong __pyx_t_5numpy_uint_t; + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":718 + * + * ctypedef npy_ulong uint_t + * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< + * ctypedef npy_ulonglong ulonglong_t + * + */ +typedef npy_ulonglong __pyx_t_5numpy_ulong_t; + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":719 + * ctypedef npy_ulong uint_t + * ctypedef npy_ulonglong ulong_t + * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< + * + * ctypedef npy_intp intp_t + */ +typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":721 + * ctypedef npy_ulonglong ulonglong_t + * + * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< + * ctypedef npy_uintp uintp_t + * + */ +typedef npy_intp __pyx_t_5numpy_intp_t; + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":722 + * + * ctypedef npy_intp intp_t + * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< + * + * ctypedef npy_double float_t + */ +typedef npy_uintp __pyx_t_5numpy_uintp_t; + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":724 + * ctypedef npy_uintp uintp_t + * + * ctypedef npy_double float_t # <<<<<<<<<<<<<< + * ctypedef npy_double double_t + * ctypedef npy_longdouble longdouble_t + */ +typedef npy_double __pyx_t_5numpy_float_t; + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":725 + * + * ctypedef npy_double float_t + * ctypedef npy_double double_t # 
<<<<<<<<<<<<<< + * ctypedef npy_longdouble longdouble_t + * + */ +typedef npy_double __pyx_t_5numpy_double_t; + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":726 + * ctypedef npy_double float_t + * ctypedef npy_double double_t + * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< + * + * ctypedef npy_cfloat cfloat_t + */ +typedef npy_longdouble __pyx_t_5numpy_longdouble_t; +/* Declarations.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + typedef ::std::complex< float > __pyx_t_float_complex; + #else + typedef float _Complex __pyx_t_float_complex; + #endif +#else + typedef struct { float real, imag; } __pyx_t_float_complex; +#endif +static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); + +/* Declarations.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + typedef ::std::complex< double > __pyx_t_double_complex; + #else + typedef double _Complex __pyx_t_double_complex; + #endif +#else + typedef struct { double real, imag; } __pyx_t_double_complex; +#endif +static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); + + +/*--- Type declarations ---*/ +struct __pyx_obj_5libmr_MR; +struct __pyx_array_obj; +struct __pyx_MemviewEnum_obj; +struct __pyx_memoryview_obj; +struct __pyx_memoryviewslice_obj; + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":728 + * ctypedef npy_longdouble longdouble_t + * + * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< + * ctypedef npy_cdouble cdouble_t + * ctypedef npy_clongdouble clongdouble_t + */ +typedef npy_cfloat __pyx_t_5numpy_cfloat_t; + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":729 + * + * ctypedef npy_cfloat cfloat_t + * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< + * ctypedef npy_clongdouble clongdouble_t + * + */ +typedef npy_cdouble __pyx_t_5numpy_cdouble_t; + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":730 + * ctypedef npy_cfloat cfloat_t + * ctypedef npy_cdouble cdouble_t + * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< + * + * ctypedef npy_cdouble complex_t + */ +typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":732 + * ctypedef npy_clongdouble clongdouble_t + * + * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew1(a): + */ +typedef npy_cdouble __pyx_t_5numpy_complex_t; + +/* "libmr.pyx":95 + * + * # This is the Python wrapper class. 
+ * cdef class MR: # <<<<<<<<<<<<<< + * cdef MetaRecognition *thisptr + * def __cinit__(self, int scores_to_drop=0, + */ +struct __pyx_obj_5libmr_MR { + PyObject_HEAD + MetaRecognition *thisptr; +}; + + +/* "View.MemoryView":105 + * + * @cname("__pyx_array") + * cdef class array: # <<<<<<<<<<<<<< + * + * cdef: + */ +struct __pyx_array_obj { + PyObject_HEAD + struct __pyx_vtabstruct_array *__pyx_vtab; + char *data; + Py_ssize_t len; + char *format; + int ndim; + Py_ssize_t *_shape; + Py_ssize_t *_strides; + Py_ssize_t itemsize; + PyObject *mode; + PyObject *_format; + void (*callback_free_data)(void *); + int free_data; + int dtype_is_object; +}; + + +/* "View.MemoryView":279 + * + * @cname('__pyx_MemviewEnum') + * cdef class Enum(object): # <<<<<<<<<<<<<< + * cdef object name + * def __init__(self, name): + */ +struct __pyx_MemviewEnum_obj { + PyObject_HEAD + PyObject *name; +}; + + +/* "View.MemoryView":330 + * + * @cname('__pyx_memoryview') + * cdef class memoryview(object): # <<<<<<<<<<<<<< + * + * cdef object obj + */ +struct __pyx_memoryview_obj { + PyObject_HEAD + struct __pyx_vtabstruct_memoryview *__pyx_vtab; + PyObject *obj; + PyObject *_size; + PyObject *_array_interface; + PyThread_type_lock lock; + __pyx_atomic_int acquisition_count[2]; + __pyx_atomic_int *acquisition_count_aligned_p; + Py_buffer view; + int flags; + int dtype_is_object; + __Pyx_TypeInfo *typeinfo; +}; + + +/* "View.MemoryView":965 + * + * @cname('__pyx_memoryviewslice') + * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< + * "Internal class for passing memoryview slices to Python" + * + */ +struct __pyx_memoryviewslice_obj { + struct __pyx_memoryview_obj __pyx_base; + __Pyx_memviewslice from_slice; + PyObject *from_object; + PyObject *(*to_object_func)(char *); + int (*to_dtype_func)(char *, PyObject *); +}; + + + +/* "View.MemoryView":105 + * + * @cname("__pyx_array") + * cdef class array: # <<<<<<<<<<<<<< + * + * cdef: + */ + +struct __pyx_vtabstruct_array { + PyObject *(*get_memview)(struct __pyx_array_obj *); +}; +static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; + + +/* "View.MemoryView":330 + * + * @cname('__pyx_memoryview') + * cdef class memoryview(object): # <<<<<<<<<<<<<< + * + * cdef object obj + */ + +struct __pyx_vtabstruct_memoryview { + char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); + PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); + PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); + PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); + PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); + PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); + PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); +}; +static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; + + +/* "View.MemoryView":965 + * + * @cname('__pyx_memoryviewslice') + * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< + * "Internal class for passing memoryview slices to Python" + * + */ + +struct __pyx_vtabstruct__memoryviewslice { + struct __pyx_vtabstruct_memoryview __pyx_base; +}; +static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; + +/* --- Runtime support code (head) --- */ +/* Refnanny.proto */ +#ifndef CYTHON_REFNANNY + #define CYTHON_REFNANNY 0 +#endif +#if CYTHON_REFNANNY + typedef struct { + void (*INCREF)(void*, 
PyObject*, int); + void (*DECREF)(void*, PyObject*, int); + void (*GOTREF)(void*, PyObject*, int); + void (*GIVEREF)(void*, PyObject*, int); + void* (*SetupContext)(const char*, int, const char*); + void (*FinishContext)(void**); + } __Pyx_RefNannyAPIStruct; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); + #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; +#ifdef WITH_THREAD + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + if (acquire_gil) {\ + PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + PyGILState_Release(__pyx_gilstate_save);\ + } else {\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + } +#else + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) +#endif + #define __Pyx_RefNannyFinishContext()\ + __Pyx_RefNanny->FinishContext(&__pyx_refnanny) + #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) + #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) + #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) + #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) +#else + #define __Pyx_RefNannyDeclarations + #define __Pyx_RefNannySetupContext(name, acquire_gil) + #define __Pyx_RefNannyFinishContext() + #define __Pyx_INCREF(r) Py_INCREF(r) + #define __Pyx_DECREF(r) Py_DECREF(r) + #define __Pyx_GOTREF(r) + #define __Pyx_GIVEREF(r) + #define __Pyx_XINCREF(r) Py_XINCREF(r) + #define __Pyx_XDECREF(r) Py_XDECREF(r) + #define __Pyx_XGOTREF(r) + #define __Pyx_XGIVEREF(r) +#endif +#define __Pyx_XDECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_XDECREF(tmp);\ + } while (0) +#define __Pyx_DECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_DECREF(tmp);\ + } while (0) +#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) +#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) + +/* PyObjectGetAttrStr.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) +#endif + +/* GetBuiltinName.proto */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name); + +/* RaiseDoubleKeywords.proto */ +static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); + +/* ParseKeywords.proto */ +static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ + PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ + const char* function_name); + +/* RaiseArgTupleInvalid.proto */ +static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, + Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); + +/* GetItemInt.proto */ +#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, 
wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ + (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ + __Pyx_GetItemInt_Generic(o, to_py_func(i)))) +#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ + (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, + int wraparound, int boundscheck); +#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ + (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, + int wraparound, int boundscheck); +static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, + int is_list, int wraparound, int boundscheck); + +/* PyCFunctionFastCall.proto */ +#if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); +#else +#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) +#endif + +/* PyFunctionFastCall.proto */ +#if CYTHON_FAST_PYCALL +#define __Pyx_PyFunction_FastCall(func, args, nargs)\ + __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); +#else +#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) +#endif +#define __Pyx_BUILD_ASSERT_EXPR(cond)\ + (sizeof(char [1 - 2*!(cond)]) - 1) +#ifndef Py_MEMBER_SIZE +#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) +#endif + static size_t __pyx_pyframe_localsplus_offset = 0; + #include "frameobject.h" + #define __Pxy_PyFrame_Initialize_Offsets()\ + ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ + (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) + #define __Pyx_PyFrame_GetLocalsplus(frame)\ + (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) +#endif + +/* PyObjectCall.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); +#else +#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) +#endif + +/* PyObjectCall2Args.proto */ +static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); + +/* PyObjectCallMethO.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); +#endif + +/* PyObjectCallOneArg.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); + +/* DictGetItem.proto */ +#if PY_MAJOR_VERSION >= 3 && 
!CYTHON_COMPILING_IN_PYPY +static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key); +#define __Pyx_PyObject_Dict_GetItem(obj, name)\ + (likely(PyDict_CheckExact(obj)) ?\ + __Pyx_PyDict_GetItem(obj, name) : PyObject_GetItem(obj, name)) +#else +#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) +#define __Pyx_PyObject_Dict_GetItem(obj, name) PyObject_GetItem(obj, name) +#endif + +/* PyIntCompare.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, long intval, long inplace); + +/* ObjectGetItem.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); +#else +#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) +#endif + +/* PyDictVersioning.proto */ +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) +#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ + (version_var) = __PYX_GET_DICT_VERSION(dict);\ + (cache_var) = (value); +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ + (VAR) = __pyx_dict_cached_value;\ + } else {\ + (VAR) = __pyx_dict_cached_value = (LOOKUP);\ + __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ + }\ +} +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); +#else +#define __PYX_GET_DICT_VERSION(dict) (0) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); +#endif + +/* GetModuleGlobalName.proto */ +#if CYTHON_USE_DICT_VERSIONS +#define __Pyx_GetModuleGlobalName(var, name) {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ + (likely(__pyx_dict_cached_value) ? 
__Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ + __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ +} +#define __Pyx_GetModuleGlobalNameUncached(var, name) {\ + PY_UINT64_T __pyx_dict_version;\ + PyObject *__pyx_dict_cached_value;\ + (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ +} +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); +#else +#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) +#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); +#endif + +/* ExtTypeTest.proto */ +static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); + +/* IsLittleEndian.proto */ +static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); + +/* BufferFormatCheck.proto */ +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type); + +/* BufferGetAndValidate.proto */ +#define __Pyx_GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)\ + ((obj == Py_None || obj == NULL) ?\ + (__Pyx_ZeroBuffer(buf), 0) :\ + __Pyx__GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)) +static int __Pyx__GetBufferAndValidate(Py_buffer* buf, PyObject* obj, + __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); +static void __Pyx_ZeroBuffer(Py_buffer* buf); +static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); +static Py_ssize_t __Pyx_minusones[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; +static Py_ssize_t __Pyx_zeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; + +/* BufferIndexError.proto */ +static void __Pyx_RaiseBufferIndexError(int axis); + +#define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) +/* PyThreadStateGet.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; +#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; +#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type +#else +#define __Pyx_PyThreadState_declare +#define __Pyx_PyThreadState_assign +#define __Pyx_PyErr_Occurred() PyErr_Occurred() +#endif + +/* PyErrFetchRestore.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) +#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) +#else +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#endif +#else +#define __Pyx_PyErr_Clear() PyErr_Clear() +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#define 
__Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) +#endif + +/* MemviewSliceInit.proto */ +#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d +#define __Pyx_MEMVIEW_DIRECT 1 +#define __Pyx_MEMVIEW_PTR 2 +#define __Pyx_MEMVIEW_FULL 4 +#define __Pyx_MEMVIEW_CONTIG 8 +#define __Pyx_MEMVIEW_STRIDED 16 +#define __Pyx_MEMVIEW_FOLLOW 32 +#define __Pyx_IS_C_CONTIG 1 +#define __Pyx_IS_F_CONTIG 2 +static int __Pyx_init_memviewslice( + struct __pyx_memoryview_obj *memview, + int ndim, + __Pyx_memviewslice *memviewslice, + int memview_is_new_reference); +static CYTHON_INLINE int __pyx_add_acquisition_count_locked( + __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); +static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( + __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); +#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) +#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) +#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) +#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) +static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); +static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); + +/* RaiseException.proto */ +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); + +/* ArgTypeTest.proto */ +#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ + ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 
1 :\
+ __Pyx__ArgTypeTest(obj, type, name, exact))
+static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
+
+/* PyObjectCallNoArg.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
+#else
+#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
+#endif
+
+/* GetTopmostException.proto */
+#if CYTHON_USE_EXC_INFO_STACK
+static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
+#endif
+
+/* SaveResetException.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
+static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
+#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
+static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
+#else
+#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
+#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
+#endif
+
+/* PyErrExceptionMatches.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
+static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
+#else
+#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
+#endif
+
+/* GetException.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
+static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
+#else
+static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
+#endif
+
+/* IncludeStringH.proto */
+#include <string.h>
+
+/* BytesEquals.proto */
+static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
+
+/* UnicodeEquals.proto */
+static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
+
+/* StrEquals.proto */
+#if PY_MAJOR_VERSION >= 3
+#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
+#else
+#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
+#endif
+
+/* None.proto */
+static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t);
+
+/* UnaryNegOverflows.proto */
+#define UNARY_NEG_WOULD_OVERFLOW(x)\
+ (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
+
+static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
+static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/
+/* GetAttr.proto */
+static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
+
+/* decode_c_string_utf16.proto */
+static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) {
+ int byteorder = 0;
+ return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
+}
+static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) {
+ int byteorder = -1;
+ return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
+}
+static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) {
+ int byteorder = 1;
+ return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
+} + +/* decode_c_string.proto */ +static CYTHON_INLINE PyObject* __Pyx_decode_c_string( + const char* cstring, Py_ssize_t start, Py_ssize_t stop, + const char* encoding, const char* errors, + PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); + +/* GetAttr3.proto */ +static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); + +/* RaiseTooManyValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); + +/* RaiseNeedMoreValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); + +/* RaiseNoneIterError.proto */ +static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); + +/* SwapException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* Import.proto */ +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); + +/* FastTypeChecks.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); +#else +#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) +#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) +#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) +#endif +#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) + +static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ +/* ListCompAppend.proto */ +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { + PyListObject* L = (PyListObject*) list; + Py_ssize_t len = Py_SIZE(list); + if (likely(L->allocated > len)) { + Py_INCREF(x); + PyList_SET_ITEM(list, len, x); + __Pyx_SET_SIZE(list, len + 1); + return 0; + } + return PyList_Append(list, x); +} +#else +#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) +#endif + +/* PyIntBinop.proto */ +#if !CYTHON_COMPILING_IN_PYPY +static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); +#else +#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ + (inplace ? 
PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) +#endif + +/* ListExtend.proto */ +static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { +#if CYTHON_COMPILING_IN_CPYTHON + PyObject* none = _PyList_Extend((PyListObject*)L, v); + if (unlikely(!none)) + return -1; + Py_DECREF(none); + return 0; +#else + return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); +#endif +} + +/* ListAppend.proto */ +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { + PyListObject* L = (PyListObject*) list; + Py_ssize_t len = Py_SIZE(list); + if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { + Py_INCREF(x); + PyList_SET_ITEM(list, len, x); + __Pyx_SET_SIZE(list, len + 1); + return 0; + } + return PyList_Append(list, x); +} +#else +#define __Pyx_PyList_Append(L,x) PyList_Append(L,x) +#endif + +/* None.proto */ +static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); + +/* None.proto */ +static CYTHON_INLINE long __Pyx_div_long(long, long); + +/* ImportFrom.proto */ +static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); + +/* HasAttr.proto */ +static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); + +/* PyObject_GenericGetAttrNoDict.proto */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr +#endif + +/* PyObject_GenericGetAttr.proto */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr +#endif + +/* PyObjectGetAttrStrNoError.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); + +/* SetupReduce.proto */ +static int __Pyx_setup_reduce(PyObject* type_obj); + +/* SetVTable.proto */ +static int __Pyx_SetVtable(PyObject *dict, void *vtable); + +/* TypeImport.proto */ +#ifndef __PYX_HAVE_RT_ImportType_proto +#define __PYX_HAVE_RT_ImportType_proto +enum __Pyx_ImportType_CheckSize { + __Pyx_ImportType_CheckSize_Error = 0, + __Pyx_ImportType_CheckSize_Warn = 1, + __Pyx_ImportType_CheckSize_Ignore = 2 +}; +static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size); +#endif + +/* CLineInTraceback.proto */ +#ifdef CYTHON_CLINE_IN_TRACEBACK +#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0)
+#else
+static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
+#endif
+
+/* CodeObjectCache.proto */
+typedef struct {
+ PyCodeObject* code_object;
+ int code_line;
+} __Pyx_CodeObjectCacheEntry;
+struct __Pyx_CodeObjectCache {
+ int count;
+ int max_count;
+ __Pyx_CodeObjectCacheEntry* entries;
+};
+static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
+static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
+static PyCodeObject *__pyx_find_code_object(int code_line);
+static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
+
+/* AddTraceback.proto */
+static void __Pyx_AddTraceback(const char *funcname, int c_line,
+ int py_line, const char *filename);
+
+#if PY_MAJOR_VERSION < 3
+ static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
+ static void __Pyx_ReleaseBuffer(Py_buffer *view);
+#else
+ #define __Pyx_GetBuffer PyObject_GetBuffer
+ #define __Pyx_ReleaseBuffer PyBuffer_Release
+#endif
+
+
+/* BufferStructDeclare.proto */
+typedef struct {
+ Py_ssize_t shape, strides, suboffsets;
+} __Pyx_Buf_DimInfo;
+typedef struct {
+ size_t refcount;
+ Py_buffer pybuffer;
+} __Pyx_Buffer;
+typedef struct {
+ __Pyx_Buffer *rcbuffer;
+ char *data;
+ __Pyx_Buf_DimInfo diminfo[8];
+} __Pyx_LocalBuf_ND;
+
+/* MemviewSliceIsContig.proto */
+static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim);
+
+/* OverlappingSlices.proto */
+static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
+ __Pyx_memviewslice *slice2,
+ int ndim, size_t itemsize);
+
+/* Capsule.proto */
+static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig);
+
+/* GCCDiagnostics.proto */
+#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
+#define __Pyx_HAS_GCC_DIAGNOSTIC
+#endif
+
+/* TypeInfoCompare.proto */
+static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b);
+
+/* MemviewSliceValidateAndInit.proto */
+static int __Pyx_ValidateAndInit_memviewslice(
+ int *axes_specs,
+ int c_or_f_flag,
+ int buf_flags,
+ int ndim,
+ __Pyx_TypeInfo *dtype,
+ __Pyx_BufFmt_StackElem stack[],
+ __Pyx_memviewslice *memviewslice,
+ PyObject *original_obj);
+
+/* ObjectToMemviewSlice.proto */
+static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_double(PyObject *, int writable_flag);
+
+/* CppExceptionConversion.proto */
+#ifndef __Pyx_CppExn2PyErr
+#include <new>
+#include <typeinfo>
+#include <stdexcept>
+#include <ios>
+static void __Pyx_CppExn2PyErr() {
+ try {
+ if (PyErr_Occurred())
+ ; // let the latest Python exn pass through and ignore the current one
+ else
+ throw;
+ } catch (const std::bad_alloc& exn) {
+ PyErr_SetString(PyExc_MemoryError, exn.what());
+ } catch (const std::bad_cast& exn) {
+ PyErr_SetString(PyExc_TypeError, exn.what());
+ } catch (const std::bad_typeid& exn) {
+ PyErr_SetString(PyExc_TypeError, exn.what());
+ } catch (const std::domain_error& exn) {
+ PyErr_SetString(PyExc_ValueError, exn.what());
+ } catch (const std::invalid_argument& exn) {
+ PyErr_SetString(PyExc_ValueError, exn.what());
+ } catch (const std::ios_base::failure& exn) {
+ PyErr_SetString(PyExc_IOError, exn.what());
+ } catch (const std::out_of_range& exn) {
+ PyErr_SetString(PyExc_IndexError, exn.what());
+ } catch (const std::overflow_error& exn) {
+ PyErr_SetString(PyExc_OverflowError, exn.what());
+ } catch (const std::range_error& exn) {
+ PyErr_SetString(PyExc_ArithmeticError, exn.what());
+ } catch (const std::underflow_error&
exn) { + PyErr_SetString(PyExc_ArithmeticError, exn.what()); + } catch (const std::exception& exn) { + PyErr_SetString(PyExc_RuntimeError, exn.what()); + } + catch (...) + { + PyErr_SetString(PyExc_RuntimeError, "Unknown exception"); + } +} +#endif + +/* Print.proto */ +static int __Pyx_Print(PyObject*, PyObject *, int); +#if CYTHON_COMPILING_IN_PYPY || PY_MAJOR_VERSION >= 3 +static PyObject* __pyx_print = 0; +static PyObject* __pyx_print_kwargs = 0; +#endif + +/* MemviewDtypeToObject.proto */ +static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp); +static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj); + +/* RealImag.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + #define __Pyx_CREAL(z) ((z).real()) + #define __Pyx_CIMAG(z) ((z).imag()) + #else + #define __Pyx_CREAL(z) (__real__(z)) + #define __Pyx_CIMAG(z) (__imag__(z)) + #endif +#else + #define __Pyx_CREAL(z) ((z).real) + #define __Pyx_CIMAG(z) ((z).imag) +#endif +#if defined(__cplusplus) && CYTHON_CCOMPLEX\ + && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) + #define __Pyx_SET_CREAL(z,x) ((z).real(x)) + #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) +#else + #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) + #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) +#endif + +/* Arithmetic.proto */ +#if CYTHON_CCOMPLEX + #define __Pyx_c_eq_float(a, b) ((a)==(b)) + #define __Pyx_c_sum_float(a, b) ((a)+(b)) + #define __Pyx_c_diff_float(a, b) ((a)-(b)) + #define __Pyx_c_prod_float(a, b) ((a)*(b)) + #define __Pyx_c_quot_float(a, b) ((a)/(b)) + #define __Pyx_c_neg_float(a) (-(a)) + #ifdef __cplusplus + #define __Pyx_c_is_zero_float(z) ((z)==(float)0) + #define __Pyx_c_conj_float(z) (::std::conj(z)) + #if 1 + #define __Pyx_c_abs_float(z) (::std::abs(z)) + #define __Pyx_c_pow_float(a, b) (::std::pow(a, b)) + #endif + #else + #define __Pyx_c_is_zero_float(z) ((z)==0) + #define __Pyx_c_conj_float(z) (conjf(z)) + #if 1 + #define __Pyx_c_abs_float(z) (cabsf(z)) + #define __Pyx_c_pow_float(a, b) (cpowf(a, b)) + #endif + #endif +#else + static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex); + static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex); + #if 1 + static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex); + #endif +#endif + +/* Arithmetic.proto */ +#if CYTHON_CCOMPLEX + #define __Pyx_c_eq_double(a, b) ((a)==(b)) + #define __Pyx_c_sum_double(a, b) ((a)+(b)) + #define __Pyx_c_diff_double(a, b) ((a)-(b)) + #define __Pyx_c_prod_double(a, b) ((a)*(b)) + #define __Pyx_c_quot_double(a, b) ((a)/(b)) + #define __Pyx_c_neg_double(a) (-(a)) + #ifdef __cplusplus + #define __Pyx_c_is_zero_double(z) ((z)==(double)0) + #define 
__Pyx_c_conj_double(z) (::std::conj(z)) + #if 1 + #define __Pyx_c_abs_double(z) (::std::abs(z)) + #define __Pyx_c_pow_double(a, b) (::std::pow(a, b)) + #endif + #else + #define __Pyx_c_is_zero_double(z) ((z)==0) + #define __Pyx_c_conj_double(z) (conj(z)) + #if 1 + #define __Pyx_c_abs_double(z) (cabs(z)) + #define __Pyx_c_pow_double(a, b) (cpow(a, b)) + #endif + #endif +#else + static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex); + static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex); + #if 1 + static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex); + #endif +#endif + +/* MemviewSliceCopyTemplate.proto */ +static __Pyx_memviewslice +__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, + const char *mode, int ndim, + size_t sizeof_dtype, int contig_flag, + int dtype_is_object); + +/* CIntFromPy.proto */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE size_t __Pyx_PyInt_As_size_t(PyObject *); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); + +/* PrintOne.proto */ +static int __Pyx_PrintOne(PyObject* stream, PyObject *o); + +/* CIntFromPy.proto */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); + +/* CIntFromPy.proto */ +static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); + +/* CheckBinaryVersion.proto */ +static int __Pyx_check_binary_version(void); + +/* InitStrings.proto */ +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); + +static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ +static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ +static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ +static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ +static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ +static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ +static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ +static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ +static 
PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ +static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ + +/* Module declarations from 'libc.string' */ + +/* Module declarations from 'libc.stdlib' */ + +/* Module declarations from 'libcpp' */ + +/* Module declarations from 'libcpp.string' */ + +/* Module declarations from 'cpython.buffer' */ + +/* Module declarations from 'libc.stdio' */ + +/* Module declarations from '__builtin__' */ + +/* Module declarations from 'cpython.type' */ +static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; + +/* Module declarations from 'cpython' */ + +/* Module declarations from 'cpython.object' */ + +/* Module declarations from 'cpython.ref' */ + +/* Module declarations from 'cpython.mem' */ + +/* Module declarations from 'numpy' */ + +/* Module declarations from 'numpy' */ +static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; +static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; +static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; +static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; +static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; + +/* Module declarations from 'libmr' */ +static PyTypeObject *__pyx_ptype_5libmr_MR = 0; +static PyTypeObject *__pyx_array_type = 0; +static PyTypeObject *__pyx_MemviewEnum_type = 0; +static PyTypeObject *__pyx_memoryview_type = 0; +static PyTypeObject *__pyx_memoryviewslice_type = 0; +static PyObject *generic = 0; +static PyObject *strided = 0; +static PyObject *indirect = 0; +static PyObject *contiguous = 0; +static PyObject *indirect_contiguous = 0; +static int __pyx_memoryview_thread_locks_used; +static PyThread_type_lock __pyx_memoryview_thread_locks[8]; +static CYTHON_INLINE PyObject *__pyx_convert_PyObject_string_to_py_std__in_string(std::string const &); /*proto*/ +static CYTHON_INLINE PyObject *__pyx_convert_PyUnicode_string_to_py_std__in_string(std::string const &); /*proto*/ +static CYTHON_INLINE PyObject *__pyx_convert_PyStr_string_to_py_std__in_string(std::string const &); /*proto*/ +static CYTHON_INLINE PyObject *__pyx_convert_PyBytes_string_to_py_std__in_string(std::string const &); /*proto*/ +static CYTHON_INLINE PyObject *__pyx_convert_PyByteArray_string_to_py_std__in_string(std::string const &); /*proto*/ +static std::string __pyx_convert_string_from_py_std__in_string(PyObject *); /*proto*/ +static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ +static void *__pyx_align_pointer(void *, size_t); /*proto*/ +static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ +static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ +static PyObject *_unellipsify(PyObject *, int); /*proto*/ +static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ +static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ +static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ +static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ +static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ +static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char 
*, PyObject *), int); /*proto*/ +static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ +static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ +static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ +static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ +static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ +static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ +static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ +static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ +static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ +static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ +static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ +static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ +static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ +static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ +static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ +static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ +static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ +static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ +static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ +static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ +static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ +static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ +static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_double_t = { "double_t", NULL, sizeof(__pyx_t_5numpy_double_t), { 0 }, 0, 'R', 0, 0 }; +static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 }; +#define __Pyx_MODULE_NAME "libmr" +extern int __pyx_module_is_main_libmr; +int __pyx_module_is_main_libmr = 0; + +/* Implementation of 'libmr' */ +static PyObject *__pyx_builtin_xrange; +static PyObject *__pyx_builtin_range; +static PyObject *__pyx_builtin_TypeError; +static PyObject *__pyx_builtin_ImportError; +static PyObject *__pyx_builtin_ValueError; +static PyObject *__pyx_builtin_MemoryError; +static PyObject *__pyx_builtin_enumerate; +static PyObject *__pyx_builtin_Ellipsis; +static PyObject *__pyx_builtin_id; +static PyObject *__pyx_builtin_IndexError; +static const char __pyx_k_O[] = "O"; +static const char __pyx_k_c[] = "c"; +static const char __pyx_k_d[] = "d"; +static const char __pyx_k_x[] = "x"; +static const char __pyx_k_MR[] = "MR"; +static const char __pyx_k_id[] = "id"; +static const char __pyx_k_np[] = "np"; +static const char __pyx_k_end[] = "end"; +static const char __pyx_k_new[] = "__new__"; +static const char __pyx_k_obj[] = "obj"; +static const char __pyx_k_base[] = "base"; +static const char __pyx_k_dict[] = 
"__dict__"; +static const char __pyx_k_file[] = "file"; +static const char __pyx_k_main[] = "__main__"; +static const char __pyx_k_mode[] = "mode"; +static const char __pyx_k_name[] = "name"; +static const char __pyx_k_ndim[] = "ndim"; +static const char __pyx_k_pack[] = "pack"; +static const char __pyx_k_pymr[] = "pymr"; +static const char __pyx_k_size[] = "size"; +static const char __pyx_k_step[] = "step"; +static const char __pyx_k_stop[] = "stop"; +static const char __pyx_k_test[] = "__test__"; +static const char __pyx_k_ASCII[] = "ASCII"; +static const char __pyx_k_alpha[] = "alpha"; +static const char __pyx_k_class[] = "__class__"; +static const char __pyx_k_dtype[] = "dtype"; +static const char __pyx_k_error[] = "error"; +static const char __pyx_k_flags[] = "flags"; +static const char __pyx_k_input[] = "input"; +static const char __pyx_k_libmr[] = "libmr"; +static const char __pyx_k_numpy[] = "numpy"; +static const char __pyx_k_print[] = "print"; +static const char __pyx_k_range[] = "range"; +static const char __pyx_k_shape[] = "shape"; +static const char __pyx_k_start[] = "start"; +static const char __pyx_k_zeros[] = "zeros"; +static const char __pyx_k_encode[] = "encode"; +static const char __pyx_k_format[] = "format"; +static const char __pyx_k_import[] = "__import__"; +static const char __pyx_k_labels[] = "labels"; +static const char __pyx_k_name_2[] = "__name__"; +static const char __pyx_k_pickle[] = "pickle"; +static const char __pyx_k_reduce[] = "__reduce__"; +static const char __pyx_k_scores[] = "scores"; +static const char __pyx_k_struct[] = "struct"; +static const char __pyx_k_unpack[] = "unpack"; +static const char __pyx_k_update[] = "update"; +static const char __pyx_k_xrange[] = "xrange"; +static const char __pyx_k_fortran[] = "fortran"; +static const char __pyx_k_has_key[] = "has_key"; +static const char __pyx_k_memview[] = "memview"; +static const char __pyx_k_verbose[] = "verbose"; +static const char __pyx_k_Ellipsis[] = "Ellipsis"; +static const char __pyx_k_fit_size[] = "fit_size"; +static const char __pyx_k_fit_type[] = "fit_type"; +static const char __pyx_k_getstate[] = "__getstate__"; +static const char __pyx_k_itemsize[] = "itemsize"; +static const char __pyx_k_pyx_type[] = "__pyx_type"; +static const char __pyx_k_setstate[] = "__setstate__"; +static const char __pyx_k_svm_data[] = "svm_data"; +static const char __pyx_k_TypeError[] = "TypeError"; +static const char __pyx_k_enumerate[] = "enumerate"; +static const char __pyx_k_inputData[] = "inputData"; +static const char __pyx_k_libmr_pyx[] = "libmr.pyx"; +static const char __pyx_k_pyx_state[] = "__pyx_state"; +static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; +static const char __pyx_k_threshold[] = "threshold"; +static const char __pyx_k_IndexError[] = "IndexError"; +static const char __pyx_k_ValueError[] = "ValueError"; +static const char __pyx_k_pyx_result[] = "__pyx_result"; +static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; +static const char __pyx_k_ImportError[] = "ImportError"; +static const char __pyx_k_MR_object_r[] = ""; +static const char __pyx_k_MemoryError[] = "MemoryError"; +static const char __pyx_k_PickleError[] = "PickleError"; +static const char __pyx_k_fitting_size[] = "fitting_size"; +static const char __pyx_k_pyx_checksum[] = "__pyx_checksum"; +static const char __pyx_k_stringsource[] = "stringsource"; +static const char __pyx_k_inputDataSize[] = "inputDataSize"; +static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; +static const char __pyx_k_reduce_cython[] = 
"__reduce_cython__"; +static const char __pyx_k_scores_to_drop[] = "scores_to_drop"; +static const char __pyx_k_View_MemoryView[] = "View.MemoryView"; +static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; +static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; +static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; +static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; +static const char __pyx_k_load_from_string[] = "load_from_string"; +static const char __pyx_k_translate_amount[] = "translate_amount"; +static const char __pyx_k_label_of_interest[] = "label_of_interest"; +static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; +static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; +static const char __pyx_k_strided_and_direct[] = ""; +static const char __pyx_k_strided_and_indirect[] = ""; +static const char __pyx_k_contiguous_and_direct[] = ""; +static const char __pyx_k_MemoryView_of_r_object[] = ""; +static const char __pyx_k_MemoryView_of_r_at_0x_x[] = ""; +static const char __pyx_k_contiguous_and_indirect[] = ""; +static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; +static const char __pyx_k_label_has_positive_score[] = "label_has_positive_score"; +static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; +static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; +static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; +static const char __pyx_k_strided_and_direct_or_indirect[] = ""; +static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import"; +static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; +static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; +static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview"; +static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview"; +static const char __pyx_k_Data_initizalization_complete_No[] = "Data initizalization complete. 
Now calling C++ code"; +static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; +static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))"; +static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; +static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; +static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; +static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; +static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; +static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; +static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import"; +static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; +static PyObject *__pyx_n_s_ASCII; +static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; +static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; +static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor; +static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi; +static PyObject *__pyx_kp_s_Cannot_index_with_type_s; +static PyObject *__pyx_kp_s_Data_initizalization_complete_No; +static PyObject *__pyx_n_s_Ellipsis; +static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; +static PyObject *__pyx_n_s_ImportError; +static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0; +static PyObject *__pyx_n_s_IndexError; +static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; +static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; +static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; +static PyObject *__pyx_n_s_MR; +static PyObject *__pyx_kp_s_MR_object_r; +static PyObject *__pyx_n_s_MemoryError; +static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; +static PyObject *__pyx_kp_s_MemoryView_of_r_object; +static PyObject *__pyx_n_b_O; +static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; +static PyObject *__pyx_n_s_PickleError; +static PyObject *__pyx_n_s_TypeError; +static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; +static PyObject *__pyx_n_s_ValueError; +static PyObject *__pyx_n_s_View_MemoryView; +static PyObject *__pyx_n_s_allocate_buffer; +static PyObject *__pyx_n_s_alpha; +static PyObject *__pyx_n_s_base; +static PyObject *__pyx_n_s_c; +static PyObject *__pyx_n_u_c; +static PyObject *__pyx_n_s_class; +static PyObject *__pyx_n_s_cline_in_traceback; +static PyObject *__pyx_kp_s_contiguous_and_direct; +static PyObject *__pyx_kp_s_contiguous_and_indirect; +static PyObject *__pyx_n_s_d; +static PyObject *__pyx_n_s_dict; +static PyObject *__pyx_n_s_dtype; +static PyObject *__pyx_n_s_dtype_is_object; +static PyObject *__pyx_n_s_encode; +static PyObject *__pyx_n_s_end; +static PyObject *__pyx_n_s_enumerate; +static PyObject *__pyx_n_s_error; +static PyObject *__pyx_n_s_file; +static PyObject *__pyx_n_s_fit_size; +static PyObject *__pyx_n_s_fit_type; +static PyObject *__pyx_n_s_fitting_size; +static PyObject *__pyx_n_s_flags; +static PyObject *__pyx_n_s_format; +static PyObject *__pyx_n_s_fortran; +static PyObject *__pyx_n_u_fortran; +static PyObject *__pyx_n_s_getstate; +static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; +static PyObject 
*__pyx_n_s_has_key; +static PyObject *__pyx_n_s_id; +static PyObject *__pyx_n_s_import; +static PyObject *__pyx_n_s_input; +static PyObject *__pyx_n_s_inputData; +static PyObject *__pyx_n_s_inputDataSize; +static PyObject *__pyx_n_s_itemsize; +static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; +static PyObject *__pyx_n_s_label_has_positive_score; +static PyObject *__pyx_n_s_label_of_interest; +static PyObject *__pyx_n_s_labels; +static PyObject *__pyx_n_s_libmr; +static PyObject *__pyx_kp_s_libmr_pyx; +static PyObject *__pyx_n_s_load_from_string; +static PyObject *__pyx_n_s_main; +static PyObject *__pyx_n_s_memview; +static PyObject *__pyx_n_s_mode; +static PyObject *__pyx_n_s_name; +static PyObject *__pyx_n_s_name_2; +static PyObject *__pyx_n_s_ndim; +static PyObject *__pyx_n_s_new; +static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; +static PyObject *__pyx_n_s_np; +static PyObject *__pyx_n_s_numpy; +static PyObject *__pyx_kp_s_numpy_core_multiarray_failed_to; +static PyObject *__pyx_kp_s_numpy_core_umath_failed_to_impor; +static PyObject *__pyx_n_s_obj; +static PyObject *__pyx_n_s_pack; +static PyObject *__pyx_n_s_pickle; +static PyObject *__pyx_n_s_print; +static PyObject *__pyx_n_s_pymr; +static PyObject *__pyx_n_s_pyx_PickleError; +static PyObject *__pyx_n_s_pyx_checksum; +static PyObject *__pyx_n_s_pyx_getbuffer; +static PyObject *__pyx_n_s_pyx_result; +static PyObject *__pyx_n_s_pyx_state; +static PyObject *__pyx_n_s_pyx_type; +static PyObject *__pyx_n_s_pyx_unpickle_Enum; +static PyObject *__pyx_n_s_pyx_vtable; +static PyObject *__pyx_n_s_range; +static PyObject *__pyx_n_s_reduce; +static PyObject *__pyx_n_s_reduce_cython; +static PyObject *__pyx_n_s_reduce_ex; +static PyObject *__pyx_n_s_scores; +static PyObject *__pyx_n_s_scores_to_drop; +static PyObject *__pyx_n_s_setstate; +static PyObject *__pyx_n_s_setstate_cython; +static PyObject *__pyx_n_s_shape; +static PyObject *__pyx_n_s_size; +static PyObject *__pyx_n_s_start; +static PyObject *__pyx_n_s_step; +static PyObject *__pyx_n_s_stop; +static PyObject *__pyx_kp_s_strided_and_direct; +static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; +static PyObject *__pyx_kp_s_strided_and_indirect; +static PyObject *__pyx_kp_s_stringsource; +static PyObject *__pyx_n_s_struct; +static PyObject *__pyx_n_s_svm_data; +static PyObject *__pyx_n_s_test; +static PyObject *__pyx_n_s_threshold; +static PyObject *__pyx_n_s_translate_amount; +static PyObject *__pyx_kp_s_unable_to_allocate_array_data; +static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; +static PyObject *__pyx_n_s_unpack; +static PyObject *__pyx_n_s_update; +static PyObject *__pyx_n_s_verbose; +static PyObject *__pyx_n_s_x; +static PyObject *__pyx_n_s_xrange; +static PyObject *__pyx_n_s_zeros; +static int __pyx_pf_5libmr_2MR___cinit__(struct __pyx_obj_5libmr_MR *__pyx_v_self, int __pyx_v_scores_to_drop, int __pyx_v_fitting_size, bool __pyx_v_verbose, double __pyx_v_alpha, int __pyx_v_translate_amount); /* proto */ +static void __pyx_pf_5libmr_2MR_2__dealloc__(struct __pyx_obj_5libmr_MR *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_5libmr_2MR_4fit_low(struct __pyx_obj_5libmr_MR *__pyx_v_self, PyObject *__pyx_v_inputData, int __pyx_v_fit_size); /* proto */ +static PyObject *__pyx_pf_5libmr_2MR_6fit_high(struct __pyx_obj_5libmr_MR *__pyx_v_self, PyObject *__pyx_v_inputData, int __pyx_v_fit_size); /* proto */ +static PyObject *__pyx_pf_5libmr_2MR_8mr_save(struct __pyx_obj_5libmr_MR *__pyx_v_self, PyObject *__pyx_v_filename); /* proto */ +static 
PyObject *__pyx_pf_5libmr_2MR_10mr_load(struct __pyx_obj_5libmr_MR *__pyx_v_self, PyObject *__pyx_v_filename); /* proto */ +static PyObject *__pyx_pf_5libmr_2MR_12fit_svm(struct __pyx_obj_5libmr_MR *__pyx_v_self, PyObject *__pyx_v_svm_data, PyObject *__pyx_v_inputDataSize, PyObject *__pyx_v_label_of_interest, PyObject *__pyx_v_label_has_positive_score, PyObject *__pyx_v_fit_type, PyObject *__pyx_v_fit_size); /* proto */ +static PyObject *__pyx_pf_5libmr_2MR_8is_valid___get__(struct __pyx_obj_5libmr_MR *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_5libmr_2MR_14reset(struct __pyx_obj_5libmr_MR *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_5libmr_2MR_16predict_match(struct __pyx_obj_5libmr_MR *__pyx_v_self, double __pyx_v_x, double __pyx_v_threshold); /* proto */ +static PyObject *__pyx_pf_5libmr_2MR_18w_score(struct __pyx_obj_5libmr_MR *__pyx_v_self, double __pyx_v_x); /* proto */ +static PyObject *__pyx_pf_5libmr_2MR_20cdf(struct __pyx_obj_5libmr_MR *__pyx_v_self, double __pyx_v_x); /* proto */ +static PyObject *__pyx_pf_5libmr_2MR_22inv(struct __pyx_obj_5libmr_MR *__pyx_v_self, double __pyx_v_p); /* proto */ +static PyObject *__pyx_pf_5libmr_2MR_24w_score_vector(struct __pyx_obj_5libmr_MR *__pyx_v_self, __Pyx_memviewslice __pyx_v_invec); /* proto */ +static PyObject *__pyx_pf_5libmr_2MR_26__str__(struct __pyx_obj_5libmr_MR *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_5libmr_2MR_28__repr__(struct __pyx_obj_5libmr_MR *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_5libmr_2MR_8tailsize___get__(struct __pyx_obj_5libmr_MR *__pyx_v_self); /* proto */ +static int __pyx_pf_5libmr_2MR_8tailsize_2__set__(struct __pyx_obj_5libmr_MR *__pyx_v_self, int __pyx_v_nsize); /* proto */ +static PyObject *__pyx_pf_5libmr_2MR_16translate_amount___get__(struct __pyx_obj_5libmr_MR *__pyx_v_self); /* proto */ +static int __pyx_pf_5libmr_2MR_16translate_amount_2__set__(struct __pyx_obj_5libmr_MR *__pyx_v_self, int __pyx_v_ntrans); /* proto */ +static PyObject *__pyx_pf_5libmr_2MR_4sign___get__(struct __pyx_obj_5libmr_MR *__pyx_v_self); /* proto */ +static int __pyx_pf_5libmr_2MR_4sign_2__set__(struct __pyx_obj_5libmr_MR *__pyx_v_self, int __pyx_v_nsign); /* proto */ +static PyObject *__pyx_pf_5libmr_2MR_11small_score___get__(struct __pyx_obj_5libmr_MR *__pyx_v_self); /* proto */ +static int __pyx_pf_5libmr_2MR_11small_score_2__set__(struct __pyx_obj_5libmr_MR *__pyx_v_self, double __pyx_v_nscore); /* proto */ +static PyObject *__pyx_pf_5libmr_2MR_7verbose___get__(struct __pyx_obj_5libmr_MR *__pyx_v_self); /* proto */ +static int __pyx_pf_5libmr_2MR_7verbose_2__set__(struct __pyx_obj_5libmr_MR *__pyx_v_self, bool __pyx_v_verbose); /* proto */ +static PyObject *__pyx_pf_5libmr_2MR_30__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_5libmr_MR *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_5libmr_2MR_32__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_5libmr_MR *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ +static PyObject *__pyx_pf_5libmr_load_from_string(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_input); /* proto */ +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ 
+static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ +static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ +static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ +static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ +static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ +static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ +static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ +static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ +static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ +static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj 
*__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ +static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ +static PyObject *__pyx_tp_new_5libmr_MR(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static PyObject *__pyx_int_0; +static PyObject *__pyx_int_1; +static PyObject *__pyx_int_2; +static PyObject *__pyx_int_3; +static PyObject *__pyx_int_4; +static PyObject *__pyx_int_184977713; +static PyObject *__pyx_int_neg_1; +static PyObject *__pyx_tuple_; +static PyObject *__pyx_tuple__2; +static PyObject *__pyx_tuple__3; +static PyObject *__pyx_tuple__4; +static PyObject *__pyx_tuple__5; +static PyObject *__pyx_tuple__6; +static PyObject *__pyx_tuple__7; +static PyObject *__pyx_tuple__8; +static PyObject *__pyx_tuple__9; +static PyObject *__pyx_slice__19; +static PyObject *__pyx_tuple__10; 
+static PyObject *__pyx_tuple__11; +static PyObject *__pyx_tuple__12; +static PyObject *__pyx_tuple__13; +static PyObject *__pyx_tuple__14; +static PyObject *__pyx_tuple__15; +static PyObject *__pyx_tuple__16; +static PyObject *__pyx_tuple__17; +static PyObject *__pyx_tuple__18; +static PyObject *__pyx_tuple__20; +static PyObject *__pyx_tuple__21; +static PyObject *__pyx_tuple__22; +static PyObject *__pyx_tuple__23; +static PyObject *__pyx_tuple__25; +static PyObject *__pyx_tuple__26; +static PyObject *__pyx_tuple__27; +static PyObject *__pyx_tuple__28; +static PyObject *__pyx_tuple__29; +static PyObject *__pyx_tuple__30; +static PyObject *__pyx_codeobj__24; +static PyObject *__pyx_codeobj__31; +/* Late includes */ + +/* "libmr.pyx":97 + * cdef class MR: + * cdef MetaRecognition *thisptr + * def __cinit__(self, int scores_to_drop=0, # <<<<<<<<<<<<<< + * int fitting_size=9, + * bool verbose=False, + */ + +/* Python wrapper */ +static int __pyx_pw_5libmr_2MR_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_pw_5libmr_2MR_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + int __pyx_v_scores_to_drop; + int __pyx_v_fitting_size; + bool __pyx_v_verbose; + double __pyx_v_alpha; + int __pyx_v_translate_amount; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_scores_to_drop,&__pyx_n_s_fitting_size,&__pyx_n_s_verbose,&__pyx_n_s_alpha,&__pyx_n_s_translate_amount,0}; + PyObject* values[5] = {0,0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_scores_to_drop); + if (value) { values[0] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 1: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_fitting_size); + if (value) { values[1] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 2: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_verbose); + if (value) { values[2] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 3: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_alpha); + if (value) { values[3] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 4: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_translate_amount); + if (value) { values[4] = value; kw_args--; } + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(0, 97, __pyx_L3_error) + } + } else { + switch (PyTuple_GET_SIZE(__pyx_args)) { + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + 
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + } + if (values[0]) { + __pyx_v_scores_to_drop = __Pyx_PyInt_As_int(values[0]); if (unlikely((__pyx_v_scores_to_drop == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 97, __pyx_L3_error) + } else { + __pyx_v_scores_to_drop = ((int)0); + } + if (values[1]) { + __pyx_v_fitting_size = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_fitting_size == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 98, __pyx_L3_error) + } else { + __pyx_v_fitting_size = ((int)9); + } + if (values[2]) { + __pyx_v_verbose = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_verbose == ((bool)-1)) && PyErr_Occurred())) __PYX_ERR(0, 99, __pyx_L3_error) + } else { + + /* "libmr.pyx":99 + * def __cinit__(self, int scores_to_drop=0, + * int fitting_size=9, + * bool verbose=False, # <<<<<<<<<<<<<< + * double alpha=5.0, + * int translate_amount=10000): + */ + __pyx_v_verbose = ((bool)0); + } + if (values[3]) { + __pyx_v_alpha = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_alpha == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 100, __pyx_L3_error) + } else { + __pyx_v_alpha = ((double)5.0); + } + if (values[4]) { + __pyx_v_translate_amount = __Pyx_PyInt_As_int(values[4]); if (unlikely((__pyx_v_translate_amount == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 101, __pyx_L3_error) + } else { + __pyx_v_translate_amount = ((int)0x2710); + } + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 0, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 97, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("libmr.MR.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return -1; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_5libmr_2MR___cinit__(((struct __pyx_obj_5libmr_MR *)__pyx_v_self), __pyx_v_scores_to_drop, __pyx_v_fitting_size, __pyx_v_verbose, __pyx_v_alpha, __pyx_v_translate_amount); + + /* "libmr.pyx":97 + * cdef class MR: + * cdef MetaRecognition *thisptr + * def __cinit__(self, int scores_to_drop=0, # <<<<<<<<<<<<<< + * int fitting_size=9, + * bool verbose=False, + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_pf_5libmr_2MR___cinit__(struct __pyx_obj_5libmr_MR *__pyx_v_self, int __pyx_v_scores_to_drop, int __pyx_v_fitting_size, bool __pyx_v_verbose, double __pyx_v_alpha, int __pyx_v_translate_amount) { + int __pyx_r; + __Pyx_RefNannyDeclarations + MetaRecognition *__pyx_t_1; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__cinit__", 0); + + /* "libmr.pyx":105 + * Create a new MR object. + * """ + * self.thisptr = new MetaRecognition(scores_to_drop,fitting_size,verbose,alpha,translate_amount) # <<<<<<<<<<<<<< + * def __dealloc__(self): + * del self.thisptr + */ + try { + __pyx_t_1 = new MetaRecognition(__pyx_v_scores_to_drop, __pyx_v_fitting_size, __pyx_v_verbose, __pyx_v_alpha, __pyx_v_translate_amount); + } catch(...) 
{ + __Pyx_CppExn2PyErr(); + __PYX_ERR(0, 105, __pyx_L1_error) + } + __pyx_v_self->thisptr = __pyx_t_1; + + /* "libmr.pyx":97 + * cdef class MR: + * cdef MetaRecognition *thisptr + * def __cinit__(self, int scores_to_drop=0, # <<<<<<<<<<<<<< + * int fitting_size=9, + * bool verbose=False, + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_AddTraceback("libmr.MR.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "libmr.pyx":106 + * """ + * self.thisptr = new MetaRecognition(scores_to_drop,fitting_size,verbose,alpha,translate_amount) + * def __dealloc__(self): # <<<<<<<<<<<<<< + * del self.thisptr + * def fit_low(self, inputData, int fit_size): + */ + +/* Python wrapper */ +static void __pyx_pw_5libmr_2MR_3__dealloc__(PyObject *__pyx_v_self); /*proto*/ +static void __pyx_pw_5libmr_2MR_3__dealloc__(PyObject *__pyx_v_self) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); + __pyx_pf_5libmr_2MR_2__dealloc__(((struct __pyx_obj_5libmr_MR *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +static void __pyx_pf_5libmr_2MR_2__dealloc__(struct __pyx_obj_5libmr_MR *__pyx_v_self) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__dealloc__", 0); + + /* "libmr.pyx":107 + * self.thisptr = new MetaRecognition(scores_to_drop,fitting_size,verbose,alpha,translate_amount) + * def __dealloc__(self): + * del self.thisptr # <<<<<<<<<<<<<< + * def fit_low(self, inputData, int fit_size): + * """Use fit_low if your data is such that is smaller is better. Fits a + */ + delete __pyx_v_self->thisptr; + + /* "libmr.pyx":106 + * """ + * self.thisptr = new MetaRecognition(scores_to_drop,fitting_size,verbose,alpha,translate_amount) + * def __dealloc__(self): # <<<<<<<<<<<<<< + * del self.thisptr + * def fit_low(self, inputData, int fit_size): + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "libmr.pyx":108 + * def __dealloc__(self): + * del self.thisptr + * def fit_low(self, inputData, int fit_size): # <<<<<<<<<<<<<< + * """Use fit_low if your data is such that is smaller is better. Fits a + * MR object to the given data. We'll transform it for you + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_5libmr_2MR_5fit_low(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static char __pyx_doc_5libmr_2MR_4fit_low[] = "Use fit_low if your data is such that is smaller is better. Fits a\n MR object to the given data. 
We'll transform it for you\n and keep the transform parameters in the class so later calls\n to W_score or CDF do the right thing."; +static PyObject *__pyx_pw_5libmr_2MR_5fit_low(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_inputData = 0; + int __pyx_v_fit_size; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("fit_low (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_inputData,&__pyx_n_s_fit_size,0}; + PyObject* values[2] = {0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inputData)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_fit_size)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fit_low", 1, 2, 2, 1); __PYX_ERR(0, 108, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "fit_low") < 0)) __PYX_ERR(0, 108, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + } + __pyx_v_inputData = values[0]; + __pyx_v_fit_size = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_fit_size == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 108, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("fit_low", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 108, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("libmr.MR.fit_low", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_5libmr_2MR_4fit_low(((struct __pyx_obj_5libmr_MR *)__pyx_v_self), __pyx_v_inputData, __pyx_v_fit_size); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_5libmr_2MR_4fit_low(struct __pyx_obj_5libmr_MR *__pyx_v_self, PyObject *__pyx_v_inputData, int __pyx_v_fit_size) { + double *__pyx_v_data; + Py_ssize_t __pyx_v_i; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + Py_ssize_t __pyx_t_2; + Py_ssize_t __pyx_t_3; + PyObject *__pyx_t_4 = NULL; + double __pyx_t_5; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("fit_low", 0); + + /* "libmr.pyx":114 + * to W_score or CDF do the right thing.""" + * cdef double *data + * data = malloc(sizeof(double)*len(inputData)) # <<<<<<<<<<<<<< + * for i in xrange(len(inputData)): + * data[i] = inputData[i] + */ + __pyx_t_1 = PyObject_Length(__pyx_v_inputData); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 114, __pyx_L1_error) + __pyx_v_data = ((double *)malloc(((sizeof(double)) * __pyx_t_1))); + + /* "libmr.pyx":115 + * cdef double *data + * data = 
malloc(sizeof(double)*len(inputData)) + * for i in xrange(len(inputData)): # <<<<<<<<<<<<<< + * data[i] = inputData[i] + * self.thisptr.FitLow(data, len(inputData), fit_size) + */ + __pyx_t_1 = PyObject_Length(__pyx_v_inputData); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 115, __pyx_L1_error) + __pyx_t_2 = __pyx_t_1; + for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { + __pyx_v_i = __pyx_t_3; + + /* "libmr.pyx":116 + * data = malloc(sizeof(double)*len(inputData)) + * for i in xrange(len(inputData)): + * data[i] = inputData[i] # <<<<<<<<<<<<<< + * self.thisptr.FitLow(data, len(inputData), fit_size) + * free(data) + */ + __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_inputData, __pyx_v_i, Py_ssize_t, 1, PyInt_FromSsize_t, 0, 1, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 116, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = __pyx_PyFloat_AsDouble(__pyx_t_4); if (unlikely((__pyx_t_5 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 116, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + (__pyx_v_data[__pyx_v_i]) = __pyx_t_5; + } + + /* "libmr.pyx":117 + * for i in xrange(len(inputData)): + * data[i] = inputData[i] + * self.thisptr.FitLow(data, len(inputData), fit_size) # <<<<<<<<<<<<<< + * free(data) + * def fit_high(self, inputData, int fit_size): + */ + __pyx_t_1 = PyObject_Length(__pyx_v_inputData); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 117, __pyx_L1_error) + (void)(__pyx_v_self->thisptr->FitLow(__pyx_v_data, __pyx_t_1, __pyx_v_fit_size)); + + /* "libmr.pyx":118 + * data[i] = inputData[i] + * self.thisptr.FitLow(data, len(inputData), fit_size) + * free(data) # <<<<<<<<<<<<<< + * def fit_high(self, inputData, int fit_size): + * """Use fit_high if your data is such that is larger is better. Fits a + */ + free(__pyx_v_data); + + /* "libmr.pyx":108 + * def __dealloc__(self): + * del self.thisptr + * def fit_low(self, inputData, int fit_size): # <<<<<<<<<<<<<< + * """Use fit_low if your data is such that is smaller is better. Fits a + * MR object to the given data. We'll transform it for you + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("libmr.MR.fit_low", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "libmr.pyx":119 + * self.thisptr.FitLow(data, len(inputData), fit_size) + * free(data) + * def fit_high(self, inputData, int fit_size): # <<<<<<<<<<<<<< + * """Use fit_high if your data is such that is larger is better. Fits a + * MR object to the given data. We'll transform it for you + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_5libmr_2MR_7fit_high(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static char __pyx_doc_5libmr_2MR_6fit_high[] = "Use fit_high if your data is such that is larger is better. Fits a\n MR object to the given data. 
We'll transform it for you\n and keep the transform parameters in the class so later calls\n to W_score or CDF do the right thing.\n "; +static PyObject *__pyx_pw_5libmr_2MR_7fit_high(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_inputData = 0; + int __pyx_v_fit_size; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("fit_high (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_inputData,&__pyx_n_s_fit_size,0}; + PyObject* values[2] = {0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inputData)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_fit_size)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fit_high", 1, 2, 2, 1); __PYX_ERR(0, 119, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "fit_high") < 0)) __PYX_ERR(0, 119, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + } + __pyx_v_inputData = values[0]; + __pyx_v_fit_size = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_fit_size == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 119, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("fit_high", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 119, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("libmr.MR.fit_high", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_5libmr_2MR_6fit_high(((struct __pyx_obj_5libmr_MR *)__pyx_v_self), __pyx_v_inputData, __pyx_v_fit_size); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_5libmr_2MR_6fit_high(struct __pyx_obj_5libmr_MR *__pyx_v_self, PyObject *__pyx_v_inputData, int __pyx_v_fit_size) { + double *__pyx_v_data; + Py_ssize_t __pyx_v_i; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + Py_ssize_t __pyx_t_2; + Py_ssize_t __pyx_t_3; + PyObject *__pyx_t_4 = NULL; + double __pyx_t_5; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("fit_high", 0); + + /* "libmr.pyx":126 + * """ + * cdef double *data + * data = malloc(sizeof(double)*len(inputData)) # <<<<<<<<<<<<<< + * for i in xrange(len(inputData)): + * data[i] = inputData[i] + */ + __pyx_t_1 = PyObject_Length(__pyx_v_inputData); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 126, __pyx_L1_error) + __pyx_v_data = ((double *)malloc(((sizeof(double)) * __pyx_t_1))); + + /* "libmr.pyx":127 + * cdef double *data + * data = malloc(sizeof(double)*len(inputData)) 
+ * for i in xrange(len(inputData)): # <<<<<<<<<<<<<< + * data[i] = inputData[i] + * self.thisptr.FitHigh(data, len(inputData), fit_size) + */ + __pyx_t_1 = PyObject_Length(__pyx_v_inputData); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 127, __pyx_L1_error) + __pyx_t_2 = __pyx_t_1; + for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { + __pyx_v_i = __pyx_t_3; + + /* "libmr.pyx":128 + * data = malloc(sizeof(double)*len(inputData)) + * for i in xrange(len(inputData)): + * data[i] = inputData[i] # <<<<<<<<<<<<<< + * self.thisptr.FitHigh(data, len(inputData), fit_size) + * free(data) + */ + __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_inputData, __pyx_v_i, Py_ssize_t, 1, PyInt_FromSsize_t, 0, 1, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 128, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = __pyx_PyFloat_AsDouble(__pyx_t_4); if (unlikely((__pyx_t_5 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 128, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + (__pyx_v_data[__pyx_v_i]) = __pyx_t_5; + } + + /* "libmr.pyx":129 + * for i in xrange(len(inputData)): + * data[i] = inputData[i] + * self.thisptr.FitHigh(data, len(inputData), fit_size) # <<<<<<<<<<<<<< + * free(data) + * + */ + __pyx_t_1 = PyObject_Length(__pyx_v_inputData); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 129, __pyx_L1_error) + (void)(__pyx_v_self->thisptr->FitHigh(__pyx_v_data, __pyx_t_1, __pyx_v_fit_size)); + + /* "libmr.pyx":130 + * data[i] = inputData[i] + * self.thisptr.FitHigh(data, len(inputData), fit_size) + * free(data) # <<<<<<<<<<<<<< + * + * def mr_save(self, filename): + */ + free(__pyx_v_data); + + /* "libmr.pyx":119 + * self.thisptr.FitLow(data, len(inputData), fit_size) + * free(data) + * def fit_high(self, inputData, int fit_size): # <<<<<<<<<<<<<< + * """Use fit_high if your data is such that is larger is better. Fits a + * MR object to the given data. 
We'll transform it for you + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("libmr.MR.fit_high", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "libmr.pyx":132 + * free(data) + * + * def mr_save(self, filename): # <<<<<<<<<<<<<< + * """ + * save mr object to file + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_5libmr_2MR_9mr_save(PyObject *__pyx_v_self, PyObject *__pyx_v_filename); /*proto*/ +static char __pyx_doc_5libmr_2MR_8mr_save[] = "\n save mr object to file\n "; +static PyObject *__pyx_pw_5libmr_2MR_9mr_save(PyObject *__pyx_v_self, PyObject *__pyx_v_filename) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("mr_save (wrapper)", 0); + __pyx_r = __pyx_pf_5libmr_2MR_8mr_save(((struct __pyx_obj_5libmr_MR *)__pyx_v_self), ((PyObject *)__pyx_v_filename)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_5libmr_2MR_8mr_save(struct __pyx_obj_5libmr_MR *__pyx_v_self, PyObject *__pyx_v_filename) { + char *__pyx_v_filetosave; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + char *__pyx_t_1; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("mr_save", 0); + + /* "libmr.pyx":137 + * """ + * cdef char *filetosave + * filetosave = filename # <<<<<<<<<<<<<< + * self.thisptr.Save(filetosave) + * + */ + __pyx_t_1 = __Pyx_PyObject_AsWritableString(__pyx_v_filename); if (unlikely((!__pyx_t_1) && PyErr_Occurred())) __PYX_ERR(0, 137, __pyx_L1_error) + __pyx_v_filetosave = __pyx_t_1; + + /* "libmr.pyx":138 + * cdef char *filetosave + * filetosave = filename + * self.thisptr.Save(filetosave) # <<<<<<<<<<<<<< + * + * def mr_load(self, filename): + */ + __pyx_v_self->thisptr->Save(__pyx_v_filetosave); + + /* "libmr.pyx":132 + * free(data) + * + * def mr_save(self, filename): # <<<<<<<<<<<<<< + * """ + * save mr object to file + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_AddTraceback("libmr.MR.mr_save", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "libmr.pyx":140 + * self.thisptr.Save(filetosave) + * + * def mr_load(self, filename): # <<<<<<<<<<<<<< + * """ + * save mr object to file + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_5libmr_2MR_11mr_load(PyObject *__pyx_v_self, PyObject *__pyx_v_filename); /*proto*/ +static char __pyx_doc_5libmr_2MR_10mr_load[] = "\n save mr object to file\n "; +static PyObject *__pyx_pw_5libmr_2MR_11mr_load(PyObject *__pyx_v_self, PyObject *__pyx_v_filename) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("mr_load (wrapper)", 0); + __pyx_r = __pyx_pf_5libmr_2MR_10mr_load(((struct __pyx_obj_5libmr_MR *)__pyx_v_self), ((PyObject *)__pyx_v_filename)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_5libmr_2MR_10mr_load(struct __pyx_obj_5libmr_MR *__pyx_v_self, PyObject *__pyx_v_filename) { + char *__pyx_v_filetosave; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + char *__pyx_t_1; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; 
+ __Pyx_RefNannySetupContext("mr_load", 0); + + /* "libmr.pyx":145 + * """ + * cdef char *filetosave + * filetosave = filename # <<<<<<<<<<<<<< + * self.thisptr.Load(filetosave) + * + */ + __pyx_t_1 = __Pyx_PyObject_AsWritableString(__pyx_v_filename); if (unlikely((!__pyx_t_1) && PyErr_Occurred())) __PYX_ERR(0, 145, __pyx_L1_error) + __pyx_v_filetosave = __pyx_t_1; + + /* "libmr.pyx":146 + * cdef char *filetosave + * filetosave = filename + * self.thisptr.Load(filetosave) # <<<<<<<<<<<<<< + * + * def fit_svm(self, svm_data, inputDataSize, label_of_interest, + */ + __pyx_v_self->thisptr->Load(__pyx_v_filetosave); + + /* "libmr.pyx":140 + * self.thisptr.Save(filetosave) + * + * def mr_load(self, filename): # <<<<<<<<<<<<<< + * """ + * save mr object to file + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_AddTraceback("libmr.MR.mr_load", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "libmr.pyx":148 + * self.thisptr.Load(filetosave) + * + * def fit_svm(self, svm_data, inputDataSize, label_of_interest, # <<<<<<<<<<<<<< + * label_has_positive_score, fit_type, fit_size ): + * """ + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_5libmr_2MR_13fit_svm(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static char __pyx_doc_5libmr_2MR_12fit_svm[] = "\n Input:\n --------\n svm_data: dict containing labels and decision scores. \n eg. svm_data['scores'] = [], svm_data['labels'] = []\n inputDataSize : total no of decision scores\n label_of_interest : eg +1, -1\n label_has_positive_score : bool i.e 0 or 1\n fit_type : complement_reject=1, positive_reject=2, complement_model=3, positive_model=4\n fit_size : size of tail to be used\n\n Output:\n --------\n None\n You can access parameters from weibull fitting using other attributes.\n Loading/Saving of weibull model parameters can be done using load/save methods\n in MR class\n\n "; +static PyObject *__pyx_pw_5libmr_2MR_13fit_svm(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_svm_data = 0; + PyObject *__pyx_v_inputDataSize = 0; + PyObject *__pyx_v_label_of_interest = 0; + PyObject *__pyx_v_label_has_positive_score = 0; + PyObject *__pyx_v_fit_type = 0; + PyObject *__pyx_v_fit_size = 0; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("fit_svm (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_svm_data,&__pyx_n_s_inputDataSize,&__pyx_n_s_label_of_interest,&__pyx_n_s_label_has_positive_score,&__pyx_n_s_fit_type,&__pyx_n_s_fit_size,0}; + PyObject* values[6] = {0,0,0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + CYTHON_FALLTHROUGH; + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); 
+ switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_svm_data)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_inputDataSize)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fit_svm", 1, 6, 6, 1); __PYX_ERR(0, 148, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_label_of_interest)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fit_svm", 1, 6, 6, 2); __PYX_ERR(0, 148, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_label_has_positive_score)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fit_svm", 1, 6, 6, 3); __PYX_ERR(0, 148, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 4: + if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_fit_type)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fit_svm", 1, 6, 6, 4); __PYX_ERR(0, 148, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 5: + if (likely((values[5] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_fit_size)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("fit_svm", 1, 6, 6, 5); __PYX_ERR(0, 148, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "fit_svm") < 0)) __PYX_ERR(0, 148, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 6) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + } + __pyx_v_svm_data = values[0]; + __pyx_v_inputDataSize = values[1]; + __pyx_v_label_of_interest = values[2]; + __pyx_v_label_has_positive_score = values[3]; + __pyx_v_fit_type = values[4]; + __pyx_v_fit_size = values[5]; + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("fit_svm", 1, 6, 6, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 148, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("libmr.MR.fit_svm", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_5libmr_2MR_12fit_svm(((struct __pyx_obj_5libmr_MR *)__pyx_v_self), __pyx_v_svm_data, __pyx_v_inputDataSize, __pyx_v_label_of_interest, __pyx_v_label_has_positive_score, __pyx_v_fit_type, __pyx_v_fit_size); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_5libmr_2MR_12fit_svm(struct __pyx_obj_5libmr_MR *__pyx_v_self, PyObject *__pyx_v_svm_data, PyObject *__pyx_v_inputDataSize, PyObject *__pyx_v_label_of_interest, PyObject *__pyx_v_label_has_positive_score, PyObject *__pyx_v_fit_type, PyObject *__pyx_v_fit_size) { + struct svm_node_libsvm *__pyx_v_svm_data_to_c; + PyObject *__pyx_v_i = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + size_t __pyx_t_3; + PyObject *__pyx_t_4 = NULL; + int __pyx_t_5; + Py_ssize_t __pyx_t_6; + Py_ssize_t __pyx_t_7; + int __pyx_t_8; + PyObject *(*__pyx_t_9)(PyObject *); + int __pyx_t_10; + double __pyx_t_11; + int __pyx_t_12; + 
bool __pyx_t_13; + int __pyx_t_14; + int __pyx_t_15; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("fit_svm", 0); + + /* "libmr.pyx":173 + * cdef svm_node_libsvm *svm_data_to_c + * + * svm_data_to_c = < svm_node_libsvm* >malloc(inputDataSize * sizeof(svm_node_libsvm) ) # <<<<<<<<<<<<<< + * + * assert svm_data.has_key("scores") + */ + __pyx_t_1 = __Pyx_PyInt_FromSize_t((sizeof(struct svm_node_libsvm))); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 173, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = PyNumber_Multiply(__pyx_v_inputDataSize, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 173, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_3 = __Pyx_PyInt_As_size_t(__pyx_t_2); if (unlikely((__pyx_t_3 == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 173, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_v_svm_data_to_c = ((struct svm_node_libsvm *)malloc(__pyx_t_3)); + + /* "libmr.pyx":175 + * svm_data_to_c = < svm_node_libsvm* >malloc(inputDataSize * sizeof(svm_node_libsvm) ) + * + * assert svm_data.has_key("scores") # <<<<<<<<<<<<<< + * assert svm_data.has_key("scores") + * assert len(svm_data["scores"]) == len(svm_data["labels"]) + */ + #ifndef CYTHON_WITHOUT_ASSERTIONS + if (unlikely(!Py_OptimizeFlag)) { + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_svm_data, __pyx_n_s_has_key); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 175, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_4 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { + __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_1); + if (likely(__pyx_t_4)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); + __Pyx_INCREF(__pyx_t_4); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_1, function); + } + } + __pyx_t_2 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_4, __pyx_n_s_scores) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_n_s_scores); + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 175, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 175, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + if (unlikely(!__pyx_t_5)) { + PyErr_SetNone(PyExc_AssertionError); + __PYX_ERR(0, 175, __pyx_L1_error) + } + } + #endif + + /* "libmr.pyx":176 + * + * assert svm_data.has_key("scores") + * assert svm_data.has_key("scores") # <<<<<<<<<<<<<< + * assert len(svm_data["scores"]) == len(svm_data["labels"]) + * assert fit_type in [1, 2, 3, 4] + */ + #ifndef CYTHON_WITHOUT_ASSERTIONS + if (unlikely(!Py_OptimizeFlag)) { + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_svm_data, __pyx_n_s_has_key); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 176, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_4 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { + __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_1); + if (likely(__pyx_t_4)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); + __Pyx_INCREF(__pyx_t_4); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_1, function); + } + } + __pyx_t_2 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_1, __pyx_t_4, __pyx_n_s_scores) : __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_n_s_scores); + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 176, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 176, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + if (unlikely(!__pyx_t_5)) { + PyErr_SetNone(PyExc_AssertionError); + __PYX_ERR(0, 176, __pyx_L1_error) + } + } + #endif + + /* "libmr.pyx":177 + * assert svm_data.has_key("scores") + * assert svm_data.has_key("scores") + * assert len(svm_data["scores"]) == len(svm_data["labels"]) # <<<<<<<<<<<<<< + * assert fit_type in [1, 2, 3, 4] + * for i in range(inputDataSize): + */ + #ifndef CYTHON_WITHOUT_ASSERTIONS + if (unlikely(!Py_OptimizeFlag)) { + __pyx_t_2 = __Pyx_PyObject_Dict_GetItem(__pyx_v_svm_data, __pyx_n_s_scores); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 177, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_6 = PyObject_Length(__pyx_t_2); if (unlikely(__pyx_t_6 == ((Py_ssize_t)-1))) __PYX_ERR(0, 177, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyObject_Dict_GetItem(__pyx_v_svm_data, __pyx_n_s_labels); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 177, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_7 = PyObject_Length(__pyx_t_2); if (unlikely(__pyx_t_7 == ((Py_ssize_t)-1))) __PYX_ERR(0, 177, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + if (unlikely(!((__pyx_t_6 == __pyx_t_7) != 0))) { + PyErr_SetNone(PyExc_AssertionError); + __PYX_ERR(0, 177, __pyx_L1_error) + } + } + #endif + + /* "libmr.pyx":178 + * assert svm_data.has_key("scores") + * assert len(svm_data["scores"]) == len(svm_data["labels"]) + * assert fit_type in [1, 2, 3, 4] # <<<<<<<<<<<<<< + * for i in range(inputDataSize): + * svm_data_to_c[i].index = svm_data["labels"][i] + */ + #ifndef CYTHON_WITHOUT_ASSERTIONS + if (unlikely(!Py_OptimizeFlag)) { + __Pyx_INCREF(__pyx_v_fit_type); + __pyx_t_2 = __pyx_v_fit_type; + __pyx_t_1 = __Pyx_PyInt_EqObjC(__pyx_t_2, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 178, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_8 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_8 < 0)) __PYX_ERR(0, 178, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + if (!__pyx_t_8) { + } else { + __pyx_t_5 = __pyx_t_8; + goto __pyx_L3_bool_binop_done; + } + __pyx_t_1 = __Pyx_PyInt_EqObjC(__pyx_t_2, __pyx_int_2, 2, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 178, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_8 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_8 < 0)) __PYX_ERR(0, 178, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + if (!__pyx_t_8) { + } else { + __pyx_t_5 = __pyx_t_8; + goto __pyx_L3_bool_binop_done; + } + __pyx_t_1 = __Pyx_PyInt_EqObjC(__pyx_t_2, __pyx_int_3, 3, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 178, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_8 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_8 < 0)) __PYX_ERR(0, 178, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + if (!__pyx_t_8) { + } else { + __pyx_t_5 = __pyx_t_8; + goto __pyx_L3_bool_binop_done; + } + __pyx_t_1 = __Pyx_PyInt_EqObjC(__pyx_t_2, __pyx_int_4, 4, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 178, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_8 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_8 < 0)) __PYX_ERR(0, 178, 
__pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_5 = __pyx_t_8; + __pyx_L3_bool_binop_done:; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + if (unlikely(!(__pyx_t_5 != 0))) { + PyErr_SetNone(PyExc_AssertionError); + __PYX_ERR(0, 178, __pyx_L1_error) + } + } + #endif + + /* "libmr.pyx":179 + * assert len(svm_data["scores"]) == len(svm_data["labels"]) + * assert fit_type in [1, 2, 3, 4] + * for i in range(inputDataSize): # <<<<<<<<<<<<<< + * svm_data_to_c[i].index = svm_data["labels"][i] + * svm_data_to_c[i].value = svm_data["scores"][i] + */ + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_range, __pyx_v_inputDataSize); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 179, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (likely(PyList_CheckExact(__pyx_t_2)) || PyTuple_CheckExact(__pyx_t_2)) { + __pyx_t_1 = __pyx_t_2; __Pyx_INCREF(__pyx_t_1); __pyx_t_7 = 0; + __pyx_t_9 = NULL; + } else { + __pyx_t_7 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 179, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_9 = Py_TYPE(__pyx_t_1)->tp_iternext; if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 179, __pyx_L1_error) + } + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + for (;;) { + if (likely(!__pyx_t_9)) { + if (likely(PyList_CheckExact(__pyx_t_1))) { + if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_1)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_2 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_7); __Pyx_INCREF(__pyx_t_2); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(0, 179, __pyx_L1_error) + #else + __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 179, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + #endif + } else { + if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_1)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_7); __Pyx_INCREF(__pyx_t_2); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(0, 179, __pyx_L1_error) + #else + __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 179, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + #endif + } + } else { + __pyx_t_2 = __pyx_t_9(__pyx_t_1); + if (unlikely(!__pyx_t_2)) { + PyObject* exc_type = PyErr_Occurred(); + if (exc_type) { + if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); + else __PYX_ERR(0, 179, __pyx_L1_error) + } + break; + } + __Pyx_GOTREF(__pyx_t_2); + } + __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_2); + __pyx_t_2 = 0; + + /* "libmr.pyx":180 + * assert fit_type in [1, 2, 3, 4] + * for i in range(inputDataSize): + * svm_data_to_c[i].index = svm_data["labels"][i] # <<<<<<<<<<<<<< + * svm_data_to_c[i].value = svm_data["scores"][i] + * + */ + __pyx_t_2 = __Pyx_PyObject_Dict_GetItem(__pyx_v_svm_data, __pyx_n_s_labels); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 180, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_v_i); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 180, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_10 = __Pyx_PyInt_As_int(__pyx_t_4); if (unlikely((__pyx_t_10 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 180, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_i); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 180, __pyx_L1_error) + (__pyx_v_svm_data_to_c[__pyx_t_6]).index = __pyx_t_10; + + /* "libmr.pyx":181 + * for 
i in range(inputDataSize): + * svm_data_to_c[i].index = svm_data["labels"][i] + * svm_data_to_c[i].value = svm_data["scores"][i] # <<<<<<<<<<<<<< + * + * print "Data initizalization complete. Now calling C++ code" + */ + __pyx_t_4 = __Pyx_PyObject_Dict_GetItem(__pyx_v_svm_data, __pyx_n_s_scores); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 181, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_v_i); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 181, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_11 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_11 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 181, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_i); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 181, __pyx_L1_error) + (__pyx_v_svm_data_to_c[__pyx_t_6]).value = __pyx_t_11; + + /* "libmr.pyx":179 + * assert len(svm_data["scores"]) == len(svm_data["labels"]) + * assert fit_type in [1, 2, 3, 4] + * for i in range(inputDataSize): # <<<<<<<<<<<<<< + * svm_data_to_c[i].index = svm_data["labels"][i] + * svm_data_to_c[i].value = svm_data["scores"][i] + */ + } + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "libmr.pyx":183 + * svm_data_to_c[i].value = svm_data["scores"][i] + * + * print "Data initizalization complete. Now calling C++ code" # <<<<<<<<<<<<<< + * self.thisptr.FitSVM(svm_data_to_c, inputDataSize, label_of_interest, label_has_positive_score, fit_type, fit_size) + * free(svm_data_to_c) + */ + if (__Pyx_PrintOne(0, __pyx_kp_s_Data_initizalization_complete_No) < 0) __PYX_ERR(0, 183, __pyx_L1_error) + + /* "libmr.pyx":184 + * + * print "Data initizalization complete. Now calling C++ code" + * self.thisptr.FitSVM(svm_data_to_c, inputDataSize, label_of_interest, label_has_positive_score, fit_type, fit_size) # <<<<<<<<<<<<<< + * free(svm_data_to_c) + * + */ + __pyx_t_10 = __Pyx_PyInt_As_int(__pyx_v_inputDataSize); if (unlikely((__pyx_t_10 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 184, __pyx_L1_error) + __pyx_t_12 = __Pyx_PyInt_As_int(__pyx_v_label_of_interest); if (unlikely((__pyx_t_12 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 184, __pyx_L1_error) + __pyx_t_13 = __Pyx_PyObject_IsTrue(__pyx_v_label_has_positive_score); if (unlikely((__pyx_t_13 == ((bool)-1)) && PyErr_Occurred())) __PYX_ERR(0, 184, __pyx_L1_error) + __pyx_t_14 = __Pyx_PyInt_As_int(__pyx_v_fit_type); if (unlikely((__pyx_t_14 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 184, __pyx_L1_error) + __pyx_t_15 = __Pyx_PyInt_As_int(__pyx_v_fit_size); if (unlikely((__pyx_t_15 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 184, __pyx_L1_error) + (void)(__pyx_v_self->thisptr->FitSVM(__pyx_v_svm_data_to_c, __pyx_t_10, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15)); + + /* "libmr.pyx":185 + * print "Data initizalization complete. 
Now calling C++ code" + * self.thisptr.FitSVM(svm_data_to_c, inputDataSize, label_of_interest, label_has_positive_score, fit_type, fit_size) + * free(svm_data_to_c) # <<<<<<<<<<<<<< + * + * property is_valid: + */ + free(__pyx_v_svm_data_to_c); + + /* "libmr.pyx":148 + * self.thisptr.Load(filetosave) + * + * def fit_svm(self, svm_data, inputDataSize, label_of_interest, # <<<<<<<<<<<<<< + * label_has_positive_score, fit_type, fit_size ): + * """ + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("libmr.MR.fit_svm", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_i); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "libmr.pyx":188 + * + * property is_valid: + * def __get__(self): # <<<<<<<<<<<<<< + * return self.thisptr.is_valid() + * def reset(self): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_5libmr_2MR_8is_valid_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_5libmr_2MR_8is_valid_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_5libmr_2MR_8is_valid___get__(((struct __pyx_obj_5libmr_MR *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_5libmr_2MR_8is_valid___get__(struct __pyx_obj_5libmr_MR *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "libmr.pyx":189 + * property is_valid: + * def __get__(self): + * return self.thisptr.is_valid() # <<<<<<<<<<<<<< + * def reset(self): + * self.thisptr.Reset() + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_v_self->thisptr->is_valid()); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 189, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "libmr.pyx":188 + * + * property is_valid: + * def __get__(self): # <<<<<<<<<<<<<< + * return self.thisptr.is_valid() + * def reset(self): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("libmr.MR.is_valid.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "libmr.pyx":190 + * def __get__(self): + * return self.thisptr.is_valid() + * def reset(self): # <<<<<<<<<<<<<< + * self.thisptr.Reset() + * def predict_match(self, double x, double threshold = .9999999): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_5libmr_2MR_15reset(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_pw_5libmr_2MR_15reset(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("reset (wrapper)", 0); + __pyx_r = __pyx_pf_5libmr_2MR_14reset(((struct __pyx_obj_5libmr_MR *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_5libmr_2MR_14reset(struct __pyx_obj_5libmr_MR *__pyx_v_self) { + PyObject *__pyx_r = NULL; + 
__Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("reset", 0); + + /* "libmr.pyx":191 + * return self.thisptr.is_valid() + * def reset(self): + * self.thisptr.Reset() # <<<<<<<<<<<<<< + * def predict_match(self, double x, double threshold = .9999999): + * """ + */ + __pyx_v_self->thisptr->Reset(); + + /* "libmr.pyx":190 + * def __get__(self): + * return self.thisptr.is_valid() + * def reset(self): # <<<<<<<<<<<<<< + * self.thisptr.Reset() + * def predict_match(self, double x, double threshold = .9999999): + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "libmr.pyx":192 + * def reset(self): + * self.thisptr.Reset() + * def predict_match(self, double x, double threshold = .9999999): # <<<<<<<<<<<<<< + * """ + * Is X from the "match" distribution (i.e. we reject null hypothesis + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_5libmr_2MR_17predict_match(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static char __pyx_doc_5libmr_2MR_16predict_match[] = "\n Is X from the \"match\" distribution (i.e. we reject null hypothesis\n of non-match)\n\n "; +static PyObject *__pyx_pw_5libmr_2MR_17predict_match(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + double __pyx_v_x; + double __pyx_v_threshold; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("predict_match (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_x,&__pyx_n_s_threshold,0}; + PyObject* values[2] = {0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_threshold); + if (value) { values[1] = value; kw_args--; } + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "predict_match") < 0)) __PYX_ERR(0, 192, __pyx_L3_error) + } + } else { + switch (PyTuple_GET_SIZE(__pyx_args)) { + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + break; + default: goto __pyx_L5_argtuple_error; + } + } + __pyx_v_x = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_x == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 192, __pyx_L3_error) + if (values[1]) { + __pyx_v_threshold = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_threshold == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 192, __pyx_L3_error) + } else { + __pyx_v_threshold = ((double).9999999); + } + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("predict_match", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 192, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("libmr.MR.predict_match", __pyx_clineno, __pyx_lineno, 
__pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_5libmr_2MR_16predict_match(((struct __pyx_obj_5libmr_MR *)__pyx_v_self), __pyx_v_x, __pyx_v_threshold); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_5libmr_2MR_16predict_match(struct __pyx_obj_5libmr_MR *__pyx_v_self, double __pyx_v_x, double __pyx_v_threshold) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("predict_match", 0); + + /* "libmr.pyx":198 + * + * """ + * return self.thisptr.Predict_Match(x,threshold) # <<<<<<<<<<<<<< + * def w_score(self, double x): + * """ + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_v_self->thisptr->Predict_Match(__pyx_v_x, __pyx_v_threshold)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 198, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "libmr.pyx":192 + * def reset(self): + * self.thisptr.Reset() + * def predict_match(self, double x, double threshold = .9999999): # <<<<<<<<<<<<<< + * """ + * Is X from the "match" distribution (i.e. we reject null hypothesis + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("libmr.MR.predict_match", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "libmr.pyx":199 + * """ + * return self.thisptr.Predict_Match(x,threshold) + * def w_score(self, double x): # <<<<<<<<<<<<<< + * """ + * This is the commonly used function. After fitting, it returns the probability of the given score being "correct". It is the same as CDF + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_5libmr_2MR_19w_score(PyObject *__pyx_v_self, PyObject *__pyx_arg_x); /*proto*/ +static char __pyx_doc_5libmr_2MR_18w_score[] = "\n\tThis is the commonly used function. After fitting, it returns the probability of the given score being \"correct\". It is the same as CDF\n "; +static PyObject *__pyx_pw_5libmr_2MR_19w_score(PyObject *__pyx_v_self, PyObject *__pyx_arg_x) { + double __pyx_v_x; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("w_score (wrapper)", 0); + assert(__pyx_arg_x); { + __pyx_v_x = __pyx_PyFloat_AsDouble(__pyx_arg_x); if (unlikely((__pyx_v_x == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 199, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + __Pyx_AddTraceback("libmr.MR.w_score", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_5libmr_2MR_18w_score(((struct __pyx_obj_5libmr_MR *)__pyx_v_self), ((double)__pyx_v_x)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_5libmr_2MR_18w_score(struct __pyx_obj_5libmr_MR *__pyx_v_self, double __pyx_v_x) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("w_score", 0); + + /* "libmr.pyx":203 + * This is the commonly used function. 
After fitting, it returns the probability of the given score being "correct". It is the same as CDF + * """ + * return self.thisptr.W_score(x) # <<<<<<<<<<<<<< + * def cdf(self, double x): + * """ + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->thisptr->W_score(__pyx_v_x)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 203, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "libmr.pyx":199 + * """ + * return self.thisptr.Predict_Match(x,threshold) + * def w_score(self, double x): # <<<<<<<<<<<<<< + * """ + * This is the commonly used function. After fitting, it returns the probability of the given score being "correct". It is the same as CDF + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("libmr.MR.w_score", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "libmr.pyx":204 + * """ + * return self.thisptr.W_score(x) + * def cdf(self, double x): # <<<<<<<<<<<<<< + * """ + * This is the cummumlative probablity of match being corrrect (or more precisely the probility the score (after transform) being an outlier for the distribution, which given the transforms applied, so bigger is better, this is the probablity the score is correct. + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_5libmr_2MR_21cdf(PyObject *__pyx_v_self, PyObject *__pyx_arg_x); /*proto*/ +static char __pyx_doc_5libmr_2MR_20cdf[] = "\n This is the cummumlative probablity of match being corrrect (or more precisely the probility the score (after transform) being an outlier for the distribution, which given the transforms applied, so bigger is better, this is the probablity the score is correct.\n "; +static PyObject *__pyx_pw_5libmr_2MR_21cdf(PyObject *__pyx_v_self, PyObject *__pyx_arg_x) { + double __pyx_v_x; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("cdf (wrapper)", 0); + assert(__pyx_arg_x); { + __pyx_v_x = __pyx_PyFloat_AsDouble(__pyx_arg_x); if (unlikely((__pyx_v_x == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 204, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + __Pyx_AddTraceback("libmr.MR.cdf", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_5libmr_2MR_20cdf(((struct __pyx_obj_5libmr_MR *)__pyx_v_self), ((double)__pyx_v_x)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_5libmr_2MR_20cdf(struct __pyx_obj_5libmr_MR *__pyx_v_self, double __pyx_v_x) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("cdf", 0); + + /* "libmr.pyx":208 + * This is the cummumlative probablity of match being corrrect (or more precisely the probility the score (after transform) being an outlier for the distribution, which given the transforms applied, so bigger is better, this is the probablity the score is correct. 
+ * """ + * return self.thisptr.CDF(x) # <<<<<<<<<<<<<< + * def inv(self, double p): + * """ + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->thisptr->CDF(__pyx_v_x)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 208, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "libmr.pyx":204 + * """ + * return self.thisptr.W_score(x) + * def cdf(self, double x): # <<<<<<<<<<<<<< + * """ + * This is the cummumlative probablity of match being corrrect (or more precisely the probility the score (after transform) being an outlier for the distribution, which given the transforms applied, so bigger is better, this is the probablity the score is correct. + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("libmr.MR.cdf", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "libmr.pyx":209 + * """ + * return self.thisptr.CDF(x) + * def inv(self, double p): # <<<<<<<<<<<<<< + * """ + * This is score for which one would obtain CDF probability p (i.e. x such that p = CDF(x)) + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_5libmr_2MR_23inv(PyObject *__pyx_v_self, PyObject *__pyx_arg_p); /*proto*/ +static char __pyx_doc_5libmr_2MR_22inv[] = "\n This is score for which one would obtain CDF probability p (i.e. x such that p = CDF(x))\n "; +static PyObject *__pyx_pw_5libmr_2MR_23inv(PyObject *__pyx_v_self, PyObject *__pyx_arg_p) { + double __pyx_v_p; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("inv (wrapper)", 0); + assert(__pyx_arg_p); { + __pyx_v_p = __pyx_PyFloat_AsDouble(__pyx_arg_p); if (unlikely((__pyx_v_p == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 209, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + __Pyx_AddTraceback("libmr.MR.inv", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_5libmr_2MR_22inv(((struct __pyx_obj_5libmr_MR *)__pyx_v_self), ((double)__pyx_v_p)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_5libmr_2MR_22inv(struct __pyx_obj_5libmr_MR *__pyx_v_self, double __pyx_v_p) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("inv", 0); + + /* "libmr.pyx":213 + * This is score for which one would obtain CDF probability p (i.e. x such that p = CDF(x)) + * """ + * return self.thisptr.Inv(p) # <<<<<<<<<<<<<< + * def w_score_vector(self, double[::1] invec): + * """ + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->thisptr->Inv(__pyx_v_p)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 213, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "libmr.pyx":209 + * """ + * return self.thisptr.CDF(x) + * def inv(self, double p): # <<<<<<<<<<<<<< + * """ + * This is score for which one would obtain CDF probability p (i.e. 
x such that p = CDF(x)) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("libmr.MR.inv", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "libmr.pyx":214 + * """ + * return self.thisptr.Inv(p) + * def w_score_vector(self, double[::1] invec): # <<<<<<<<<<<<<< + * """ + * Apply w_score to each element of invec, returning a new vector of W-scores + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_5libmr_2MR_25w_score_vector(PyObject *__pyx_v_self, PyObject *__pyx_arg_invec); /*proto*/ +static char __pyx_doc_5libmr_2MR_24w_score_vector[] = "\n Apply w_score to each element of invec, returning a new vector of W-scores\n "; +static PyObject *__pyx_pw_5libmr_2MR_25w_score_vector(PyObject *__pyx_v_self, PyObject *__pyx_arg_invec) { + __Pyx_memviewslice __pyx_v_invec = { 0, 0, { 0 }, { 0 }, { 0 } }; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("w_score_vector (wrapper)", 0); + assert(__pyx_arg_invec); { + __pyx_v_invec = __Pyx_PyObject_to_MemoryviewSlice_dc_double(__pyx_arg_invec, PyBUF_WRITABLE); if (unlikely(!__pyx_v_invec.memview)) __PYX_ERR(0, 214, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + __Pyx_AddTraceback("libmr.MR.w_score_vector", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_5libmr_2MR_24w_score_vector(((struct __pyx_obj_5libmr_MR *)__pyx_v_self), __pyx_v_invec); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_5libmr_2MR_24w_score_vector(struct __pyx_obj_5libmr_MR *__pyx_v_self, __Pyx_memviewslice __pyx_v_invec) { + PyArrayObject *__pyx_v_new_vec = 0; + __Pyx_LocalBuf_ND __pyx_pybuffernd_new_vec; + __Pyx_Buffer __pyx_pybuffer_new_vec; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + size_t __pyx_t_3; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyArrayObject *__pyx_t_6 = NULL; + Py_ssize_t __pyx_t_7; + int __pyx_t_8; + Py_ssize_t __pyx_t_9; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("w_score_vector", 0); + __pyx_pybuffer_new_vec.pybuffer.buf = NULL; + __pyx_pybuffer_new_vec.refcount = 0; + __pyx_pybuffernd_new_vec.data = NULL; + __pyx_pybuffernd_new_vec.rcbuffer = &__pyx_pybuffer_new_vec; + + /* "libmr.pyx":218 + * Apply w_score to each element of invec, returning a new vector of W-scores + * """ + * cdef np.ndarray[np.double_t,ndim=1]new_vec = np.zeros(len(invec), dtype='d') # <<<<<<<<<<<<<< + * self.thisptr.ReNormalize(&invec[0], &new_vec[0], len(invec)) + * return new_vec + */ + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 218, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 218, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_3 = __Pyx_MemoryView_Len(__pyx_v_invec); + __pyx_t_1 = __Pyx_PyInt_FromSize_t(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 218, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_4 = PyTuple_New(1); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(0, 218, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); + __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 218, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_n_s_d) < 0) __PYX_ERR(0, 218, __pyx_L1_error) + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_4, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 218, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 218, __pyx_L1_error) + __pyx_t_6 = ((PyArrayObject *)__pyx_t_5); + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_new_vec.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { + __pyx_v_new_vec = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_new_vec.rcbuffer->pybuffer.buf = NULL; + __PYX_ERR(0, 218, __pyx_L1_error) + } else {__pyx_pybuffernd_new_vec.diminfo[0].strides = __pyx_pybuffernd_new_vec.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_new_vec.diminfo[0].shape = __pyx_pybuffernd_new_vec.rcbuffer->pybuffer.shape[0]; + } + } + __pyx_t_6 = 0; + __pyx_v_new_vec = ((PyArrayObject *)__pyx_t_5); + __pyx_t_5 = 0; + + /* "libmr.pyx":219 + * """ + * cdef np.ndarray[np.double_t,ndim=1]new_vec = np.zeros(len(invec), dtype='d') + * self.thisptr.ReNormalize(&invec[0], &new_vec[0], len(invec)) # <<<<<<<<<<<<<< + * return new_vec + * def __str__(self): + */ + __pyx_t_7 = 0; + __pyx_t_8 = -1; + if (__pyx_t_7 < 0) { + __pyx_t_7 += __pyx_v_invec.shape[0]; + if (unlikely(__pyx_t_7 < 0)) __pyx_t_8 = 0; + } else if (unlikely(__pyx_t_7 >= __pyx_v_invec.shape[0])) __pyx_t_8 = 0; + if (unlikely(__pyx_t_8 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_8); + __PYX_ERR(0, 219, __pyx_L1_error) + } + __pyx_t_9 = 0; + __pyx_t_8 = -1; + if (__pyx_t_9 < 0) { + __pyx_t_9 += __pyx_pybuffernd_new_vec.diminfo[0].shape; + if (unlikely(__pyx_t_9 < 0)) __pyx_t_8 = 0; + } else if (unlikely(__pyx_t_9 >= __pyx_pybuffernd_new_vec.diminfo[0].shape)) __pyx_t_8 = 0; + if (unlikely(__pyx_t_8 != -1)) { + __Pyx_RaiseBufferIndexError(__pyx_t_8); + __PYX_ERR(0, 219, __pyx_L1_error) + } + __pyx_t_3 = __Pyx_MemoryView_Len(__pyx_v_invec); + (void)(__pyx_v_self->thisptr->ReNormalize((&(*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_invec.data) + __pyx_t_7)) )))), (&(*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_double_t *, __pyx_pybuffernd_new_vec.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_new_vec.diminfo[0].strides))), __pyx_t_3)); + + /* "libmr.pyx":220 + * cdef np.ndarray[np.double_t,ndim=1]new_vec = np.zeros(len(invec), dtype='d') + * self.thisptr.ReNormalize(&invec[0], &new_vec[0], len(invec)) + * return new_vec # <<<<<<<<<<<<<< + * def __str__(self): + * """ + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_new_vec)); + __pyx_r = ((PyObject *)__pyx_v_new_vec); + goto __pyx_L0; + + /* "libmr.pyx":214 + * """ + * return self.thisptr.Inv(p) + * def w_score_vector(self, double[::1] invec): # <<<<<<<<<<<<<< + * """ + * Apply w_score to each element of invec, returning a new vector of W-scores + */ + + /* function exit code */ + __pyx_L1_error:; + 
__Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_new_vec.rcbuffer->pybuffer); + __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} + __Pyx_AddTraceback("libmr.MR.w_score_vector", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + goto __pyx_L2; + __pyx_L0:; + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_new_vec.rcbuffer->pybuffer); + __pyx_L2:; + __PYX_XDEC_MEMVIEW(&__pyx_v_invec, 1); + __Pyx_XDECREF((PyObject *)__pyx_v_new_vec); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "libmr.pyx":221 + * self.thisptr.ReNormalize(&invec[0], &new_vec[0], len(invec)) + * return new_vec + * def __str__(self): # <<<<<<<<<<<<<< + * """ + * Serialize the MR object to a string. Use load_from_string to recover it. + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_5libmr_2MR_27__str__(PyObject *__pyx_v_self); /*proto*/ +static char __pyx_doc_5libmr_2MR_26__str__[] = "\n Serialize the MR object to a string. Use load_from_string to recover it.\n "; +#if CYTHON_COMPILING_IN_CPYTHON +struct wrapperbase __pyx_wrapperbase_5libmr_2MR_26__str__; +#endif +static PyObject *__pyx_pw_5libmr_2MR_27__str__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); + __pyx_r = __pyx_pf_5libmr_2MR_26__str__(((struct __pyx_obj_5libmr_MR *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_5libmr_2MR_26__str__(struct __pyx_obj_5libmr_MR *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__str__", 0); + + /* "libmr.pyx":225 + * Serialize the MR object to a string. Use load_from_string to recover it. + * """ + * return self.thisptr.to_string() # <<<<<<<<<<<<<< + * def __repr__(self): + * return "" % str(self) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __pyx_convert_PyBytes_string_to_py_std__in_string(__pyx_v_self->thisptr->to_string()); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 225, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "libmr.pyx":221 + * self.thisptr.ReNormalize(&invec[0], &new_vec[0], len(invec)) + * return new_vec + * def __str__(self): # <<<<<<<<<<<<<< + * """ + * Serialize the MR object to a string. Use load_from_string to recover it. 
+ */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("libmr.MR.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "libmr.pyx":226 + * """ + * return self.thisptr.to_string() + * def __repr__(self): # <<<<<<<<<<<<<< + * return "" % str(self) + * property tailsize: + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_5libmr_2MR_29__repr__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_5libmr_2MR_29__repr__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); + __pyx_r = __pyx_pf_5libmr_2MR_28__repr__(((struct __pyx_obj_5libmr_MR *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_5libmr_2MR_28__repr__(struct __pyx_obj_5libmr_MR *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__repr__", 0); + + /* "libmr.pyx":227 + * return self.thisptr.to_string() + * def __repr__(self): + * return "" % str(self) # <<<<<<<<<<<<<< + * property tailsize: + * def __get__(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyString_Type)), ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 227, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyString_FormatSafe(__pyx_kp_s_MR_object_r, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 227, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "libmr.pyx":226 + * """ + * return self.thisptr.to_string() + * def __repr__(self): # <<<<<<<<<<<<<< + * return "" % str(self) + * property tailsize: + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("libmr.MR.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "libmr.pyx":229 + * return "" % str(self) + * property tailsize: + * def __get__(self): # <<<<<<<<<<<<<< + * return self.thisptr.get_fitting_size() + * def __set__(self, int nsize): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_5libmr_2MR_8tailsize_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_5libmr_2MR_8tailsize_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_5libmr_2MR_8tailsize___get__(((struct __pyx_obj_5libmr_MR *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_5libmr_2MR_8tailsize___get__(struct __pyx_obj_5libmr_MR *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "libmr.pyx":230 + * property tailsize: + * def __get__(self): + * return self.thisptr.get_fitting_size() # <<<<<<<<<<<<<< + * def __set__(self, int nsize): + * 
self.thisptr.set_fitting_size(nsize) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->thisptr->get_fitting_size()); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 230, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "libmr.pyx":229 + * return "" % str(self) + * property tailsize: + * def __get__(self): # <<<<<<<<<<<<<< + * return self.thisptr.get_fitting_size() + * def __set__(self, int nsize): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("libmr.MR.tailsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "libmr.pyx":231 + * def __get__(self): + * return self.thisptr.get_fitting_size() + * def __set__(self, int nsize): # <<<<<<<<<<<<<< + * self.thisptr.set_fitting_size(nsize) + * property translate_amount: + */ + +/* Python wrapper */ +static int __pyx_pw_5libmr_2MR_8tailsize_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_nsize); /*proto*/ +static int __pyx_pw_5libmr_2MR_8tailsize_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_nsize) { + int __pyx_v_nsize; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); + assert(__pyx_arg_nsize); { + __pyx_v_nsize = __Pyx_PyInt_As_int(__pyx_arg_nsize); if (unlikely((__pyx_v_nsize == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 231, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + __Pyx_AddTraceback("libmr.MR.tailsize.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return -1; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_5libmr_2MR_8tailsize_2__set__(((struct __pyx_obj_5libmr_MR *)__pyx_v_self), ((int)__pyx_v_nsize)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_pf_5libmr_2MR_8tailsize_2__set__(struct __pyx_obj_5libmr_MR *__pyx_v_self, int __pyx_v_nsize) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__set__", 0); + + /* "libmr.pyx":232 + * return self.thisptr.get_fitting_size() + * def __set__(self, int nsize): + * self.thisptr.set_fitting_size(nsize) # <<<<<<<<<<<<<< + * property translate_amount: + * def __get__(self): + */ + (void)(__pyx_v_self->thisptr->set_fitting_size(__pyx_v_nsize)); + + /* "libmr.pyx":231 + * def __get__(self): + * return self.thisptr.get_fitting_size() + * def __set__(self, int nsize): # <<<<<<<<<<<<<< + * self.thisptr.set_fitting_size(nsize) + * property translate_amount: + */ + + /* function exit code */ + __pyx_r = 0; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "libmr.pyx":234 + * self.thisptr.set_fitting_size(nsize) + * property translate_amount: + * def __get__(self): # <<<<<<<<<<<<<< + * return self.thisptr.get_translate_amount() + * def __set__(self, int ntrans): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_5libmr_2MR_16translate_amount_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_5libmr_2MR_16translate_amount_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_5libmr_2MR_16translate_amount___get__(((struct __pyx_obj_5libmr_MR *)__pyx_v_self)); + + /* function exit code */ + 
__Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_5libmr_2MR_16translate_amount___get__(struct __pyx_obj_5libmr_MR *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "libmr.pyx":235 + * property translate_amount: + * def __get__(self): + * return self.thisptr.get_translate_amount() # <<<<<<<<<<<<<< + * def __set__(self, int ntrans): + * self.thisptr.set_translate_amount(ntrans) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->thisptr->get_translate_amount()); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 235, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "libmr.pyx":234 + * self.thisptr.set_fitting_size(nsize) + * property translate_amount: + * def __get__(self): # <<<<<<<<<<<<<< + * return self.thisptr.get_translate_amount() + * def __set__(self, int ntrans): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("libmr.MR.translate_amount.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "libmr.pyx":236 + * def __get__(self): + * return self.thisptr.get_translate_amount() + * def __set__(self, int ntrans): # <<<<<<<<<<<<<< + * self.thisptr.set_translate_amount(ntrans) + * property sign: + */ + +/* Python wrapper */ +static int __pyx_pw_5libmr_2MR_16translate_amount_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_ntrans); /*proto*/ +static int __pyx_pw_5libmr_2MR_16translate_amount_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_ntrans) { + int __pyx_v_ntrans; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); + assert(__pyx_arg_ntrans); { + __pyx_v_ntrans = __Pyx_PyInt_As_int(__pyx_arg_ntrans); if (unlikely((__pyx_v_ntrans == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 236, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + __Pyx_AddTraceback("libmr.MR.translate_amount.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return -1; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_5libmr_2MR_16translate_amount_2__set__(((struct __pyx_obj_5libmr_MR *)__pyx_v_self), ((int)__pyx_v_ntrans)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_pf_5libmr_2MR_16translate_amount_2__set__(struct __pyx_obj_5libmr_MR *__pyx_v_self, int __pyx_v_ntrans) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__set__", 0); + + /* "libmr.pyx":237 + * return self.thisptr.get_translate_amount() + * def __set__(self, int ntrans): + * self.thisptr.set_translate_amount(ntrans) # <<<<<<<<<<<<<< + * property sign: + * def __get__(self): + */ + (void)(__pyx_v_self->thisptr->set_translate_amount(__pyx_v_ntrans)); + + /* "libmr.pyx":236 + * def __get__(self): + * return self.thisptr.get_translate_amount() + * def __set__(self, int ntrans): # <<<<<<<<<<<<<< + * self.thisptr.set_translate_amount(ntrans) + * property sign: + */ + + /* function exit code */ + __pyx_r = 0; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "libmr.pyx":239 + * 
self.thisptr.set_translate_amount(ntrans) + * property sign: + * def __get__(self): # <<<<<<<<<<<<<< + * return self.thisptr.get_sign() + * def __set__(self, int nsign): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_5libmr_2MR_4sign_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_5libmr_2MR_4sign_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_5libmr_2MR_4sign___get__(((struct __pyx_obj_5libmr_MR *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_5libmr_2MR_4sign___get__(struct __pyx_obj_5libmr_MR *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "libmr.pyx":240 + * property sign: + * def __get__(self): + * return self.thisptr.get_sign() # <<<<<<<<<<<<<< + * def __set__(self, int nsign): + * self.thisptr.set_sign(nsign) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->thisptr->get_sign()); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 240, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "libmr.pyx":239 + * self.thisptr.set_translate_amount(ntrans) + * property sign: + * def __get__(self): # <<<<<<<<<<<<<< + * return self.thisptr.get_sign() + * def __set__(self, int nsign): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("libmr.MR.sign.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "libmr.pyx":241 + * def __get__(self): + * return self.thisptr.get_sign() + * def __set__(self, int nsign): # <<<<<<<<<<<<<< + * self.thisptr.set_sign(nsign) + * property small_score: + */ + +/* Python wrapper */ +static int __pyx_pw_5libmr_2MR_4sign_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_nsign); /*proto*/ +static int __pyx_pw_5libmr_2MR_4sign_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_nsign) { + int __pyx_v_nsign; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); + assert(__pyx_arg_nsign); { + __pyx_v_nsign = __Pyx_PyInt_As_int(__pyx_arg_nsign); if (unlikely((__pyx_v_nsign == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 241, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + __Pyx_AddTraceback("libmr.MR.sign.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return -1; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_5libmr_2MR_4sign_2__set__(((struct __pyx_obj_5libmr_MR *)__pyx_v_self), ((int)__pyx_v_nsign)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_pf_5libmr_2MR_4sign_2__set__(struct __pyx_obj_5libmr_MR *__pyx_v_self, int __pyx_v_nsign) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__set__", 0); + + /* "libmr.pyx":242 + * return self.thisptr.get_sign() + * def __set__(self, int nsign): + * self.thisptr.set_sign(nsign) # <<<<<<<<<<<<<< + * property small_score: + * def __get__(self): + */ + 
(void)(__pyx_v_self->thisptr->set_sign(__pyx_v_nsign)); + + /* "libmr.pyx":241 + * def __get__(self): + * return self.thisptr.get_sign() + * def __set__(self, int nsign): # <<<<<<<<<<<<<< + * self.thisptr.set_sign(nsign) + * property small_score: + */ + + /* function exit code */ + __pyx_r = 0; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "libmr.pyx":244 + * self.thisptr.set_sign(nsign) + * property small_score: + * def __get__(self): # <<<<<<<<<<<<<< + * return self.thisptr.get_small_score() + * def __set__(self, double nscore): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_5libmr_2MR_11small_score_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_5libmr_2MR_11small_score_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_5libmr_2MR_11small_score___get__(((struct __pyx_obj_5libmr_MR *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_5libmr_2MR_11small_score___get__(struct __pyx_obj_5libmr_MR *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "libmr.pyx":245 + * property small_score: + * def __get__(self): + * return self.thisptr.get_small_score() # <<<<<<<<<<<<<< + * def __set__(self, double nscore): + * self.thisptr.set_small_score(nscore) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyFloat_FromDouble(__pyx_v_self->thisptr->get_small_score()); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 245, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "libmr.pyx":244 + * self.thisptr.set_sign(nsign) + * property small_score: + * def __get__(self): # <<<<<<<<<<<<<< + * return self.thisptr.get_small_score() + * def __set__(self, double nscore): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("libmr.MR.small_score.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "libmr.pyx":246 + * def __get__(self): + * return self.thisptr.get_small_score() + * def __set__(self, double nscore): # <<<<<<<<<<<<<< + * self.thisptr.set_small_score(nscore) + * property verbose: + */ + +/* Python wrapper */ +static int __pyx_pw_5libmr_2MR_11small_score_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_nscore); /*proto*/ +static int __pyx_pw_5libmr_2MR_11small_score_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_nscore) { + double __pyx_v_nscore; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); + assert(__pyx_arg_nscore); { + __pyx_v_nscore = __pyx_PyFloat_AsDouble(__pyx_arg_nscore); if (unlikely((__pyx_v_nscore == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 246, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + __Pyx_AddTraceback("libmr.MR.small_score.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return -1; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_5libmr_2MR_11small_score_2__set__(((struct __pyx_obj_5libmr_MR *)__pyx_v_self), 
((double)__pyx_v_nscore)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_pf_5libmr_2MR_11small_score_2__set__(struct __pyx_obj_5libmr_MR *__pyx_v_self, double __pyx_v_nscore) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__set__", 0); + + /* "libmr.pyx":247 + * return self.thisptr.get_small_score() + * def __set__(self, double nscore): + * self.thisptr.set_small_score(nscore) # <<<<<<<<<<<<<< + * property verbose: + * def __get__(self): + */ + (void)(__pyx_v_self->thisptr->set_small_score(__pyx_v_nscore)); + + /* "libmr.pyx":246 + * def __get__(self): + * return self.thisptr.get_small_score() + * def __set__(self, double nscore): # <<<<<<<<<<<<<< + * self.thisptr.set_small_score(nscore) + * property verbose: + */ + + /* function exit code */ + __pyx_r = 0; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "libmr.pyx":249 + * self.thisptr.set_small_score(nscore) + * property verbose: + * def __get__(self): # <<<<<<<<<<<<<< + * return self.thisptr.verbose + * def __set__(self, bool verbose): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_5libmr_2MR_7verbose_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_5libmr_2MR_7verbose_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_5libmr_2MR_7verbose___get__(((struct __pyx_obj_5libmr_MR *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_5libmr_2MR_7verbose___get__(struct __pyx_obj_5libmr_MR *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "libmr.pyx":250 + * property verbose: + * def __get__(self): + * return self.thisptr.verbose # <<<<<<<<<<<<<< + * def __set__(self, bool verbose): + * self.thisptr.verbose = verbose + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_v_self->thisptr->verbose); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 250, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "libmr.pyx":249 + * self.thisptr.set_small_score(nscore) + * property verbose: + * def __get__(self): # <<<<<<<<<<<<<< + * return self.thisptr.verbose + * def __set__(self, bool verbose): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("libmr.MR.verbose.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "libmr.pyx":251 + * def __get__(self): + * return self.thisptr.verbose + * def __set__(self, bool verbose): # <<<<<<<<<<<<<< + * self.thisptr.verbose = verbose + * + */ + +/* Python wrapper */ +static int __pyx_pw_5libmr_2MR_7verbose_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_verbose); /*proto*/ +static int __pyx_pw_5libmr_2MR_7verbose_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_verbose) { + bool __pyx_v_verbose; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); + assert(__pyx_arg_verbose); { + __pyx_v_verbose = __Pyx_PyObject_IsTrue(__pyx_arg_verbose); if 
(unlikely((__pyx_v_verbose == ((bool)-1)) && PyErr_Occurred())) __PYX_ERR(0, 251, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + __Pyx_AddTraceback("libmr.MR.verbose.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return -1; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_5libmr_2MR_7verbose_2__set__(((struct __pyx_obj_5libmr_MR *)__pyx_v_self), ((bool)__pyx_v_verbose)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_pf_5libmr_2MR_7verbose_2__set__(struct __pyx_obj_5libmr_MR *__pyx_v_self, bool __pyx_v_verbose) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__set__", 0); + + /* "libmr.pyx":252 + * return self.thisptr.verbose + * def __set__(self, bool verbose): + * self.thisptr.verbose = verbose # <<<<<<<<<<<<<< + * + * def load_from_string(str input): + */ + __pyx_v_self->thisptr->verbose = __pyx_v_verbose; + + /* "libmr.pyx":251 + * def __get__(self): + * return self.thisptr.verbose + * def __set__(self, bool verbose): # <<<<<<<<<<<<<< + * self.thisptr.verbose = verbose + * + */ + + /* function exit code */ + __pyx_r = 0; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_5libmr_2MR_31__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_pw_5libmr_2MR_31__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf_5libmr_2MR_30__reduce_cython__(((struct __pyx_obj_5libmr_MR *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_5libmr_2MR_30__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_5libmr_MR *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__reduce_cython__", 0); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 2, __pyx_L1_error) + + /* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("libmr.MR.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def 
__setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_5libmr_2MR_33__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ +static PyObject *__pyx_pw_5libmr_2MR_33__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf_5libmr_2MR_32__setstate_cython__(((struct __pyx_obj_5libmr_MR *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_5libmr_2MR_32__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_5libmr_MR *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 4, __pyx_L1_error) + + /* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("libmr.MR.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "libmr.pyx":254 + * self.thisptr.verbose = verbose + * + * def load_from_string(str input): # <<<<<<<<<<<<<< + * """ + * Deserialize an MR object. This turns a string back into an MR object; it is the inverse of str(MR()) + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_5libmr_1load_from_string(PyObject *__pyx_self, PyObject *__pyx_v_input); /*proto*/ +static char __pyx_doc_5libmr_load_from_string[] = "\n Deserialize an MR object. 
This turns a string back into an MR object; it is the inverse of str(MR())\n "; +static PyMethodDef __pyx_mdef_5libmr_1load_from_string = {"load_from_string", (PyCFunction)__pyx_pw_5libmr_1load_from_string, METH_O, __pyx_doc_5libmr_load_from_string}; +static PyObject *__pyx_pw_5libmr_1load_from_string(PyObject *__pyx_self, PyObject *__pyx_v_input) { + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("load_from_string (wrapper)", 0); + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_input), (&PyString_Type), 1, "input", 1))) __PYX_ERR(0, 254, __pyx_L1_error) + __pyx_r = __pyx_pf_5libmr_load_from_string(__pyx_self, ((PyObject*)__pyx_v_input)); + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_5libmr_load_from_string(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_input) { + struct __pyx_obj_5libmr_MR *__pyx_v_pymr = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + std::string __pyx_t_2; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("load_from_string", 0); + + /* "libmr.pyx":258 + * Deserialize an MR object. This turns a string back into an MR object; it is the inverse of str(MR()) + * """ + * pymr = MR() # <<<<<<<<<<<<<< + * pymr.thisptr.from_string(input) + * return pymr + */ + __pyx_t_1 = __Pyx_PyObject_CallNoArg(((PyObject *)__pyx_ptype_5libmr_MR)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 258, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_pymr = ((struct __pyx_obj_5libmr_MR *)__pyx_t_1); + __pyx_t_1 = 0; + + /* "libmr.pyx":259 + * """ + * pymr = MR() + * pymr.thisptr.from_string(input) # <<<<<<<<<<<<<< + * return pymr + * + */ + __pyx_t_2 = __pyx_convert_string_from_py_std__in_string(__pyx_v_input); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 259, __pyx_L1_error) + __pyx_v_pymr->thisptr->from_string(__pyx_t_2); + + /* "libmr.pyx":260 + * pymr = MR() + * pymr.thisptr.from_string(input) + * return pymr # <<<<<<<<<<<<<< + * + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_pymr)); + __pyx_r = ((PyObject *)__pyx_v_pymr); + goto __pyx_L0; + + /* "libmr.pyx":254 + * self.thisptr.verbose = verbose + * + * def load_from_string(str input): # <<<<<<<<<<<<<< + * """ + * Deserialize an MR object. 
This turns a string back into an MR object; it is the inverse of str(MR()) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("libmr.load_from_string", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_pymr); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":734 + * ctypedef npy_cdouble complex_t + * + * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(1, a) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":735 + * + * cdef inline object PyArray_MultiIterNew1(a): + * return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew2(a, b): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 735, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":734 + * ctypedef npy_cdouble complex_t + * + * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(1, a) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":737 + * return PyArray_MultiIterNew(1, a) + * + * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(2, a, b) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":738 + * + * cdef inline object PyArray_MultiIterNew2(a, b): + * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 738, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":737 + * return PyArray_MultiIterNew(1, a) + * + * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(2, a, b) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, 
__pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":740 + * return PyArray_MultiIterNew(2, a, b) + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(3, a, b, c) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":741 + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): + * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 741, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":740 + * return PyArray_MultiIterNew(2, a, b) + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(3, a, b, c) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":743 + * return PyArray_MultiIterNew(3, a, b, c) + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(4, a, b, c, d) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":744 + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): + * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 744, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":743 + * return PyArray_MultiIterNew(3, a, b, c) + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(4, a, b, c, d) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, 
__pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":746 + * return PyArray_MultiIterNew(4, a, b, c, d) + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":747 + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 747, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":746 + * return PyArray_MultiIterNew(4, a, b, c, d) + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":749 + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< + * if PyDataType_HASSUBARRAY(d): + * return d.subarray.shape + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("PyDataType_SHAPE", 0); + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":750 + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< + * return d.subarray.shape + * else: + */ + __pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0); + if (__pyx_t_1) { + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":751 + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): + * return d.subarray.shape # <<<<<<<<<<<<<< + * else: + * return () + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape)); + __pyx_r = ((PyObject*)__pyx_v_d->subarray->shape); + goto __pyx_L0; + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":750 + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< + * return d.subarray.shape + * else: + */ + } + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":753 
+ * return d.subarray.shape + * else: + * return () # <<<<<<<<<<<<<< + * + * + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_empty_tuple); + __pyx_r = __pyx_empty_tuple; + goto __pyx_L0; + } + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":749 + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< + * if PyDataType_HASSUBARRAY(d): + * return d.subarray.shape + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":868 + * int _import_umath() except -1 + * + * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< + * Py_INCREF(base) # important to do this before stealing the reference below! + * PyArray_SetBaseObject(arr, base) + */ + +static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("set_array_base", 0); + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":869 + * + * cdef inline void set_array_base(ndarray arr, object base): + * Py_INCREF(base) # important to do this before stealing the reference below! # <<<<<<<<<<<<<< + * PyArray_SetBaseObject(arr, base) + * + */ + Py_INCREF(__pyx_v_base); + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":870 + * cdef inline void set_array_base(ndarray arr, object base): + * Py_INCREF(base) # important to do this before stealing the reference below! + * PyArray_SetBaseObject(arr, base) # <<<<<<<<<<<<<< + * + * cdef inline object get_array_base(ndarray arr): + */ + (void)(PyArray_SetBaseObject(__pyx_v_arr, __pyx_v_base)); + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":868 + * int _import_umath() except -1 + * + * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< + * Py_INCREF(base) # important to do this before stealing the reference below! 
+ * PyArray_SetBaseObject(arr, base) + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":872 + * PyArray_SetBaseObject(arr, base) + * + * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< + * base = PyArray_BASE(arr) + * if base is NULL: + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { + PyObject *__pyx_v_base; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("get_array_base", 0); + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":873 + * + * cdef inline object get_array_base(ndarray arr): + * base = PyArray_BASE(arr) # <<<<<<<<<<<<<< + * if base is NULL: + * return None + */ + __pyx_v_base = PyArray_BASE(__pyx_v_arr); + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":874 + * cdef inline object get_array_base(ndarray arr): + * base = PyArray_BASE(arr) + * if base is NULL: # <<<<<<<<<<<<<< + * return None + * return base + */ + __pyx_t_1 = ((__pyx_v_base == NULL) != 0); + if (__pyx_t_1) { + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":875 + * base = PyArray_BASE(arr) + * if base is NULL: + * return None # <<<<<<<<<<<<<< + * return base + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":874 + * cdef inline object get_array_base(ndarray arr): + * base = PyArray_BASE(arr) + * if base is NULL: # <<<<<<<<<<<<<< + * return None + * return base + */ + } + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":876 + * if base is NULL: + * return None + * return base # <<<<<<<<<<<<<< + * + * # Versions of the import_* functions which are more suitable for + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_base)); + __pyx_r = ((PyObject *)__pyx_v_base); + goto __pyx_L0; + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":872 + * PyArray_SetBaseObject(arr, base) + * + * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< + * base = PyArray_BASE(arr) + * if base is NULL: + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":880 + * # Versions of the import_* functions which are more suitable for + * # Cython code. + * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< + * try: + * __pyx_import_array() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("import_array", 0); + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":881 + * # Cython code. 
+ * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * __pyx_import_array() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":882 + * cdef inline int import_array() except -1: + * try: + * __pyx_import_array() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") + */ + __pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 882, __pyx_L3_error) + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":881 + * # Cython code. + * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * __pyx_import_array() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":883 + * try: + * __pyx_import_array() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.multiarray failed to import") + * + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(2, 883, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":884 + * __pyx_import_array() + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_umath() except -1: + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 884, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(2, 884, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":881 + * # Cython code. + * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * __pyx_import_array() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":880 + * # Versions of the import_* functions which are more suitable for + * # Cython code. 
+ * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< + * try: + * __pyx_import_array() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":886 + * raise ImportError("numpy.core.multiarray failed to import") + * + * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("import_umath", 0); + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":887 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":888 + * cdef inline int import_umath() except -1: + * try: + * _import_umath() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.umath failed to import") + */ + __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 888, __pyx_L3_error) + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":887 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":889 + * try: + * _import_umath() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.umath failed to import") + * + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(2, 889, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":890 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_ufunc() except -1: + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 890, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 
0; + __PYX_ERR(2, 890, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":887 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":886 + * raise ImportError("numpy.core.multiarray failed to import") + * + * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":892 + * raise ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("import_ufunc", 0); + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":893 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":894 + * cdef inline int import_ufunc() except -1: + * try: + * _import_umath() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.umath failed to import") + */ + __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 894, __pyx_L3_error) + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":893 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":895 + * try: + * _import_umath() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.umath failed to import") + * + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, 
&__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(2, 895, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":896 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + * + * cdef extern from *: + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 896, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(2, 896, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":893 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":892 + * raise ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "string.to_py":31 + * + * @cname("__pyx_convert_PyObject_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyObject_string_to_py_std__in_string(const string& s): # <<<<<<<<<<<<<< + * return __Pyx_PyObject_FromStringAndSize(s.data(), s.size()) + * cdef extern from *: + */ + +static CYTHON_INLINE PyObject *__pyx_convert_PyObject_string_to_py_std__in_string(std::string const &__pyx_v_s) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__pyx_convert_PyObject_string_to_py_std__in_string", 0); + + /* "string.to_py":32 + * @cname("__pyx_convert_PyObject_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyObject_string_to_py_std__in_string(const string& s): + * return __Pyx_PyObject_FromStringAndSize(s.data(), s.size()) # <<<<<<<<<<<<<< + * cdef extern from *: + * cdef object __Pyx_PyUnicode_FromStringAndSize(const char*, size_t) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyObject_FromStringAndSize(__pyx_v_s.data(), __pyx_v_s.size()); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 32, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "string.to_py":31 + * + * @cname("__pyx_convert_PyObject_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyObject_string_to_py_std__in_string(const string& s): # <<<<<<<<<<<<<< + * return __Pyx_PyObject_FromStringAndSize(s.data(), s.size()) + * cdef extern from *: + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("string.to_py.__pyx_convert_PyObject_string_to_py_std__in_string", __pyx_clineno, 
__pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "string.to_py":37 + * + * @cname("__pyx_convert_PyUnicode_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyUnicode_string_to_py_std__in_string(const string& s): # <<<<<<<<<<<<<< + * return __Pyx_PyUnicode_FromStringAndSize(s.data(), s.size()) + * cdef extern from *: + */ + +static CYTHON_INLINE PyObject *__pyx_convert_PyUnicode_string_to_py_std__in_string(std::string const &__pyx_v_s) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__pyx_convert_PyUnicode_string_to_py_std__in_string", 0); + + /* "string.to_py":38 + * @cname("__pyx_convert_PyUnicode_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyUnicode_string_to_py_std__in_string(const string& s): + * return __Pyx_PyUnicode_FromStringAndSize(s.data(), s.size()) # <<<<<<<<<<<<<< + * cdef extern from *: + * cdef object __Pyx_PyStr_FromStringAndSize(const char*, size_t) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyUnicode_FromStringAndSize(__pyx_v_s.data(), __pyx_v_s.size()); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 38, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "string.to_py":37 + * + * @cname("__pyx_convert_PyUnicode_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyUnicode_string_to_py_std__in_string(const string& s): # <<<<<<<<<<<<<< + * return __Pyx_PyUnicode_FromStringAndSize(s.data(), s.size()) + * cdef extern from *: + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("string.to_py.__pyx_convert_PyUnicode_string_to_py_std__in_string", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "string.to_py":43 + * + * @cname("__pyx_convert_PyStr_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyStr_string_to_py_std__in_string(const string& s): # <<<<<<<<<<<<<< + * return __Pyx_PyStr_FromStringAndSize(s.data(), s.size()) + * cdef extern from *: + */ + +static CYTHON_INLINE PyObject *__pyx_convert_PyStr_string_to_py_std__in_string(std::string const &__pyx_v_s) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__pyx_convert_PyStr_string_to_py_std__in_string", 0); + + /* "string.to_py":44 + * @cname("__pyx_convert_PyStr_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyStr_string_to_py_std__in_string(const string& s): + * return __Pyx_PyStr_FromStringAndSize(s.data(), s.size()) # <<<<<<<<<<<<<< + * cdef extern from *: + * cdef object __Pyx_PyBytes_FromStringAndSize(const char*, size_t) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyStr_FromStringAndSize(__pyx_v_s.data(), __pyx_v_s.size()); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 44, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "string.to_py":43 + * + * @cname("__pyx_convert_PyStr_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyStr_string_to_py_std__in_string(const string& s): # <<<<<<<<<<<<<< + * return 
__Pyx_PyStr_FromStringAndSize(s.data(), s.size()) + * cdef extern from *: + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("string.to_py.__pyx_convert_PyStr_string_to_py_std__in_string", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "string.to_py":49 + * + * @cname("__pyx_convert_PyBytes_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyBytes_string_to_py_std__in_string(const string& s): # <<<<<<<<<<<<<< + * return __Pyx_PyBytes_FromStringAndSize(s.data(), s.size()) + * cdef extern from *: + */ + +static CYTHON_INLINE PyObject *__pyx_convert_PyBytes_string_to_py_std__in_string(std::string const &__pyx_v_s) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__pyx_convert_PyBytes_string_to_py_std__in_string", 0); + + /* "string.to_py":50 + * @cname("__pyx_convert_PyBytes_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyBytes_string_to_py_std__in_string(const string& s): + * return __Pyx_PyBytes_FromStringAndSize(s.data(), s.size()) # <<<<<<<<<<<<<< + * cdef extern from *: + * cdef object __Pyx_PyByteArray_FromStringAndSize(const char*, size_t) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_s.data(), __pyx_v_s.size()); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 50, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "string.to_py":49 + * + * @cname("__pyx_convert_PyBytes_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyBytes_string_to_py_std__in_string(const string& s): # <<<<<<<<<<<<<< + * return __Pyx_PyBytes_FromStringAndSize(s.data(), s.size()) + * cdef extern from *: + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("string.to_py.__pyx_convert_PyBytes_string_to_py_std__in_string", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "string.to_py":55 + * + * @cname("__pyx_convert_PyByteArray_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyByteArray_string_to_py_std__in_string(const string& s): # <<<<<<<<<<<<<< + * return __Pyx_PyByteArray_FromStringAndSize(s.data(), s.size()) + * + */ + +static CYTHON_INLINE PyObject *__pyx_convert_PyByteArray_string_to_py_std__in_string(std::string const &__pyx_v_s) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__pyx_convert_PyByteArray_string_to_py_std__in_string", 0); + + /* "string.to_py":56 + * @cname("__pyx_convert_PyByteArray_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyByteArray_string_to_py_std__in_string(const string& s): + * return __Pyx_PyByteArray_FromStringAndSize(s.data(), s.size()) # <<<<<<<<<<<<<< + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyByteArray_FromStringAndSize(__pyx_v_s.data(), __pyx_v_s.size()); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 56, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "string.to_py":55 + * + * 
@cname("__pyx_convert_PyByteArray_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyByteArray_string_to_py_std__in_string(const string& s): # <<<<<<<<<<<<<< + * return __Pyx_PyByteArray_FromStringAndSize(s.data(), s.size()) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("string.to_py.__pyx_convert_PyByteArray_string_to_py_std__in_string", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "string.from_py":13 + * + * @cname("__pyx_convert_string_from_py_std__in_string") + * cdef string __pyx_convert_string_from_py_std__in_string(object o) except *: # <<<<<<<<<<<<<< + * cdef Py_ssize_t length = 0 + * cdef const char* data = __Pyx_PyObject_AsStringAndSize(o, &length) + */ + +static std::string __pyx_convert_string_from_py_std__in_string(PyObject *__pyx_v_o) { + Py_ssize_t __pyx_v_length; + char const *__pyx_v_data; + std::string __pyx_r; + __Pyx_RefNannyDeclarations + char const *__pyx_t_1; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__pyx_convert_string_from_py_std__in_string", 0); + + /* "string.from_py":14 + * @cname("__pyx_convert_string_from_py_std__in_string") + * cdef string __pyx_convert_string_from_py_std__in_string(object o) except *: + * cdef Py_ssize_t length = 0 # <<<<<<<<<<<<<< + * cdef const char* data = __Pyx_PyObject_AsStringAndSize(o, &length) + * return string(data, length) + */ + __pyx_v_length = 0; + + /* "string.from_py":15 + * cdef string __pyx_convert_string_from_py_std__in_string(object o) except *: + * cdef Py_ssize_t length = 0 + * cdef const char* data = __Pyx_PyObject_AsStringAndSize(o, &length) # <<<<<<<<<<<<<< + * return string(data, length) + * + */ + __pyx_t_1 = __Pyx_PyObject_AsStringAndSize(__pyx_v_o, (&__pyx_v_length)); if (unlikely(__pyx_t_1 == ((char const *)NULL))) __PYX_ERR(1, 15, __pyx_L1_error) + __pyx_v_data = __pyx_t_1; + + /* "string.from_py":16 + * cdef Py_ssize_t length = 0 + * cdef const char* data = __Pyx_PyObject_AsStringAndSize(o, &length) + * return string(data, length) # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = std::string(__pyx_v_data, __pyx_v_length); + goto __pyx_L0; + + /* "string.from_py":13 + * + * @cname("__pyx_convert_string_from_py_std__in_string") + * cdef string __pyx_convert_string_from_py_std__in_string(object o) except *: # <<<<<<<<<<<<<< + * cdef Py_ssize_t length = 0 + * cdef const char* data = __Pyx_PyObject_AsStringAndSize(o, &length) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_AddTraceback("string.from_py.__pyx_convert_string_from_py_std__in_string", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_pretend_to_initialize(&__pyx_r); + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":122 + * cdef bint dtype_is_object + * + * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< + * mode="c", bint allocate_buffer=True): + * + */ + +/* Python wrapper */ +static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_shape = 0; + Py_ssize_t __pyx_v_itemsize; + PyObject *__pyx_v_format = 0; + PyObject *__pyx_v_mode = 0; + int __pyx_v_allocate_buffer; + int __pyx_lineno = 0; + const char *__pyx_filename = 
NULL; + int __pyx_clineno = 0; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; + PyObject* values[5] = {0,0,0,0,0}; + values[3] = ((PyObject *)__pyx_n_s_c); + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); + if (value) { values[3] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 4: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer); + if (value) { values[4] = value; kw_args--; } + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error) + } + } else { + switch (PyTuple_GET_SIZE(__pyx_args)) { + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + break; + default: goto __pyx_L5_argtuple_error; + } + } + __pyx_v_shape = ((PyObject*)values[0]); + __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error) + __pyx_v_format = values[2]; + __pyx_v_mode = values[3]; + if (values[4]) { + __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error) + } else { + + /* "View.MemoryView":123 + * + * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, + * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< + * + * cdef int idx + */ + __pyx_v_allocate_buffer = ((int)1); + } + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("View.MemoryView.array.__cinit__", 
__pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return -1; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error) + if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { + PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error) + } + __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); + + /* "View.MemoryView":122 + * cdef bint dtype_is_object + * + * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< + * mode="c", bint allocate_buffer=True): + * + */ + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { + int __pyx_v_idx; + Py_ssize_t __pyx_v_i; + Py_ssize_t __pyx_v_dim; + PyObject **__pyx_v_p; + char __pyx_v_order; + int __pyx_r; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + char *__pyx_t_7; + int __pyx_t_8; + Py_ssize_t __pyx_t_9; + PyObject *__pyx_t_10 = NULL; + Py_ssize_t __pyx_t_11; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__cinit__", 0); + __Pyx_INCREF(__pyx_v_format); + + /* "View.MemoryView":129 + * cdef PyObject **p + * + * self.ndim = len(shape) # <<<<<<<<<<<<<< + * self.itemsize = itemsize + * + */ + if (unlikely(__pyx_v_shape == Py_None)) { + PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); + __PYX_ERR(1, 129, __pyx_L1_error) + } + __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error) + __pyx_v_self->ndim = ((int)__pyx_t_1); + + /* "View.MemoryView":130 + * + * self.ndim = len(shape) + * self.itemsize = itemsize # <<<<<<<<<<<<<< + * + * if not self.ndim: + */ + __pyx_v_self->itemsize = __pyx_v_itemsize; + + /* "View.MemoryView":132 + * self.itemsize = itemsize + * + * if not self.ndim: # <<<<<<<<<<<<<< + * raise ValueError("Empty shape tuple for cython.array") + * + */ + __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); + if (unlikely(__pyx_t_2)) { + + /* "View.MemoryView":133 + * + * if not self.ndim: + * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< + * + * if itemsize <= 0: + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 133, __pyx_L1_error) + + /* "View.MemoryView":132 + * self.itemsize = itemsize + * + * if not self.ndim: # <<<<<<<<<<<<<< + * raise ValueError("Empty shape tuple for cython.array") + * + */ + } + + /* "View.MemoryView":135 + * raise ValueError("Empty shape tuple for cython.array") + * + * if itemsize <= 0: # <<<<<<<<<<<<<< + * raise ValueError("itemsize <= 0 for cython.array") + * + */ + 
__pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); + if (unlikely(__pyx_t_2)) { + + /* "View.MemoryView":136 + * + * if itemsize <= 0: + * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< + * + * if not isinstance(format, bytes): + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 136, __pyx_L1_error) + + /* "View.MemoryView":135 + * raise ValueError("Empty shape tuple for cython.array") + * + * if itemsize <= 0: # <<<<<<<<<<<<<< + * raise ValueError("itemsize <= 0 for cython.array") + * + */ + } + + /* "View.MemoryView":138 + * raise ValueError("itemsize <= 0 for cython.array") + * + * if not isinstance(format, bytes): # <<<<<<<<<<<<<< + * format = format.encode('ASCII') + * self._format = format # keep a reference to the byte string + */ + __pyx_t_2 = PyBytes_Check(__pyx_v_format); + __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); + if (__pyx_t_4) { + + /* "View.MemoryView":139 + * + * if not isinstance(format, bytes): + * format = format.encode('ASCII') # <<<<<<<<<<<<<< + * self._format = format # keep a reference to the byte string + * self.format = self._format + */ + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { + __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); + if (likely(__pyx_t_6)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); + __Pyx_INCREF(__pyx_t_6); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_5, function); + } + } + __pyx_t_3 = (__pyx_t_6) ? 
__Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII); + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3); + __pyx_t_3 = 0; + + /* "View.MemoryView":138 + * raise ValueError("itemsize <= 0 for cython.array") + * + * if not isinstance(format, bytes): # <<<<<<<<<<<<<< + * format = format.encode('ASCII') + * self._format = format # keep a reference to the byte string + */ + } + + /* "View.MemoryView":140 + * if not isinstance(format, bytes): + * format = format.encode('ASCII') + * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< + * self.format = self._format + * + */ + if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error) + __pyx_t_3 = __pyx_v_format; + __Pyx_INCREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_3); + __Pyx_GOTREF(__pyx_v_self->_format); + __Pyx_DECREF(__pyx_v_self->_format); + __pyx_v_self->_format = ((PyObject*)__pyx_t_3); + __pyx_t_3 = 0; + + /* "View.MemoryView":141 + * format = format.encode('ASCII') + * self._format = format # keep a reference to the byte string + * self.format = self._format # <<<<<<<<<<<<<< + * + * + */ + if (unlikely(__pyx_v_self->_format == Py_None)) { + PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); + __PYX_ERR(1, 141, __pyx_L1_error) + } + __pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error) + __pyx_v_self->format = __pyx_t_7; + + /* "View.MemoryView":144 + * + * + * self._shape = PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< + * self._strides = self._shape + self.ndim + * + */ + __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); + + /* "View.MemoryView":145 + * + * self._shape = PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) + * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< + * + * if not self._shape: + */ + __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); + + /* "View.MemoryView":147 + * self._strides = self._shape + self.ndim + * + * if not self._shape: # <<<<<<<<<<<<<< + * raise MemoryError("unable to allocate shape and strides.") + * + */ + __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); + if (unlikely(__pyx_t_4)) { + + /* "View.MemoryView":148 + * + * if not self._shape: + * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 148, __pyx_L1_error) + + /* "View.MemoryView":147 + * self._strides = self._shape + self.ndim + * + * if not self._shape: # <<<<<<<<<<<<<< + * raise MemoryError("unable to allocate shape and strides.") + * + */ + } + + /* "View.MemoryView":151 + * + * + * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< + * if dim <= 0: + * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) + */ + __pyx_t_8 = 0; + __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0; + for (;;) { + if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error) + #else + __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + #endif + __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_v_dim = __pyx_t_9; + __pyx_v_idx = __pyx_t_8; + __pyx_t_8 = (__pyx_t_8 + 1); + + /* "View.MemoryView":152 + * + * for idx, dim in enumerate(shape): + * if dim <= 0: # <<<<<<<<<<<<<< + * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) + * self._shape[idx] = dim + */ + __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); + if (unlikely(__pyx_t_4)) { + + /* "View.MemoryView":153 + * for idx, dim in enumerate(shape): + * if dim <= 0: + * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<< + * self._shape[idx] = dim + * + */ + __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_GIVEREF(__pyx_t_5); + PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6); + __pyx_t_5 = 0; + __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_Raise(__pyx_t_10, 0, 0, 0); + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __PYX_ERR(1, 153, __pyx_L1_error) + + /* "View.MemoryView":152 + * + * for idx, dim in enumerate(shape): + * if dim <= 0: # <<<<<<<<<<<<<< + * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) + * self._shape[idx] = dim + */ + } + + /* "View.MemoryView":154 + * if dim <= 0: + * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) + * self._shape[idx] = dim # <<<<<<<<<<<<<< + * + * cdef char order + */ + (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; + + /* "View.MemoryView":151 + * + * + * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< + * if dim <= 0: + * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) + */ + } + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "View.MemoryView":157 + * + * cdef char order + * if mode == 'fortran': # <<<<<<<<<<<<<< + * order = b'F' + * self.mode = u'fortran' + */ + __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error) + if (__pyx_t_4) { + + /* "View.MemoryView":158 + * cdef char order + * if mode == 'fortran': + * order = b'F' # <<<<<<<<<<<<<< + * self.mode = u'fortran' + * elif mode == 'c': + */ + __pyx_v_order = 'F'; + + /* "View.MemoryView":159 + * if mode == 'fortran': + * order = b'F' + * self.mode = u'fortran' # <<<<<<<<<<<<<< + * elif mode == 'c': + * order = b'C' + */ + __Pyx_INCREF(__pyx_n_u_fortran); + __Pyx_GIVEREF(__pyx_n_u_fortran); + __Pyx_GOTREF(__pyx_v_self->mode); + __Pyx_DECREF(__pyx_v_self->mode); + __pyx_v_self->mode = __pyx_n_u_fortran; + + /* "View.MemoryView":157 + * + * cdef char order + * if mode == 'fortran': # <<<<<<<<<<<<<< + * order = b'F' + * self.mode = u'fortran' + */ + goto __pyx_L10; + } + + /* "View.MemoryView":160 + * order = b'F' + * self.mode = u'fortran' + * elif mode == 'c': # <<<<<<<<<<<<<< + * order = b'C' + * self.mode = u'c' + */ + __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error) + if (likely(__pyx_t_4)) { + + /* "View.MemoryView":161 + * self.mode = u'fortran' + * elif mode == 'c': + * order = b'C' # <<<<<<<<<<<<<< + * self.mode = u'c' + * else: + */ + __pyx_v_order = 'C'; + + /* "View.MemoryView":162 + * elif mode == 'c': + * order = b'C' + * self.mode = u'c' # <<<<<<<<<<<<<< + * else: + * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) + */ + __Pyx_INCREF(__pyx_n_u_c); + __Pyx_GIVEREF(__pyx_n_u_c); + __Pyx_GOTREF(__pyx_v_self->mode); + __Pyx_DECREF(__pyx_v_self->mode); + __pyx_v_self->mode = __pyx_n_u_c; + + /* "View.MemoryView":160 + * order = b'F' + * self.mode = u'fortran' + * elif mode == 'c': # <<<<<<<<<<<<<< + * order = b'C' + * self.mode = u'c' + */ + goto __pyx_L10; + } + + /* "View.MemoryView":164 + * self.mode = u'c' + * else: + * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< + * + * self.len = fill_contig_strides_array(self._shape, self._strides, + */ + /*else*/ { + __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_Raise(__pyx_t_10, 0, 0, 0); + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __PYX_ERR(1, 164, __pyx_L1_error) + } + __pyx_L10:; + + /* "View.MemoryView":166 + * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) + * + * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< + * itemsize, self.ndim, order) + * + */ + __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); + + /* "View.MemoryView":169 + * itemsize, self.ndim, order) + * + * self.free_data = allocate_buffer # <<<<<<<<<<<<<< + * self.dtype_is_object = format == b'O' + * if allocate_buffer: + */ + __pyx_v_self->free_data = __pyx_v_allocate_buffer; + + /* "View.MemoryView":170 + * + * 
self.free_data = allocate_buffer + * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< + * if allocate_buffer: + * + */ + __pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 170, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __pyx_v_self->dtype_is_object = __pyx_t_4; + + /* "View.MemoryView":171 + * self.free_data = allocate_buffer + * self.dtype_is_object = format == b'O' + * if allocate_buffer: # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_4 = (__pyx_v_allocate_buffer != 0); + if (__pyx_t_4) { + + /* "View.MemoryView":174 + * + * + * self.data = malloc(self.len) # <<<<<<<<<<<<<< + * if not self.data: + * raise MemoryError("unable to allocate array data.") + */ + __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); + + /* "View.MemoryView":175 + * + * self.data = malloc(self.len) + * if not self.data: # <<<<<<<<<<<<<< + * raise MemoryError("unable to allocate array data.") + * + */ + __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); + if (unlikely(__pyx_t_4)) { + + /* "View.MemoryView":176 + * self.data = malloc(self.len) + * if not self.data: + * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< + * + * if self.dtype_is_object: + */ + __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_Raise(__pyx_t_10, 0, 0, 0); + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __PYX_ERR(1, 176, __pyx_L1_error) + + /* "View.MemoryView":175 + * + * self.data = malloc(self.len) + * if not self.data: # <<<<<<<<<<<<<< + * raise MemoryError("unable to allocate array data.") + * + */ + } + + /* "View.MemoryView":178 + * raise MemoryError("unable to allocate array data.") + * + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * p = self.data + * for i in range(self.len / itemsize): + */ + __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); + if (__pyx_t_4) { + + /* "View.MemoryView":179 + * + * if self.dtype_is_object: + * p = self.data # <<<<<<<<<<<<<< + * for i in range(self.len / itemsize): + * p[i] = Py_None + */ + __pyx_v_p = ((PyObject **)__pyx_v_self->data); + + /* "View.MemoryView":180 + * if self.dtype_is_object: + * p = self.data + * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< + * p[i] = Py_None + * Py_INCREF(Py_None) + */ + if (unlikely(__pyx_v_itemsize == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); + __PYX_ERR(1, 180, __pyx_L1_error) + } + else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { + PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); + __PYX_ERR(1, 180, __pyx_L1_error) + } + __pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize); + __pyx_t_9 = __pyx_t_1; + for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) { + __pyx_v_i = __pyx_t_11; + + /* "View.MemoryView":181 + * p = self.data + * for i in range(self.len / itemsize): + * p[i] = Py_None # <<<<<<<<<<<<<< + * Py_INCREF(Py_None) + * + */ + (__pyx_v_p[__pyx_v_i]) = Py_None; + + /* "View.MemoryView":182 + * for i in range(self.len / itemsize): + * p[i] = Py_None + * Py_INCREF(Py_None) # <<<<<<<<<<<<<< + * + * @cname('getbuffer') + */ + 
Py_INCREF(Py_None); + } + + /* "View.MemoryView":178 + * raise MemoryError("unable to allocate array data.") + * + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * p = self.data + * for i in range(self.len / itemsize): + */ + } + + /* "View.MemoryView":171 + * self.free_data = allocate_buffer + * self.dtype_is_object = format == b'O' + * if allocate_buffer: # <<<<<<<<<<<<<< + * + * + */ + } + + /* "View.MemoryView":122 + * cdef bint dtype_is_object + * + * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< + * mode="c", bint allocate_buffer=True): + * + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_10); + __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_format); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":185 + * + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< + * cdef int bufmode = -1 + * if self.mode == u"c": + */ + +/* Python wrapper */ +static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ +static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); + __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_v_bufmode; + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + char *__pyx_t_4; + Py_ssize_t __pyx_t_5; + int __pyx_t_6; + Py_ssize_t *__pyx_t_7; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + if (__pyx_v_info == NULL) { + PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); + return -1; + } + __Pyx_RefNannySetupContext("__getbuffer__", 0); + __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(__pyx_v_info->obj); + + /* "View.MemoryView":186 + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): + * cdef int bufmode = -1 # <<<<<<<<<<<<<< + * if self.mode == u"c": + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + */ + __pyx_v_bufmode = -1; + + /* "View.MemoryView":187 + * def __getbuffer__(self, Py_buffer *info, int flags): + * cdef int bufmode = -1 + * if self.mode == u"c": # <<<<<<<<<<<<<< + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * elif self.mode == u"fortran": + */ + __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error) + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":188 + * cdef int bufmode = -1 + * if self.mode == u"c": + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< + * elif self.mode == u"fortran": + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + */ + __pyx_v_bufmode = 
(PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); + + /* "View.MemoryView":187 + * def __getbuffer__(self, Py_buffer *info, int flags): + * cdef int bufmode = -1 + * if self.mode == u"c": # <<<<<<<<<<<<<< + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * elif self.mode == u"fortran": + */ + goto __pyx_L3; + } + + /* "View.MemoryView":189 + * if self.mode == u"c": + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * elif self.mode == u"fortran": # <<<<<<<<<<<<<< + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): + */ + __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error) + __pyx_t_1 = (__pyx_t_2 != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":190 + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * elif self.mode == u"fortran": + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< + * if not (flags & bufmode): + * raise ValueError("Can only create a buffer that is contiguous in memory.") + */ + __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); + + /* "View.MemoryView":189 + * if self.mode == u"c": + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * elif self.mode == u"fortran": # <<<<<<<<<<<<<< + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): + */ + } + __pyx_L3:; + + /* "View.MemoryView":191 + * elif self.mode == u"fortran": + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): # <<<<<<<<<<<<<< + * raise ValueError("Can only create a buffer that is contiguous in memory.") + * info.buf = self.data + */ + __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":192 + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): + * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< + * info.buf = self.data + * info.len = self.len + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 192, __pyx_L1_error) + + /* "View.MemoryView":191 + * elif self.mode == u"fortran": + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): # <<<<<<<<<<<<<< + * raise ValueError("Can only create a buffer that is contiguous in memory.") + * info.buf = self.data + */ + } + + /* "View.MemoryView":193 + * if not (flags & bufmode): + * raise ValueError("Can only create a buffer that is contiguous in memory.") + * info.buf = self.data # <<<<<<<<<<<<<< + * info.len = self.len + * info.ndim = self.ndim + */ + __pyx_t_4 = __pyx_v_self->data; + __pyx_v_info->buf = __pyx_t_4; + + /* "View.MemoryView":194 + * raise ValueError("Can only create a buffer that is contiguous in memory.") + * info.buf = self.data + * info.len = self.len # <<<<<<<<<<<<<< + * info.ndim = self.ndim + * info.shape = self._shape + */ + __pyx_t_5 = __pyx_v_self->len; + __pyx_v_info->len = __pyx_t_5; + + /* "View.MemoryView":195 + * info.buf = self.data + * info.len = self.len + * info.ndim = self.ndim # <<<<<<<<<<<<<< + * info.shape = self._shape + * info.strides = self._strides + */ + __pyx_t_6 = __pyx_v_self->ndim; + __pyx_v_info->ndim = __pyx_t_6; + + /* "View.MemoryView":196 + * info.len = self.len + * info.ndim = self.ndim + * 
info.shape = self._shape # <<<<<<<<<<<<<< + * info.strides = self._strides + * info.suboffsets = NULL + */ + __pyx_t_7 = __pyx_v_self->_shape; + __pyx_v_info->shape = __pyx_t_7; + + /* "View.MemoryView":197 + * info.ndim = self.ndim + * info.shape = self._shape + * info.strides = self._strides # <<<<<<<<<<<<<< + * info.suboffsets = NULL + * info.itemsize = self.itemsize + */ + __pyx_t_7 = __pyx_v_self->_strides; + __pyx_v_info->strides = __pyx_t_7; + + /* "View.MemoryView":198 + * info.shape = self._shape + * info.strides = self._strides + * info.suboffsets = NULL # <<<<<<<<<<<<<< + * info.itemsize = self.itemsize + * info.readonly = 0 + */ + __pyx_v_info->suboffsets = NULL; + + /* "View.MemoryView":199 + * info.strides = self._strides + * info.suboffsets = NULL + * info.itemsize = self.itemsize # <<<<<<<<<<<<<< + * info.readonly = 0 + * + */ + __pyx_t_5 = __pyx_v_self->itemsize; + __pyx_v_info->itemsize = __pyx_t_5; + + /* "View.MemoryView":200 + * info.suboffsets = NULL + * info.itemsize = self.itemsize + * info.readonly = 0 # <<<<<<<<<<<<<< + * + * if flags & PyBUF_FORMAT: + */ + __pyx_v_info->readonly = 0; + + /* "View.MemoryView":202 + * info.readonly = 0 + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * info.format = self.format + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":203 + * + * if flags & PyBUF_FORMAT: + * info.format = self.format # <<<<<<<<<<<<<< + * else: + * info.format = NULL + */ + __pyx_t_4 = __pyx_v_self->format; + __pyx_v_info->format = __pyx_t_4; + + /* "View.MemoryView":202 + * info.readonly = 0 + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * info.format = self.format + * else: + */ + goto __pyx_L5; + } + + /* "View.MemoryView":205 + * info.format = self.format + * else: + * info.format = NULL # <<<<<<<<<<<<<< + * + * info.obj = self + */ + /*else*/ { + __pyx_v_info->format = NULL; + } + __pyx_L5:; + + /* "View.MemoryView":207 + * info.format = NULL + * + * info.obj = self # <<<<<<<<<<<<<< + * + * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") + */ + __Pyx_INCREF(((PyObject *)__pyx_v_self)); + __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); + __pyx_v_info->obj = ((PyObject *)__pyx_v_self); + + /* "View.MemoryView":185 + * + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< + * cdef int bufmode = -1 + * if self.mode == u"c": + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + if (__pyx_v_info->obj != NULL) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; + } + goto __pyx_L2; + __pyx_L0:; + if (__pyx_v_info->obj == Py_None) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; + } + __pyx_L2:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":211 + * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") + * + * def __dealloc__(array self): # <<<<<<<<<<<<<< + * if self.callback_free_data != NULL: + * self.callback_free_data(self.data) + */ + +/* Python wrapper */ +static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ +static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { + __Pyx_RefNannyDeclarations + 
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); + __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("__dealloc__", 0); + + /* "View.MemoryView":212 + * + * def __dealloc__(array self): + * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< + * self.callback_free_data(self.data) + * elif self.free_data: + */ + __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":213 + * def __dealloc__(array self): + * if self.callback_free_data != NULL: + * self.callback_free_data(self.data) # <<<<<<<<<<<<<< + * elif self.free_data: + * if self.dtype_is_object: + */ + __pyx_v_self->callback_free_data(__pyx_v_self->data); + + /* "View.MemoryView":212 + * + * def __dealloc__(array self): + * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< + * self.callback_free_data(self.data) + * elif self.free_data: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":214 + * if self.callback_free_data != NULL: + * self.callback_free_data(self.data) + * elif self.free_data: # <<<<<<<<<<<<<< + * if self.dtype_is_object: + * refcount_objects_in_slice(self.data, self._shape, + */ + __pyx_t_1 = (__pyx_v_self->free_data != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":215 + * self.callback_free_data(self.data) + * elif self.free_data: + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * refcount_objects_in_slice(self.data, self._shape, + * self._strides, self.ndim, False) + */ + __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":216 + * elif self.free_data: + * if self.dtype_is_object: + * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< + * self._strides, self.ndim, False) + * free(self.data) + */ + __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); + + /* "View.MemoryView":215 + * self.callback_free_data(self.data) + * elif self.free_data: + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * refcount_objects_in_slice(self.data, self._shape, + * self._strides, self.ndim, False) + */ + } + + /* "View.MemoryView":218 + * refcount_objects_in_slice(self.data, self._shape, + * self._strides, self.ndim, False) + * free(self.data) # <<<<<<<<<<<<<< + * PyObject_Free(self._shape) + * + */ + free(__pyx_v_self->data); + + /* "View.MemoryView":214 + * if self.callback_free_data != NULL: + * self.callback_free_data(self.data) + * elif self.free_data: # <<<<<<<<<<<<<< + * if self.dtype_is_object: + * refcount_objects_in_slice(self.data, self._shape, + */ + } + __pyx_L3:; + + /* "View.MemoryView":219 + * self._strides, self.ndim, False) + * free(self.data) + * PyObject_Free(self._shape) # <<<<<<<<<<<<<< + * + * @property + */ + PyObject_Free(__pyx_v_self->_shape); + + /* "View.MemoryView":211 + * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") + * + * def __dealloc__(array self): # <<<<<<<<<<<<<< + * if self.callback_free_data != NULL: + * self.callback_free_data(self.data) + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "View.MemoryView":222 + * + * @property + * def memview(self): # <<<<<<<<<<<<<< + * return self.get_memview() + * + */ + +/* Python wrapper */ +static 
PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":223 + * @property + * def memview(self): + * return self.get_memview() # <<<<<<<<<<<<<< + * + * @cname('get_memview') + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "View.MemoryView":222 + * + * @property + * def memview(self): # <<<<<<<<<<<<<< + * return self.get_memview() + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":226 + * + * @cname('get_memview') + * cdef get_memview(self): # <<<<<<<<<<<<<< + * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE + * return memoryview(self, flags, self.dtype_is_object) + */ + +static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { + int __pyx_v_flags; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("get_memview", 0); + + /* "View.MemoryView":227 + * @cname('get_memview') + * cdef get_memview(self): + * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< + * return memoryview(self, flags, self.dtype_is_object) + * + */ + __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); + + /* "View.MemoryView":228 + * cdef get_memview(self): + * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE + * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< + * + * def __len__(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(((PyObject *)__pyx_v_self)); + __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); + PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_2); + PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); + 
__pyx_t_1 = 0; + __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":226 + * + * @cname('get_memview') + * cdef get_memview(self): # <<<<<<<<<<<<<< + * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE + * return memoryview(self, flags, self.dtype_is_object) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":230 + * return memoryview(self, flags, self.dtype_is_object) + * + * def __len__(self): # <<<<<<<<<<<<<< + * return self._shape[0] + * + */ + +/* Python wrapper */ +static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ +static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { + Py_ssize_t __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); + __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { + Py_ssize_t __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__len__", 0); + + /* "View.MemoryView":231 + * + * def __len__(self): + * return self._shape[0] # <<<<<<<<<<<<<< + * + * def __getattr__(self, attr): + */ + __pyx_r = (__pyx_v_self->_shape[0]); + goto __pyx_L0; + + /* "View.MemoryView":230 + * return memoryview(self, flags, self.dtype_is_object) + * + * def __len__(self): # <<<<<<<<<<<<<< + * return self._shape[0] + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":233 + * return self._shape[0] + * + * def __getattr__(self, attr): # <<<<<<<<<<<<<< + * return getattr(self.memview, attr) + * + */ + +/* Python wrapper */ +static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ +static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); + __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__getattr__", 0); + + /* "View.MemoryView":234 + * + * def __getattr__(self, attr): + * return getattr(self.memview, attr) # <<<<<<<<<<<<<< + * + * def __getitem__(self, item): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), 
__pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":233 + * return self._shape[0] + * + * def __getattr__(self, attr): # <<<<<<<<<<<<<< + * return getattr(self.memview, attr) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":236 + * return getattr(self.memview, attr) + * + * def __getitem__(self, item): # <<<<<<<<<<<<<< + * return self.memview[item] + * + */ + +/* Python wrapper */ +static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ +static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); + __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__getitem__", 0); + + /* "View.MemoryView":237 + * + * def __getitem__(self, item): + * return self.memview[item] # <<<<<<<<<<<<<< + * + * def __setitem__(self, item, value): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":236 + * return getattr(self.memview, attr) + * + * def __getitem__(self, item): # <<<<<<<<<<<<<< + * return self.memview[item] + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":239 + * return self.memview[item] + * + * def __setitem__(self, item, value): # <<<<<<<<<<<<<< + * self.memview[item] = value + * + */ + +/* Python wrapper */ +static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ +static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); + 
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setitem__", 0); + + /* "View.MemoryView":240 + * + * def __setitem__(self, item, value): + * self.memview[item] = value # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "View.MemoryView":239 + * return self.memview[item] + * + * def __setitem__(self, item, value): # <<<<<<<<<<<<<< + * self.memview[item] = value + * + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__reduce_cython__", 0); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 2, __pyx_L1_error) + + /* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", 
__pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ +static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 4, __pyx_L1_error) + + /* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":244 + * + * @cname("__pyx_array_new") + * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< + * char *mode, char *buf): + * cdef array result + */ + +static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { + struct __pyx_array_obj *__pyx_v_result = 0; + struct __pyx_array_obj *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("array_cwrapper", 0); + + /* "View.MemoryView":248 + * cdef array result + * + * if buf == NULL: # <<<<<<<<<<<<<< + * result = array(shape, itemsize, format, mode.decode('ASCII')) + * else: + */ + __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":249 + * + * if buf 
== NULL: + * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< + * else: + * result = array(shape, itemsize, format, mode.decode('ASCII'), + */ + __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_INCREF(__pyx_v_shape); + __Pyx_GIVEREF(__pyx_v_shape); + PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); + __Pyx_GIVEREF(__pyx_t_2); + PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); + __pyx_t_2 = 0; + __pyx_t_3 = 0; + __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); + __pyx_t_4 = 0; + + /* "View.MemoryView":248 + * cdef array result + * + * if buf == NULL: # <<<<<<<<<<<<<< + * result = array(shape, itemsize, format, mode.decode('ASCII')) + * else: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":251 + * result = array(shape, itemsize, format, mode.decode('ASCII')) + * else: + * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< + * allocate_buffer=False) + * result.data = buf + */ + /*else*/ { + __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_INCREF(__pyx_v_shape); + __Pyx_GIVEREF(__pyx_v_shape); + PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); + __Pyx_GIVEREF(__pyx_t_5); + PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); + __pyx_t_4 = 0; + __pyx_t_5 = 0; + __pyx_t_3 = 0; + + /* "View.MemoryView":252 + * else: + * result = array(shape, itemsize, format, mode.decode('ASCII'), + * allocate_buffer=False) # <<<<<<<<<<<<<< + * result.data = buf + * + */ + __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error) + + /* "View.MemoryView":251 + * result = array(shape, itemsize, format, mode.decode('ASCII')) + * else: + * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< + * allocate_buffer=False) + * result.data = buf + */ + __pyx_t_5 = 
__Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); + __pyx_t_5 = 0; + + /* "View.MemoryView":253 + * result = array(shape, itemsize, format, mode.decode('ASCII'), + * allocate_buffer=False) + * result.data = buf # <<<<<<<<<<<<<< + * + * return result + */ + __pyx_v_result->data = __pyx_v_buf; + } + __pyx_L3:; + + /* "View.MemoryView":255 + * result.data = buf + * + * return result # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(((PyObject *)__pyx_r)); + __Pyx_INCREF(((PyObject *)__pyx_v_result)); + __pyx_r = __pyx_v_result; + goto __pyx_L0; + + /* "View.MemoryView":244 + * + * @cname("__pyx_array_new") + * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< + * char *mode, char *buf): + * cdef array result + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_result); + __Pyx_XGIVEREF((PyObject *)__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":281 + * cdef class Enum(object): + * cdef object name + * def __init__(self, name): # <<<<<<<<<<<<<< + * self.name = name + * def __repr__(self): + */ + +/* Python wrapper */ +static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_name = 0; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; + PyObject* values[1] = {0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + } + __pyx_v_name = values[0]; + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return -1; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); + + /* function 
exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__init__", 0); + + /* "View.MemoryView":282 + * cdef object name + * def __init__(self, name): + * self.name = name # <<<<<<<<<<<<<< + * def __repr__(self): + * return self.name + */ + __Pyx_INCREF(__pyx_v_name); + __Pyx_GIVEREF(__pyx_v_name); + __Pyx_GOTREF(__pyx_v_self->name); + __Pyx_DECREF(__pyx_v_self->name); + __pyx_v_self->name = __pyx_v_name; + + /* "View.MemoryView":281 + * cdef class Enum(object): + * cdef object name + * def __init__(self, name): # <<<<<<<<<<<<<< + * self.name = name + * def __repr__(self): + */ + + /* function exit code */ + __pyx_r = 0; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":283 + * def __init__(self, name): + * self.name = name + * def __repr__(self): # <<<<<<<<<<<<<< + * return self.name + * + */ + +/* Python wrapper */ +static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); + __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__repr__", 0); + + /* "View.MemoryView":284 + * self.name = name + * def __repr__(self): + * return self.name # <<<<<<<<<<<<<< + * + * cdef generic = Enum("") + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self->name); + __pyx_r = __pyx_v_self->name; + goto __pyx_L0; + + /* "View.MemoryView":283 + * def __init__(self, name): + * self.name = name + * def __repr__(self): # <<<<<<<<<<<<<< + * return self.name + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * cdef tuple state + * cdef object _dict + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { + PyObject *__pyx_v_state = 0; + PyObject *__pyx_v__dict = 0; + int __pyx_v_use_setstate; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + int __pyx_t_3; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__reduce_cython__", 
0); + + /* "(tree fragment)":5 + * cdef object _dict + * cdef bint use_setstate + * state = (self.name,) # <<<<<<<<<<<<<< + * _dict = getattr(self, '__dict__', None) + * if _dict is not None: + */ + __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(__pyx_v_self->name); + __Pyx_GIVEREF(__pyx_v_self->name); + PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); + __pyx_v_state = ((PyObject*)__pyx_t_1); + __pyx_t_1 = 0; + + /* "(tree fragment)":6 + * cdef bint use_setstate + * state = (self.name,) + * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< + * if _dict is not None: + * state += (_dict,) + */ + __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v__dict = __pyx_t_1; + __pyx_t_1 = 0; + + /* "(tree fragment)":7 + * state = (self.name,) + * _dict = getattr(self, '__dict__', None) + * if _dict is not None: # <<<<<<<<<<<<<< + * state += (_dict,) + * use_setstate = True + */ + __pyx_t_2 = (__pyx_v__dict != Py_None); + __pyx_t_3 = (__pyx_t_2 != 0); + if (__pyx_t_3) { + + /* "(tree fragment)":8 + * _dict = getattr(self, '__dict__', None) + * if _dict is not None: + * state += (_dict,) # <<<<<<<<<<<<<< + * use_setstate = True + * else: + */ + __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(__pyx_v__dict); + __Pyx_GIVEREF(__pyx_v__dict); + PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); + __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); + __pyx_t_4 = 0; + + /* "(tree fragment)":9 + * if _dict is not None: + * state += (_dict,) + * use_setstate = True # <<<<<<<<<<<<<< + * else: + * use_setstate = self.name is not None + */ + __pyx_v_use_setstate = 1; + + /* "(tree fragment)":7 + * state = (self.name,) + * _dict = getattr(self, '__dict__', None) + * if _dict is not None: # <<<<<<<<<<<<<< + * state += (_dict,) + * use_setstate = True + */ + goto __pyx_L3; + } + + /* "(tree fragment)":11 + * use_setstate = True + * else: + * use_setstate = self.name is not None # <<<<<<<<<<<<<< + * if use_setstate: + * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state + */ + /*else*/ { + __pyx_t_3 = (__pyx_v_self->name != Py_None); + __pyx_v_use_setstate = __pyx_t_3; + } + __pyx_L3:; + + /* "(tree fragment)":12 + * else: + * use_setstate = self.name is not None + * if use_setstate: # <<<<<<<<<<<<<< + * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state + * else: + */ + __pyx_t_3 = (__pyx_v_use_setstate != 0); + if (__pyx_t_3) { + + /* "(tree fragment)":13 + * use_setstate = self.name is not None + * if use_setstate: + * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< + * else: + * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + 
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + __Pyx_INCREF(__pyx_int_184977713); + __Pyx_GIVEREF(__pyx_int_184977713); + PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); + __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(Py_None); + PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); + __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); + __Pyx_INCREF(__pyx_v_state); + __Pyx_GIVEREF(__pyx_v_state); + PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); + __pyx_t_4 = 0; + __pyx_t_1 = 0; + __pyx_r = __pyx_t_5; + __pyx_t_5 = 0; + goto __pyx_L0; + + /* "(tree fragment)":12 + * else: + * use_setstate = self.name is not None + * if use_setstate: # <<<<<<<<<<<<<< + * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state + * else: + */ + } + + /* "(tree fragment)":15 + * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state + * else: + * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * __pyx_unpickle_Enum__set_state(self, __pyx_state) + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + __Pyx_INCREF(__pyx_int_184977713); + __Pyx_GIVEREF(__pyx_int_184977713); + PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); + __Pyx_INCREF(__pyx_v_state); + __Pyx_GIVEREF(__pyx_v_state); + PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); + __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_5); + PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); + __pyx_t_5 = 0; + __pyx_t_1 = 0; + __pyx_r = __pyx_t_4; + __pyx_t_4 = 0; + goto __pyx_L0; + } + + /* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * cdef tuple state + * cdef object _dict + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_state); + __Pyx_XDECREF(__pyx_v__dict); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":16 + * else: + * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * __pyx_unpickle_Enum__set_state(self, __pyx_state) + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ +static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setstate_cython__ 
(wrapper)", 0); + __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":17 + * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) + * def __setstate_cython__(self, __pyx_state): + * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< + */ + if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error) + __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "(tree fragment)":16 + * else: + * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * __pyx_unpickle_Enum__set_state(self, __pyx_state) + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":298 + * + * @cname('__pyx_align_pointer') + * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< + * "Align pointer memory on a given boundary" + * cdef Py_intptr_t aligned_p = memory + */ + +static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { + Py_intptr_t __pyx_v_aligned_p; + size_t __pyx_v_offset; + void *__pyx_r; + int __pyx_t_1; + + /* "View.MemoryView":300 + * cdef void *align_pointer(void *memory, size_t alignment) nogil: + * "Align pointer memory on a given boundary" + * cdef Py_intptr_t aligned_p = memory # <<<<<<<<<<<<<< + * cdef size_t offset + * + */ + __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); + + /* "View.MemoryView":304 + * + * with cython.cdivision(True): + * offset = aligned_p % alignment # <<<<<<<<<<<<<< + * + * if offset > 0: + */ + __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); + + /* "View.MemoryView":306 + * offset = aligned_p % alignment + * + * if offset > 0: # <<<<<<<<<<<<<< + * aligned_p += alignment - offset + * + */ + __pyx_t_1 = ((__pyx_v_offset > 0) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":307 + * + * if offset > 0: + * aligned_p += alignment - offset # <<<<<<<<<<<<<< + * + * return aligned_p + */ + __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); + + /* "View.MemoryView":306 + * offset = aligned_p % alignment + * + * if offset > 0: # <<<<<<<<<<<<<< + * aligned_p += alignment - offset + * + */ + } + + /* "View.MemoryView":309 + * aligned_p += alignment - offset + * + * return aligned_p # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = ((void *)__pyx_v_aligned_p); + goto __pyx_L0; + 
+ /* "View.MemoryView":298 + * + * @cname('__pyx_align_pointer') + * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< + * "Align pointer memory on a given boundary" + * cdef Py_intptr_t aligned_p = memory + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":345 + * cdef __Pyx_TypeInfo *typeinfo + * + * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< + * self.obj = obj + * self.flags = flags + */ + +/* Python wrapper */ +static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_obj = 0; + int __pyx_v_flags; + int __pyx_v_dtype_is_object; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; + PyObject* values[3] = {0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object); + if (value) { values[2] = value; kw_args--; } + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error) + } + } else { + switch (PyTuple_GET_SIZE(__pyx_args)) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + break; + default: goto __pyx_L5_argtuple_error; + } + } + __pyx_v_obj = values[0]; + __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) + if (values[2]) { + __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) + } else { + __pyx_v_dtype_is_object = ((int)0); + } + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return -1; + __pyx_L4_argument_unpacking_done:; + __pyx_r = 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__cinit__", 0); + + /* "View.MemoryView":346 + * + * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): + * self.obj = obj # <<<<<<<<<<<<<< + * self.flags = flags + * if type(self) is memoryview or obj is not None: + */ + __Pyx_INCREF(__pyx_v_obj); + __Pyx_GIVEREF(__pyx_v_obj); + __Pyx_GOTREF(__pyx_v_self->obj); + __Pyx_DECREF(__pyx_v_self->obj); + __pyx_v_self->obj = __pyx_v_obj; + + /* "View.MemoryView":347 + * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): + * self.obj = obj + * self.flags = flags # <<<<<<<<<<<<<< + * if type(self) is memoryview or obj is not None: + * __Pyx_GetBuffer(obj, &self.view, flags) + */ + __pyx_v_self->flags = __pyx_v_flags; + + /* "View.MemoryView":348 + * self.obj = obj + * self.flags = flags + * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< + * __Pyx_GetBuffer(obj, &self.view, flags) + * if self.view.obj == NULL: + */ + __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); + __pyx_t_3 = (__pyx_t_2 != 0); + if (!__pyx_t_3) { + } else { + __pyx_t_1 = __pyx_t_3; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_3 = (__pyx_v_obj != Py_None); + __pyx_t_2 = (__pyx_t_3 != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L4_bool_binop_done:; + if (__pyx_t_1) { + + /* "View.MemoryView":349 + * self.flags = flags + * if type(self) is memoryview or obj is not None: + * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< + * if self.view.obj == NULL: + * (<__pyx_buffer *> &self.view).obj = Py_None + */ + __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error) + + /* "View.MemoryView":350 + * if type(self) is memoryview or obj is not None: + * __Pyx_GetBuffer(obj, &self.view, flags) + * if self.view.obj == NULL: # <<<<<<<<<<<<<< + * (<__pyx_buffer *> &self.view).obj = Py_None + * Py_INCREF(Py_None) + */ + __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":351 + * __Pyx_GetBuffer(obj, &self.view, flags) + * if self.view.obj == NULL: + * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< + * Py_INCREF(Py_None) + * + */ + ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; + + /* "View.MemoryView":352 + * if self.view.obj == NULL: + * (<__pyx_buffer *> &self.view).obj = Py_None + * Py_INCREF(Py_None) # <<<<<<<<<<<<<< + * + * global __pyx_memoryview_thread_locks_used + */ + Py_INCREF(Py_None); + + /* "View.MemoryView":350 + * if type(self) is memoryview or obj is not None: + * __Pyx_GetBuffer(obj, &self.view, flags) + * if self.view.obj == NULL: # <<<<<<<<<<<<<< + * (<__pyx_buffer *> &self.view).obj = Py_None + * Py_INCREF(Py_None) + */ + } + + /* "View.MemoryView":348 + * self.obj = obj + * self.flags = flags + * 
if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< + * __Pyx_GetBuffer(obj, &self.view, flags) + * if self.view.obj == NULL: + */ + } + + /* "View.MemoryView":355 + * + * global __pyx_memoryview_thread_locks_used + * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + * __pyx_memoryview_thread_locks_used += 1 + */ + __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":356 + * global __pyx_memoryview_thread_locks_used + * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks_used += 1 + * if self.lock is NULL: + */ + __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); + + /* "View.MemoryView":357 + * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< + * if self.lock is NULL: + * self.lock = PyThread_allocate_lock() + */ + __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); + + /* "View.MemoryView":355 + * + * global __pyx_memoryview_thread_locks_used + * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + * __pyx_memoryview_thread_locks_used += 1 + */ + } + + /* "View.MemoryView":358 + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + * __pyx_memoryview_thread_locks_used += 1 + * if self.lock is NULL: # <<<<<<<<<<<<<< + * self.lock = PyThread_allocate_lock() + * if self.lock is NULL: + */ + __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":359 + * __pyx_memoryview_thread_locks_used += 1 + * if self.lock is NULL: + * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< + * if self.lock is NULL: + * raise MemoryError + */ + __pyx_v_self->lock = PyThread_allocate_lock(); + + /* "View.MemoryView":360 + * if self.lock is NULL: + * self.lock = PyThread_allocate_lock() + * if self.lock is NULL: # <<<<<<<<<<<<<< + * raise MemoryError + * + */ + __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":361 + * self.lock = PyThread_allocate_lock() + * if self.lock is NULL: + * raise MemoryError # <<<<<<<<<<<<<< + * + * if flags & PyBUF_FORMAT: + */ + PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error) + + /* "View.MemoryView":360 + * if self.lock is NULL: + * self.lock = PyThread_allocate_lock() + * if self.lock is NULL: # <<<<<<<<<<<<<< + * raise MemoryError + * + */ + } + + /* "View.MemoryView":358 + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + * __pyx_memoryview_thread_locks_used += 1 + * if self.lock is NULL: # <<<<<<<<<<<<<< + * self.lock = PyThread_allocate_lock() + * if self.lock is NULL: + */ + } + + /* "View.MemoryView":363 + * raise MemoryError + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":364 + * + * if flags & PyBUF_FORMAT: + * self.dtype_is_object = (self.view.format[0] == 
b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< + * else: + * self.dtype_is_object = dtype_is_object + */ + __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L11_bool_binop_done; + } + __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L11_bool_binop_done:; + __pyx_v_self->dtype_is_object = __pyx_t_1; + + /* "View.MemoryView":363 + * raise MemoryError + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') + * else: + */ + goto __pyx_L10; + } + + /* "View.MemoryView":366 + * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') + * else: + * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< + * + * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( + */ + /*else*/ { + __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; + } + __pyx_L10:; + + /* "View.MemoryView":368 + * self.dtype_is_object = dtype_is_object + * + * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< + * &self.acquisition_count[0], sizeof(__pyx_atomic_int)) + * self.typeinfo = NULL + */ + __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); + + /* "View.MemoryView":370 + * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( + * &self.acquisition_count[0], sizeof(__pyx_atomic_int)) + * self.typeinfo = NULL # <<<<<<<<<<<<<< + * + * def __dealloc__(memoryview self): + */ + __pyx_v_self->typeinfo = NULL; + + /* "View.MemoryView":345 + * cdef __Pyx_TypeInfo *typeinfo + * + * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< + * self.obj = obj + * self.flags = flags + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":372 + * self.typeinfo = NULL + * + * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< + * if self.obj is not None: + * __Pyx_ReleaseBuffer(&self.view) + */ + +/* Python wrapper */ +static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ +static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); + __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { + int __pyx_v_i; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + int __pyx_t_5; + PyThread_type_lock __pyx_t_6; + PyThread_type_lock __pyx_t_7; + __Pyx_RefNannySetupContext("__dealloc__", 0); + + /* "View.MemoryView":373 + * + * def __dealloc__(memoryview self): + * if self.obj is not None: # <<<<<<<<<<<<<< + * __Pyx_ReleaseBuffer(&self.view) + * elif (<__pyx_buffer *> &self.view).obj == Py_None: + */ + __pyx_t_1 = (__pyx_v_self->obj != Py_None); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* 
"View.MemoryView":374 + * def __dealloc__(memoryview self): + * if self.obj is not None: + * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< + * elif (<__pyx_buffer *> &self.view).obj == Py_None: + * + */ + __Pyx_ReleaseBuffer((&__pyx_v_self->view)); + + /* "View.MemoryView":373 + * + * def __dealloc__(memoryview self): + * if self.obj is not None: # <<<<<<<<<<<<<< + * __Pyx_ReleaseBuffer(&self.view) + * elif (<__pyx_buffer *> &self.view).obj == Py_None: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":375 + * if self.obj is not None: + * __Pyx_ReleaseBuffer(&self.view) + * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< + * + * (<__pyx_buffer *> &self.view).obj = NULL + */ + __pyx_t_2 = ((((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":377 + * elif (<__pyx_buffer *> &self.view).obj == Py_None: + * + * (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<< + * Py_DECREF(Py_None) + * + */ + ((Py_buffer *)(&__pyx_v_self->view))->obj = NULL; + + /* "View.MemoryView":378 + * + * (<__pyx_buffer *> &self.view).obj = NULL + * Py_DECREF(Py_None) # <<<<<<<<<<<<<< + * + * cdef int i + */ + Py_DECREF(Py_None); + + /* "View.MemoryView":375 + * if self.obj is not None: + * __Pyx_ReleaseBuffer(&self.view) + * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< + * + * (<__pyx_buffer *> &self.view).obj = NULL + */ + } + __pyx_L3:; + + /* "View.MemoryView":382 + * cdef int i + * global __pyx_memoryview_thread_locks_used + * if self.lock != NULL: # <<<<<<<<<<<<<< + * for i in range(__pyx_memoryview_thread_locks_used): + * if __pyx_memoryview_thread_locks[i] is self.lock: + */ + __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":383 + * global __pyx_memoryview_thread_locks_used + * if self.lock != NULL: + * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< + * if __pyx_memoryview_thread_locks[i] is self.lock: + * __pyx_memoryview_thread_locks_used -= 1 + */ + __pyx_t_3 = __pyx_memoryview_thread_locks_used; + __pyx_t_4 = __pyx_t_3; + for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { + __pyx_v_i = __pyx_t_5; + + /* "View.MemoryView":384 + * if self.lock != NULL: + * for i in range(__pyx_memoryview_thread_locks_used): + * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks_used -= 1 + * if i != __pyx_memoryview_thread_locks_used: + */ + __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":385 + * for i in range(__pyx_memoryview_thread_locks_used): + * if __pyx_memoryview_thread_locks[i] is self.lock: + * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< + * if i != __pyx_memoryview_thread_locks_used: + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + */ + __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); + + /* "View.MemoryView":386 + * if __pyx_memoryview_thread_locks[i] is self.lock: + * __pyx_memoryview_thread_locks_used -= 1 + * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) + */ + __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":388 + 
* if i != __pyx_memoryview_thread_locks_used: + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< + * break + * else: + */ + __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); + __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]); + + /* "View.MemoryView":387 + * __pyx_memoryview_thread_locks_used -= 1 + * if i != __pyx_memoryview_thread_locks_used: + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) + * break + */ + (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6; + (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7; + + /* "View.MemoryView":386 + * if __pyx_memoryview_thread_locks[i] is self.lock: + * __pyx_memoryview_thread_locks_used -= 1 + * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) + */ + } + + /* "View.MemoryView":389 + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) + * break # <<<<<<<<<<<<<< + * else: + * PyThread_free_lock(self.lock) + */ + goto __pyx_L6_break; + + /* "View.MemoryView":384 + * if self.lock != NULL: + * for i in range(__pyx_memoryview_thread_locks_used): + * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks_used -= 1 + * if i != __pyx_memoryview_thread_locks_used: + */ + } + } + /*else*/ { + + /* "View.MemoryView":391 + * break + * else: + * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< + * + * cdef char *get_item_pointer(memoryview self, object index) except NULL: + */ + PyThread_free_lock(__pyx_v_self->lock); + } + __pyx_L6_break:; + + /* "View.MemoryView":382 + * cdef int i + * global __pyx_memoryview_thread_locks_used + * if self.lock != NULL: # <<<<<<<<<<<<<< + * for i in range(__pyx_memoryview_thread_locks_used): + * if __pyx_memoryview_thread_locks[i] is self.lock: + */ + } + + /* "View.MemoryView":372 + * self.typeinfo = NULL + * + * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< + * if self.obj is not None: + * __Pyx_ReleaseBuffer(&self.view) + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "View.MemoryView":393 + * PyThread_free_lock(self.lock) + * + * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< + * cdef Py_ssize_t dim + * cdef char *itemp = self.view.buf + */ + +static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { + Py_ssize_t __pyx_v_dim; + char *__pyx_v_itemp; + PyObject *__pyx_v_idx = NULL; + char *__pyx_r; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + Py_ssize_t __pyx_t_3; + PyObject *(*__pyx_t_4)(PyObject *); + PyObject *__pyx_t_5 = NULL; + Py_ssize_t __pyx_t_6; + char *__pyx_t_7; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("get_item_pointer", 0); 
+ + /* "View.MemoryView":395 + * cdef char *get_item_pointer(memoryview self, object index) except NULL: + * cdef Py_ssize_t dim + * cdef char *itemp = self.view.buf # <<<<<<<<<<<<<< + * + * for dim, idx in enumerate(index): + */ + __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); + + /* "View.MemoryView":397 + * cdef char *itemp = self.view.buf + * + * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< + * itemp = pybuffer_index(&self.view, itemp, idx, dim) + * + */ + __pyx_t_1 = 0; + if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { + __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; + __pyx_t_4 = NULL; + } else { + __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 397, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 397, __pyx_L1_error) + } + for (;;) { + if (likely(!__pyx_t_4)) { + if (likely(PyList_CheckExact(__pyx_t_2))) { + if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) + #else + __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + #endif + } else { + if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) + #else + __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + #endif + } + } else { + __pyx_t_5 = __pyx_t_4(__pyx_t_2); + if (unlikely(!__pyx_t_5)) { + PyObject* exc_type = PyErr_Occurred(); + if (exc_type) { + if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); + else __PYX_ERR(1, 397, __pyx_L1_error) + } + break; + } + __Pyx_GOTREF(__pyx_t_5); + } + __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); + __pyx_t_5 = 0; + __pyx_v_dim = __pyx_t_1; + __pyx_t_1 = (__pyx_t_1 + 1); + + /* "View.MemoryView":398 + * + * for dim, idx in enumerate(index): + * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< + * + * return itemp + */ + __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 398, __pyx_L1_error) + __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 398, __pyx_L1_error) + __pyx_v_itemp = __pyx_t_7; + + /* "View.MemoryView":397 + * cdef char *itemp = self.view.buf + * + * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< + * itemp = pybuffer_index(&self.view, itemp, idx, dim) + * + */ + } + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "View.MemoryView":400 + * itemp = pybuffer_index(&self.view, itemp, idx, dim) + * + * return itemp # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = __pyx_v_itemp; + goto __pyx_L0; + + /* "View.MemoryView":393 + * PyThread_free_lock(self.lock) + * + * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< + * cdef Py_ssize_t dim + * cdef char *itemp = self.view.buf + */ + + /* function exit code */ + 
__pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_idx); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":403 + * + * + * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< + * if index is Ellipsis: + * return self + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ +static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { + PyObject *__pyx_v_have_slices = NULL; + PyObject *__pyx_v_indices = NULL; + char *__pyx_v_itemp; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + char *__pyx_t_6; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__getitem__", 0); + + /* "View.MemoryView":404 + * + * def __getitem__(memoryview self, object index): + * if index is Ellipsis: # <<<<<<<<<<<<<< + * return self + * + */ + __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":405 + * def __getitem__(memoryview self, object index): + * if index is Ellipsis: + * return self # <<<<<<<<<<<<<< + * + * have_slices, indices = _unellipsify(index, self.view.ndim) + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_self)); + __pyx_r = ((PyObject *)__pyx_v_self); + goto __pyx_L0; + + /* "View.MemoryView":404 + * + * def __getitem__(memoryview self, object index): + * if index is Ellipsis: # <<<<<<<<<<<<<< + * return self + * + */ + } + + /* "View.MemoryView":407 + * return self + * + * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< + * + * cdef char *itemp + */ + __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (likely(__pyx_t_3 != Py_None)) { + PyObject* sequence = __pyx_t_3; + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); + if (unlikely(size != 2)) { + if (size > 2) __Pyx_RaiseTooManyValuesError(2); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(1, 407, __pyx_L1_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); + __Pyx_INCREF(__pyx_t_4); + __Pyx_INCREF(__pyx_t_5); + #else + __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 407, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 407, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + #endif + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } else { + 
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 407, __pyx_L1_error) + } + __pyx_v_have_slices = __pyx_t_4; + __pyx_t_4 = 0; + __pyx_v_indices = __pyx_t_5; + __pyx_t_5 = 0; + + /* "View.MemoryView":410 + * + * cdef char *itemp + * if have_slices: # <<<<<<<<<<<<<< + * return memview_slice(self, indices) + * else: + */ + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 410, __pyx_L1_error) + if (__pyx_t_2) { + + /* "View.MemoryView":411 + * cdef char *itemp + * if have_slices: + * return memview_slice(self, indices) # <<<<<<<<<<<<<< + * else: + * itemp = self.get_item_pointer(indices) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "View.MemoryView":410 + * + * cdef char *itemp + * if have_slices: # <<<<<<<<<<<<<< + * return memview_slice(self, indices) + * else: + */ + } + + /* "View.MemoryView":413 + * return memview_slice(self, indices) + * else: + * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< + * return self.convert_item_to_object(itemp) + * + */ + /*else*/ { + __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 413, __pyx_L1_error) + __pyx_v_itemp = __pyx_t_6; + + /* "View.MemoryView":414 + * else: + * itemp = self.get_item_pointer(indices) + * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< + * + * def __setitem__(memoryview self, object index, object value): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + } + + /* "View.MemoryView":403 + * + * + * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< + * if index is Ellipsis: + * return self + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_have_slices); + __Pyx_XDECREF(__pyx_v_indices); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":416 + * return self.convert_item_to_object(itemp) + * + * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< + * if self.view.readonly: + * raise TypeError("Cannot assign to read-only memoryview") + */ + +/* Python wrapper */ +static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ +static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { + PyObject *__pyx_v_have_slices = NULL; + PyObject *__pyx_v_obj = NULL; + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setitem__", 0); + __Pyx_INCREF(__pyx_v_index); + + /* "View.MemoryView":417 + * + * def __setitem__(memoryview self, object index, object value): + * if self.view.readonly: # <<<<<<<<<<<<<< + * raise TypeError("Cannot assign to read-only memoryview") + * + */ + __pyx_t_1 = (__pyx_v_self->view.readonly != 0); + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":418 + * def __setitem__(memoryview self, object index, object value): + * if self.view.readonly: + * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< + * + * have_slices, index = _unellipsify(index, self.view.ndim) + */ + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(1, 418, __pyx_L1_error) + + /* "View.MemoryView":417 + * + * def __setitem__(memoryview self, object index, object value): + * if self.view.readonly: # <<<<<<<<<<<<<< + * raise TypeError("Cannot assign to read-only memoryview") + * + */ + } + + /* "View.MemoryView":420 + * raise TypeError("Cannot assign to read-only memoryview") + * + * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< + * + * if have_slices: + */ + __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 420, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (likely(__pyx_t_2 != Py_None)) { + PyObject* sequence = __pyx_t_2; + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); + if (unlikely(size != 2)) { + if (size > 2) __Pyx_RaiseTooManyValuesError(2); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(1, 420, __pyx_L1_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(__pyx_t_4); + #else + __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 420, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 420, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + #endif + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } else { + __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 420, __pyx_L1_error) + } + __pyx_v_have_slices = __pyx_t_3; + __pyx_t_3 = 0; + __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4); + __pyx_t_4 = 0; + + /* "View.MemoryView":422 + * have_slices, index = _unellipsify(index, self.view.ndim) + * + * if have_slices: # <<<<<<<<<<<<<< + * obj = self.is_slice(value) + * if obj: + */ + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 422, __pyx_L1_error) + if (__pyx_t_1) { + + /* "View.MemoryView":423 + * + * if have_slices: + * obj = self.is_slice(value) # <<<<<<<<<<<<<< + * if obj: + * self.setitem_slice_assignment(self[index], obj) + */ + __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview 
*)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_v_obj = __pyx_t_2; + __pyx_t_2 = 0; + + /* "View.MemoryView":424 + * if have_slices: + * obj = self.is_slice(value) + * if obj: # <<<<<<<<<<<<<< + * self.setitem_slice_assignment(self[index], obj) + * else: + */ + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 424, __pyx_L1_error) + if (__pyx_t_1) { + + /* "View.MemoryView":425 + * obj = self.is_slice(value) + * if obj: + * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< + * else: + * self.setitem_slice_assign_scalar(self[index], value) + */ + __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 425, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "View.MemoryView":424 + * if have_slices: + * obj = self.is_slice(value) + * if obj: # <<<<<<<<<<<<<< + * self.setitem_slice_assignment(self[index], obj) + * else: + */ + goto __pyx_L5; + } + + /* "View.MemoryView":427 + * self.setitem_slice_assignment(self[index], obj) + * else: + * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< + * else: + * self.setitem_indexed(index, value) + */ + /*else*/ { + __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 427, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 427, __pyx_L1_error) + __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 427, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + __pyx_L5:; + + /* "View.MemoryView":422 + * have_slices, index = _unellipsify(index, self.view.ndim) + * + * if have_slices: # <<<<<<<<<<<<<< + * obj = self.is_slice(value) + * if obj: + */ + goto __pyx_L4; + } + + /* "View.MemoryView":429 + * self.setitem_slice_assign_scalar(self[index], value) + * else: + * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< + * + * cdef is_slice(self, obj): + */ + /*else*/ { + __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 429, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + __pyx_L4:; + + /* "View.MemoryView":416 + * return self.convert_item_to_object(itemp) + * + * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< + * if self.view.readonly: + * raise TypeError("Cannot assign to read-only memoryview") + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; 
+ __Pyx_XDECREF(__pyx_v_have_slices); + __Pyx_XDECREF(__pyx_v_obj); + __Pyx_XDECREF(__pyx_v_index); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":431 + * self.setitem_indexed(index, value) + * + * cdef is_slice(self, obj): # <<<<<<<<<<<<<< + * if not isinstance(obj, memoryview): + * try: + */ + +static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + int __pyx_t_9; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("is_slice", 0); + __Pyx_INCREF(__pyx_v_obj); + + /* "View.MemoryView":432 + * + * cdef is_slice(self, obj): + * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< + * try: + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + */ + __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); + __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":433 + * cdef is_slice(self, obj): + * if not isinstance(obj, memoryview): + * try: # <<<<<<<<<<<<<< + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + * self.dtype_is_object) + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); + __Pyx_XGOTREF(__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_4); + __Pyx_XGOTREF(__pyx_t_5); + /*try:*/ { + + /* "View.MemoryView":434 + * if not isinstance(obj, memoryview): + * try: + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< + * self.dtype_is_object) + * except TypeError: + */ + __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 434, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_6); + + /* "View.MemoryView":435 + * try: + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + * self.dtype_is_object) # <<<<<<<<<<<<<< + * except TypeError: + * return None + */ + __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 435, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_7); + + /* "View.MemoryView":434 + * if not isinstance(obj, memoryview): + * try: + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< + * self.dtype_is_object) + * except TypeError: + */ + __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 434, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_INCREF(__pyx_v_obj); + __Pyx_GIVEREF(__pyx_v_obj); + PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); + __Pyx_GIVEREF(__pyx_t_7); + PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); + __pyx_t_6 = 0; + __pyx_t_7 = 0; + __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 434, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); + __pyx_t_7 = 0; + + /* "View.MemoryView":433 + * cdef is_slice(self, obj): + * if not isinstance(obj, memoryview): + * try: # <<<<<<<<<<<<<< + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | 
PyBUF_ANY_CONTIGUOUS, + * self.dtype_is_object) + */ + } + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + goto __pyx_L9_try_end; + __pyx_L4_error:; + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; + + /* "View.MemoryView":436 + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + * self.dtype_is_object) + * except TypeError: # <<<<<<<<<<<<<< + * return None + * + */ + __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); + if (__pyx_t_9) { + __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 436, __pyx_L6_except_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_GOTREF(__pyx_t_8); + __Pyx_GOTREF(__pyx_t_6); + + /* "View.MemoryView":437 + * self.dtype_is_object) + * except TypeError: + * return None # <<<<<<<<<<<<<< + * + * return obj + */ + __Pyx_XDECREF(__pyx_r); + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + goto __pyx_L7_except_return; + } + goto __pyx_L6_except_error; + __pyx_L6_except_error:; + + /* "View.MemoryView":433 + * cdef is_slice(self, obj): + * if not isinstance(obj, memoryview): + * try: # <<<<<<<<<<<<<< + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + * self.dtype_is_object) + */ + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_XGIVEREF(__pyx_t_5); + __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); + goto __pyx_L1_error; + __pyx_L7_except_return:; + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_XGIVEREF(__pyx_t_5); + __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); + goto __pyx_L0; + __pyx_L9_try_end:; + } + + /* "View.MemoryView":432 + * + * cdef is_slice(self, obj): + * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< + * try: + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + */ + } + + /* "View.MemoryView":439 + * return None + * + * return obj # <<<<<<<<<<<<<< + * + * cdef setitem_slice_assignment(self, dst, src): + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_obj); + __pyx_r = __pyx_v_obj; + goto __pyx_L0; + + /* "View.MemoryView":431 + * self.setitem_indexed(index, value) + * + * cdef is_slice(self, obj): # <<<<<<<<<<<<<< + * if not isinstance(obj, memoryview): + * try: + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_obj); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":441 + * return obj + * + * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice dst_slice + * cdef __Pyx_memviewslice src_slice + */ + +static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { + __Pyx_memviewslice __pyx_v_dst_slice; + __Pyx_memviewslice __pyx_v_src_slice; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice *__pyx_t_1; + __Pyx_memviewslice *__pyx_t_2; + PyObject *__pyx_t_3 
= NULL; + int __pyx_t_4; + int __pyx_t_5; + int __pyx_t_6; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); + + /* "View.MemoryView":445 + * cdef __Pyx_memviewslice src_slice + * + * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< + * get_slice_from_memview(dst, &dst_slice)[0], + * src.ndim, dst.ndim, self.dtype_is_object) + */ + if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 445, __pyx_L1_error) + __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 445, __pyx_L1_error) + + /* "View.MemoryView":446 + * + * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], + * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< + * src.ndim, dst.ndim, self.dtype_is_object) + * + */ + if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 446, __pyx_L1_error) + __pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 446, __pyx_L1_error) + + /* "View.MemoryView":447 + * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], + * get_slice_from_memview(dst, &dst_slice)[0], + * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< + * + * cdef setitem_slice_assign_scalar(self, memoryview dst, value): + */ + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "View.MemoryView":445 + * cdef __Pyx_memviewslice src_slice + * + * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< + * get_slice_from_memview(dst, &dst_slice)[0], + * src.ndim, dst.ndim, self.dtype_is_object) + */ + __pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 445, __pyx_L1_error) + + /* "View.MemoryView":441 + * return obj + * + * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice dst_slice + * cdef __Pyx_memviewslice src_slice + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":449 + * src.ndim, dst.ndim, self.dtype_is_object) + * + * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # 
<<<<<<<<<<<<<< + * cdef int array[128] + * cdef void *tmp = NULL + */ + +static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { + int __pyx_v_array[0x80]; + void *__pyx_v_tmp; + void *__pyx_v_item; + __Pyx_memviewslice *__pyx_v_dst_slice; + __Pyx_memviewslice __pyx_v_tmp_slice; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice *__pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + int __pyx_t_5; + char const *__pyx_t_6; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + PyObject *__pyx_t_9 = NULL; + PyObject *__pyx_t_10 = NULL; + PyObject *__pyx_t_11 = NULL; + PyObject *__pyx_t_12 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); + + /* "View.MemoryView":451 + * cdef setitem_slice_assign_scalar(self, memoryview dst, value): + * cdef int array[128] + * cdef void *tmp = NULL # <<<<<<<<<<<<<< + * cdef void *item + * + */ + __pyx_v_tmp = NULL; + + /* "View.MemoryView":456 + * cdef __Pyx_memviewslice *dst_slice + * cdef __Pyx_memviewslice tmp_slice + * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< + * + * if self.view.itemsize > sizeof(array): + */ + __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 456, __pyx_L1_error) + __pyx_v_dst_slice = __pyx_t_1; + + /* "View.MemoryView":458 + * dst_slice = get_slice_from_memview(dst, &tmp_slice) + * + * if self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< + * tmp = PyMem_Malloc(self.view.itemsize) + * if tmp == NULL: + */ + __pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":459 + * + * if self.view.itemsize > sizeof(array): + * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< + * if tmp == NULL: + * raise MemoryError + */ + __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); + + /* "View.MemoryView":460 + * if self.view.itemsize > sizeof(array): + * tmp = PyMem_Malloc(self.view.itemsize) + * if tmp == NULL: # <<<<<<<<<<<<<< + * raise MemoryError + * item = tmp + */ + __pyx_t_2 = ((__pyx_v_tmp == NULL) != 0); + if (unlikely(__pyx_t_2)) { + + /* "View.MemoryView":461 + * tmp = PyMem_Malloc(self.view.itemsize) + * if tmp == NULL: + * raise MemoryError # <<<<<<<<<<<<<< + * item = tmp + * else: + */ + PyErr_NoMemory(); __PYX_ERR(1, 461, __pyx_L1_error) + + /* "View.MemoryView":460 + * if self.view.itemsize > sizeof(array): + * tmp = PyMem_Malloc(self.view.itemsize) + * if tmp == NULL: # <<<<<<<<<<<<<< + * raise MemoryError + * item = tmp + */ + } + + /* "View.MemoryView":462 + * if tmp == NULL: + * raise MemoryError + * item = tmp # <<<<<<<<<<<<<< + * else: + * item = array + */ + __pyx_v_item = __pyx_v_tmp; + + /* "View.MemoryView":458 + * dst_slice = get_slice_from_memview(dst, &tmp_slice) + * + * if self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< + * tmp = PyMem_Malloc(self.view.itemsize) + * if tmp == NULL: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":464 + * item = tmp + * else: + * item = array # <<<<<<<<<<<<<< + * + * try: + */ + /*else*/ { + __pyx_v_item = ((void *)__pyx_v_array); + } + __pyx_L3:; + + /* "View.MemoryView":466 + * item = array + * + * try: # <<<<<<<<<<<<<< + * if self.dtype_is_object: + * ( item)[0] = value + */ + 
/*try:*/ { + + /* "View.MemoryView":467 + * + * try: + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * ( item)[0] = value + * else: + */ + __pyx_t_2 = (__pyx_v_self->dtype_is_object != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":468 + * try: + * if self.dtype_is_object: + * ( item)[0] = value # <<<<<<<<<<<<<< + * else: + * self.assign_item_from_object( item, value) + */ + (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); + + /* "View.MemoryView":467 + * + * try: + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * ( item)[0] = value + * else: + */ + goto __pyx_L8; + } + + /* "View.MemoryView":470 + * ( item)[0] = value + * else: + * self.assign_item_from_object( item, value) # <<<<<<<<<<<<<< + * + * + */ + /*else*/ { + __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 470, __pyx_L6_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __pyx_L8:; + + /* "View.MemoryView":474 + * + * + * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< + * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) + * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, + */ + __pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":475 + * + * if self.view.suboffsets != NULL: + * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< + * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, + * item, self.dtype_is_object) + */ + __pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 475, __pyx_L6_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "View.MemoryView":474 + * + * + * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< + * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) + * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, + */ + } + + /* "View.MemoryView":476 + * if self.view.suboffsets != NULL: + * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) + * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< + * item, self.dtype_is_object) + * finally: + */ + __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); + } + + /* "View.MemoryView":479 + * item, self.dtype_is_object) + * finally: + * PyMem_Free(tmp) # <<<<<<<<<<<<<< + * + * cdef setitem_indexed(self, index, value): + */ + /*finally:*/ { + /*normal exit:*/{ + PyMem_Free(__pyx_v_tmp); + goto __pyx_L7; + } + __pyx_L6_error:; + /*exception exit:*/{ + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); + if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); + __Pyx_XGOTREF(__pyx_t_7); + __Pyx_XGOTREF(__pyx_t_8); + __Pyx_XGOTREF(__pyx_t_9); + __Pyx_XGOTREF(__pyx_t_10); + __Pyx_XGOTREF(__pyx_t_11); + __Pyx_XGOTREF(__pyx_t_12); + __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename; + { + PyMem_Free(__pyx_v_tmp); + } + if 
(PY_MAJOR_VERSION >= 3) { + __Pyx_XGIVEREF(__pyx_t_10); + __Pyx_XGIVEREF(__pyx_t_11); + __Pyx_XGIVEREF(__pyx_t_12); + __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); + } + __Pyx_XGIVEREF(__pyx_t_7); + __Pyx_XGIVEREF(__pyx_t_8); + __Pyx_XGIVEREF(__pyx_t_9); + __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9); + __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; + __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6; + goto __pyx_L1_error; + } + __pyx_L7:; + } + + /* "View.MemoryView":449 + * src.ndim, dst.ndim, self.dtype_is_object) + * + * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< + * cdef int array[128] + * cdef void *tmp = NULL + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":481 + * PyMem_Free(tmp) + * + * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< + * cdef char *itemp = self.get_item_pointer(index) + * self.assign_item_from_object(itemp, value) + */ + +static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { + char *__pyx_v_itemp; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + char *__pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("setitem_indexed", 0); + + /* "View.MemoryView":482 + * + * cdef setitem_indexed(self, index, value): + * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< + * self.assign_item_from_object(itemp, value) + * + */ + __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 482, __pyx_L1_error) + __pyx_v_itemp = __pyx_t_1; + + /* "View.MemoryView":483 + * cdef setitem_indexed(self, index, value): + * cdef char *itemp = self.get_item_pointer(index) + * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< + * + * cdef convert_item_to_object(self, char *itemp): + */ + __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 483, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "View.MemoryView":481 + * PyMem_Free(tmp) + * + * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< + * cdef char *itemp = self.get_item_pointer(index) + * self.assign_item_from_object(itemp, value) + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":485 + * self.assign_item_from_object(itemp, value) + * + * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to 
convert the type""" + */ + +static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { + PyObject *__pyx_v_struct = NULL; + PyObject *__pyx_v_bytesitem = 0; + PyObject *__pyx_v_result = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + int __pyx_t_8; + PyObject *__pyx_t_9 = NULL; + size_t __pyx_t_10; + int __pyx_t_11; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("convert_item_to_object", 0); + + /* "View.MemoryView":488 + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to convert the type""" + * import struct # <<<<<<<<<<<<<< + * cdef bytes bytesitem + * + */ + __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_struct = __pyx_t_1; + __pyx_t_1 = 0; + + /* "View.MemoryView":491 + * cdef bytes bytesitem + * + * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< + * try: + * result = struct.unpack(self.view.format, bytesitem) + */ + __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 491, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); + __pyx_t_1 = 0; + + /* "View.MemoryView":492 + * + * bytesitem = itemp[:self.view.itemsize] + * try: # <<<<<<<<<<<<<< + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_4); + /*try:*/ { + + /* "View.MemoryView":493 + * bytesitem = itemp[:self.view.itemsize] + * try: + * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< + * except struct.error: + * raise ValueError("Unable to convert item to object") + */ + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 493, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 493, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_7 = NULL; + __pyx_t_8 = 0; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { + __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); + if (likely(__pyx_t_7)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); + __Pyx_INCREF(__pyx_t_7); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_5, function); + __pyx_t_8 = 1; + } + } + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(__pyx_t_5)) { + PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + } else + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { + PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + } else + #endif + { + __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 493, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_9); + if (__pyx_t_7) { + __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; + } + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); + __Pyx_INCREF(__pyx_v_bytesitem); + __Pyx_GIVEREF(__pyx_v_bytesitem); + PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); + __pyx_t_6 = 0; + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + } + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_v_result = __pyx_t_1; + __pyx_t_1 = 0; + + /* "View.MemoryView":492 + * + * bytesitem = itemp[:self.view.itemsize] + * try: # <<<<<<<<<<<<<< + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: + */ + } + + /* "View.MemoryView":497 + * raise ValueError("Unable to convert item to object") + * else: + * if len(self.view.format) == 1: # <<<<<<<<<<<<<< + * return result[0] + * return result + */ + /*else:*/ { + __pyx_t_10 = strlen(__pyx_v_self->view.format); + __pyx_t_11 = ((__pyx_t_10 == 1) != 0); + if (__pyx_t_11) { + + /* "View.MemoryView":498 + * else: + * if len(self.view.format) == 1: + * return result[0] # <<<<<<<<<<<<<< + * return result + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 498, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L6_except_return; + + /* "View.MemoryView":497 + * raise ValueError("Unable to convert item to object") + * else: + * if len(self.view.format) == 1: # <<<<<<<<<<<<<< + * return result[0] + * return result + */ + } + + /* "View.MemoryView":499 + * if len(self.view.format) == 1: + * return result[0] + * return result # <<<<<<<<<<<<<< + * + * cdef assign_item_from_object(self, char *itemp, object value): + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_result); + __pyx_r = __pyx_v_result; + goto __pyx_L6_except_return; + } + __pyx_L3_error:; + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; + + /* "View.MemoryView":494 + * try: + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: # <<<<<<<<<<<<<< + * raise ValueError("Unable to convert item to object") + * else: + */ + __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9); + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 494, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9); + __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0; + if (__pyx_t_8) { + __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 494, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_9); + 
__Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_1); + + /* "View.MemoryView":495 + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: + * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< + * else: + * if len(self.view.format) == 1: + */ + __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 495, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_Raise(__pyx_t_6, 0, 0, 0); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __PYX_ERR(1, 495, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "View.MemoryView":492 + * + * bytesitem = itemp[:self.view.itemsize] + * try: # <<<<<<<<<<<<<< + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: + */ + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L1_error; + __pyx_L6_except_return:; + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L0; + } + + /* "View.MemoryView":485 + * self.assign_item_from_object(itemp, value) + * + * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to convert the type""" + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_9); + __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_struct); + __Pyx_XDECREF(__pyx_v_bytesitem); + __Pyx_XDECREF(__pyx_v_result); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":501 + * return result + * + * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to convert the type""" + */ + +static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { + PyObject *__pyx_v_struct = NULL; + char __pyx_v_c; + PyObject *__pyx_v_bytesvalue = 0; + Py_ssize_t __pyx_v_i; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + int __pyx_t_3; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + int __pyx_t_7; + PyObject *__pyx_t_8 = NULL; + Py_ssize_t __pyx_t_9; + PyObject *__pyx_t_10 = NULL; + char *__pyx_t_11; + char *__pyx_t_12; + char *__pyx_t_13; + char *__pyx_t_14; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("assign_item_from_object", 0); + + /* "View.MemoryView":504 + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to convert the type""" + * import struct # <<<<<<<<<<<<<< + * cdef char c + * cdef bytes bytesvalue + */ + __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 504, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_struct = __pyx_t_1; + __pyx_t_1 = 0; + + /* "View.MemoryView":509 + * cdef Py_ssize_t i + * + * if isinstance(value, tuple): # <<<<<<<<<<<<<< + * 
bytesvalue = struct.pack(self.view.format, *value) + * else: + */ + __pyx_t_2 = PyTuple_Check(__pyx_v_value); + __pyx_t_3 = (__pyx_t_2 != 0); + if (__pyx_t_3) { + + /* "View.MemoryView":510 + * + * if isinstance(value, tuple): + * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< + * else: + * bytesvalue = struct.pack(self.view.format, value) + */ + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 510, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 510, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); + __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 510, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 510, __pyx_L1_error) + __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); + __pyx_t_4 = 0; + + /* "View.MemoryView":509 + * cdef Py_ssize_t i + * + * if isinstance(value, tuple): # <<<<<<<<<<<<<< + * bytesvalue = struct.pack(self.view.format, *value) + * else: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":512 + * bytesvalue = struct.pack(self.view.format, *value) + * else: + * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< + * + * for i, c in enumerate(bytesvalue): + */ + /*else*/ { + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 512, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 512, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_5 = NULL; + __pyx_t_7 = 0; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { + __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); + if (likely(__pyx_t_5)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); + __Pyx_INCREF(__pyx_t_5); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_6, function); + __pyx_t_7 = 1; + } + } + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(__pyx_t_6)) { + PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; + __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + } else + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { + PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; + __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) + 
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + } else + #endif + { + __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 512, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + if (__pyx_t_5) { + __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; + } + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); + __Pyx_INCREF(__pyx_v_value); + __Pyx_GIVEREF(__pyx_v_value); + PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); + __pyx_t_1 = 0; + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + } + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 512, __pyx_L1_error) + __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); + __pyx_t_4 = 0; + } + __pyx_L3:; + + /* "View.MemoryView":514 + * bytesvalue = struct.pack(self.view.format, value) + * + * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< + * itemp[i] = c + * + */ + __pyx_t_9 = 0; + if (unlikely(__pyx_v_bytesvalue == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); + __PYX_ERR(1, 514, __pyx_L1_error) + } + __Pyx_INCREF(__pyx_v_bytesvalue); + __pyx_t_10 = __pyx_v_bytesvalue; + __pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10); + __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10)); + for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { + __pyx_t_11 = __pyx_t_14; + __pyx_v_c = (__pyx_t_11[0]); + + /* "View.MemoryView":515 + * + * for i, c in enumerate(bytesvalue): + * itemp[i] = c # <<<<<<<<<<<<<< + * + * @cname('getbuffer') + */ + __pyx_v_i = __pyx_t_9; + + /* "View.MemoryView":514 + * bytesvalue = struct.pack(self.view.format, value) + * + * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< + * itemp[i] = c + * + */ + __pyx_t_9 = (__pyx_t_9 + 1); + + /* "View.MemoryView":515 + * + * for i, c in enumerate(bytesvalue): + * itemp[i] = c # <<<<<<<<<<<<<< + * + * @cname('getbuffer') + */ + (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; + } + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + + /* "View.MemoryView":501 + * return result + * + * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to convert the type""" + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_XDECREF(__pyx_t_10); + __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_struct); + __Pyx_XDECREF(__pyx_v_bytesvalue); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":518 + * + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< + * if flags & PyBUF_WRITABLE and self.view.readonly: + * raise ValueError("Cannot create writable memory view from read-only memoryview") + */ + +/* Python wrapper */ +static CYTHON_UNUSED int 
__pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ +static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + Py_ssize_t *__pyx_t_4; + char *__pyx_t_5; + void *__pyx_t_6; + int __pyx_t_7; + Py_ssize_t __pyx_t_8; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + if (__pyx_v_info == NULL) { + PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); + return -1; + } + __Pyx_RefNannySetupContext("__getbuffer__", 0); + __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(__pyx_v_info->obj); + + /* "View.MemoryView":519 + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): + * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< + * raise ValueError("Cannot create writable memory view from read-only memoryview") + * + */ + __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_2 = (__pyx_v_self->view.readonly != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L4_bool_binop_done:; + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":520 + * def __getbuffer__(self, Py_buffer *info, int flags): + * if flags & PyBUF_WRITABLE and self.view.readonly: + * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< + * + * if flags & PyBUF_ND: + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 520, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 520, __pyx_L1_error) + + /* "View.MemoryView":519 + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): + * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< + * raise ValueError("Cannot create writable memory view from read-only memoryview") + * + */ + } + + /* "View.MemoryView":522 + * raise ValueError("Cannot create writable memory view from read-only memoryview") + * + * if flags & PyBUF_ND: # <<<<<<<<<<<<<< + * info.shape = self.view.shape + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":523 + * + * if flags & PyBUF_ND: + * info.shape = self.view.shape # <<<<<<<<<<<<<< + * else: + * info.shape = NULL + */ + __pyx_t_4 = __pyx_v_self->view.shape; + __pyx_v_info->shape = __pyx_t_4; + + /* "View.MemoryView":522 + * raise ValueError("Cannot create writable memory view from read-only memoryview") + * + * if flags & PyBUF_ND: # <<<<<<<<<<<<<< + * info.shape = self.view.shape + * else: + */ + goto __pyx_L6; + } + + /* "View.MemoryView":525 + * info.shape = self.view.shape + * else: + * 
info.shape = NULL # <<<<<<<<<<<<<< + * + * if flags & PyBUF_STRIDES: + */ + /*else*/ { + __pyx_v_info->shape = NULL; + } + __pyx_L6:; + + /* "View.MemoryView":527 + * info.shape = NULL + * + * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< + * info.strides = self.view.strides + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":528 + * + * if flags & PyBUF_STRIDES: + * info.strides = self.view.strides # <<<<<<<<<<<<<< + * else: + * info.strides = NULL + */ + __pyx_t_4 = __pyx_v_self->view.strides; + __pyx_v_info->strides = __pyx_t_4; + + /* "View.MemoryView":527 + * info.shape = NULL + * + * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< + * info.strides = self.view.strides + * else: + */ + goto __pyx_L7; + } + + /* "View.MemoryView":530 + * info.strides = self.view.strides + * else: + * info.strides = NULL # <<<<<<<<<<<<<< + * + * if flags & PyBUF_INDIRECT: + */ + /*else*/ { + __pyx_v_info->strides = NULL; + } + __pyx_L7:; + + /* "View.MemoryView":532 + * info.strides = NULL + * + * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< + * info.suboffsets = self.view.suboffsets + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":533 + * + * if flags & PyBUF_INDIRECT: + * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< + * else: + * info.suboffsets = NULL + */ + __pyx_t_4 = __pyx_v_self->view.suboffsets; + __pyx_v_info->suboffsets = __pyx_t_4; + + /* "View.MemoryView":532 + * info.strides = NULL + * + * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< + * info.suboffsets = self.view.suboffsets + * else: + */ + goto __pyx_L8; + } + + /* "View.MemoryView":535 + * info.suboffsets = self.view.suboffsets + * else: + * info.suboffsets = NULL # <<<<<<<<<<<<<< + * + * if flags & PyBUF_FORMAT: + */ + /*else*/ { + __pyx_v_info->suboffsets = NULL; + } + __pyx_L8:; + + /* "View.MemoryView":537 + * info.suboffsets = NULL + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * info.format = self.view.format + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":538 + * + * if flags & PyBUF_FORMAT: + * info.format = self.view.format # <<<<<<<<<<<<<< + * else: + * info.format = NULL + */ + __pyx_t_5 = __pyx_v_self->view.format; + __pyx_v_info->format = __pyx_t_5; + + /* "View.MemoryView":537 + * info.suboffsets = NULL + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * info.format = self.view.format + * else: + */ + goto __pyx_L9; + } + + /* "View.MemoryView":540 + * info.format = self.view.format + * else: + * info.format = NULL # <<<<<<<<<<<<<< + * + * info.buf = self.view.buf + */ + /*else*/ { + __pyx_v_info->format = NULL; + } + __pyx_L9:; + + /* "View.MemoryView":542 + * info.format = NULL + * + * info.buf = self.view.buf # <<<<<<<<<<<<<< + * info.ndim = self.view.ndim + * info.itemsize = self.view.itemsize + */ + __pyx_t_6 = __pyx_v_self->view.buf; + __pyx_v_info->buf = __pyx_t_6; + + /* "View.MemoryView":543 + * + * info.buf = self.view.buf + * info.ndim = self.view.ndim # <<<<<<<<<<<<<< + * info.itemsize = self.view.itemsize + * info.len = self.view.len + */ + __pyx_t_7 = __pyx_v_self->view.ndim; + __pyx_v_info->ndim = __pyx_t_7; + + /* "View.MemoryView":544 + * info.buf = self.view.buf + * info.ndim = self.view.ndim + * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< + * info.len = self.view.len + * info.readonly = self.view.readonly + */ + __pyx_t_8 = __pyx_v_self->view.itemsize; + __pyx_v_info->itemsize = __pyx_t_8; + + /* 
"View.MemoryView":545 + * info.ndim = self.view.ndim + * info.itemsize = self.view.itemsize + * info.len = self.view.len # <<<<<<<<<<<<<< + * info.readonly = self.view.readonly + * info.obj = self + */ + __pyx_t_8 = __pyx_v_self->view.len; + __pyx_v_info->len = __pyx_t_8; + + /* "View.MemoryView":546 + * info.itemsize = self.view.itemsize + * info.len = self.view.len + * info.readonly = self.view.readonly # <<<<<<<<<<<<<< + * info.obj = self + * + */ + __pyx_t_1 = __pyx_v_self->view.readonly; + __pyx_v_info->readonly = __pyx_t_1; + + /* "View.MemoryView":547 + * info.len = self.view.len + * info.readonly = self.view.readonly + * info.obj = self # <<<<<<<<<<<<<< + * + * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") + */ + __Pyx_INCREF(((PyObject *)__pyx_v_self)); + __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); + __pyx_v_info->obj = ((PyObject *)__pyx_v_self); + + /* "View.MemoryView":518 + * + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< + * if flags & PyBUF_WRITABLE and self.view.readonly: + * raise ValueError("Cannot create writable memory view from read-only memoryview") + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + if (__pyx_v_info->obj != NULL) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; + } + goto __pyx_L2; + __pyx_L0:; + if (__pyx_v_info->obj == Py_None) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; + } + __pyx_L2:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":553 + * + * @property + * def T(self): # <<<<<<<<<<<<<< + * cdef _memoryviewslice result = memoryview_copy(self) + * transpose_memslice(&result.from_slice) + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":554 + * @property + * def T(self): + * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< + * transpose_memslice(&result.from_slice) + * return result + */ + __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 554, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 554, __pyx_L1_error) + __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); + __pyx_t_1 
= 0; + + /* "View.MemoryView":555 + * def T(self): + * cdef _memoryviewslice result = memoryview_copy(self) + * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< + * return result + * + */ + __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 555, __pyx_L1_error) + + /* "View.MemoryView":556 + * cdef _memoryviewslice result = memoryview_copy(self) + * transpose_memslice(&result.from_slice) + * return result # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_result)); + __pyx_r = ((PyObject *)__pyx_v_result); + goto __pyx_L0; + + /* "View.MemoryView":553 + * + * @property + * def T(self): # <<<<<<<<<<<<<< + * cdef _memoryviewslice result = memoryview_copy(self) + * transpose_memslice(&result.from_slice) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_result); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":559 + * + * @property + * def base(self): # <<<<<<<<<<<<<< + * return self.obj + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":560 + * @property + * def base(self): + * return self.obj # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self->obj); + __pyx_r = __pyx_v_self->obj; + goto __pyx_L0; + + /* "View.MemoryView":559 + * + * @property + * def base(self): # <<<<<<<<<<<<<< + * return self.obj + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":563 + * + * @property + * def shape(self): # <<<<<<<<<<<<<< + * return tuple([length for length in self.view.shape[:self.view.ndim]]) + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + Py_ssize_t __pyx_v_length; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + Py_ssize_t *__pyx_t_2; + Py_ssize_t *__pyx_t_3; + Py_ssize_t 
*__pyx_t_4; + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":564 + * @property + * def shape(self): + * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 564, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); + for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { + __pyx_t_2 = __pyx_t_4; + __pyx_v_length = (__pyx_t_2[0]); + __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 564, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + } + __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_r = __pyx_t_5; + __pyx_t_5 = 0; + goto __pyx_L0; + + /* "View.MemoryView":563 + * + * @property + * def shape(self): # <<<<<<<<<<<<<< + * return tuple([length for length in self.view.shape[:self.view.ndim]]) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":567 + * + * @property + * def strides(self): # <<<<<<<<<<<<<< + * if self.view.strides == NULL: + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + Py_ssize_t __pyx_v_stride; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + Py_ssize_t *__pyx_t_3; + Py_ssize_t *__pyx_t_4; + Py_ssize_t *__pyx_t_5; + PyObject *__pyx_t_6 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":568 + * @property + * def strides(self): + * if self.view.strides == NULL: # <<<<<<<<<<<<<< + * + * raise ValueError("Buffer view does not expose strides") + */ + __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":570 + * if self.view.strides == NULL: + * + * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< + * + * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) + */ + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 570, __pyx_L1_error) + 
__Pyx_GOTREF(__pyx_t_2); + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(1, 570, __pyx_L1_error) + + /* "View.MemoryView":568 + * @property + * def strides(self): + * if self.view.strides == NULL: # <<<<<<<<<<<<<< + * + * raise ValueError("Buffer view does not expose strides") + */ + } + + /* "View.MemoryView":572 + * raise ValueError("Buffer view does not expose strides") + * + * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 572, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); + for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { + __pyx_t_3 = __pyx_t_5; + __pyx_v_stride = (__pyx_t_3[0]); + __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 572, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + } + __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_r = __pyx_t_6; + __pyx_t_6 = 0; + goto __pyx_L0; + + /* "View.MemoryView":567 + * + * @property + * def strides(self): # <<<<<<<<<<<<<< + * if self.view.strides == NULL: + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":575 + * + * @property + * def suboffsets(self): # <<<<<<<<<<<<<< + * if self.view.suboffsets == NULL: + * return (-1,) * self.view.ndim + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + Py_ssize_t __pyx_v_suboffset; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + Py_ssize_t *__pyx_t_4; + Py_ssize_t *__pyx_t_5; + Py_ssize_t *__pyx_t_6; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":576 + * @property + * def suboffsets(self): + * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< + * return (-1,) * self.view.ndim + * + */ + __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":577 + * def suboffsets(self): + * if self.view.suboffsets == NULL: + * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< + * + * return tuple([suboffset for suboffset in 
self.view.suboffsets[:self.view.ndim]]) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__16, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "View.MemoryView":576 + * @property + * def suboffsets(self): + * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< + * return (-1,) * self.view.ndim + * + */ + } + + /* "View.MemoryView":579 + * return (-1,) * self.view.ndim + * + * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 579, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); + for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { + __pyx_t_4 = __pyx_t_6; + __pyx_v_suboffset = (__pyx_t_4[0]); + __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 579, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":575 + * + * @property + * def suboffsets(self): # <<<<<<<<<<<<<< + * if self.view.suboffsets == NULL: + * return (-1,) * self.view.ndim + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":582 + * + * @property + * def ndim(self): # <<<<<<<<<<<<<< + * return self.view.ndim + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":583 + * @property + * def ndim(self): + * return self.view.ndim # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; 
+ __pyx_t_1 = 0; + goto __pyx_L0; + + /* "View.MemoryView":582 + * + * @property + * def ndim(self): # <<<<<<<<<<<<<< + * return self.view.ndim + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":586 + * + * @property + * def itemsize(self): # <<<<<<<<<<<<<< + * return self.view.itemsize + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":587 + * @property + * def itemsize(self): + * return self.view.itemsize # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "View.MemoryView":586 + * + * @property + * def itemsize(self): # <<<<<<<<<<<<<< + * return self.view.itemsize + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":590 + * + * @property + * def nbytes(self): # <<<<<<<<<<<<<< + * return self.size * self.view.itemsize + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":591 + * @property + * def nbytes(self): + * return self.size * self.view.itemsize # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + 
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 591, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 591, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 591, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "View.MemoryView":590 + * + * @property + * def nbytes(self): # <<<<<<<<<<<<<< + * return self.size * self.view.itemsize + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":594 + * + * @property + * def size(self): # <<<<<<<<<<<<<< + * if self._size is None: + * result = 1 + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_v_result = NULL; + PyObject *__pyx_v_length = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + Py_ssize_t *__pyx_t_3; + Py_ssize_t *__pyx_t_4; + Py_ssize_t *__pyx_t_5; + PyObject *__pyx_t_6 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":595 + * @property + * def size(self): + * if self._size is None: # <<<<<<<<<<<<<< + * result = 1 + * + */ + __pyx_t_1 = (__pyx_v_self->_size == Py_None); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":596 + * def size(self): + * if self._size is None: + * result = 1 # <<<<<<<<<<<<<< + * + * for length in self.view.shape[:self.view.ndim]: + */ + __Pyx_INCREF(__pyx_int_1); + __pyx_v_result = __pyx_int_1; + + /* "View.MemoryView":598 + * result = 1 + * + * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< + * result *= length + * + */ + __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); + for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { + __pyx_t_3 = __pyx_t_5; + __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 598, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); + __pyx_t_6 = 0; + + /* "View.MemoryView":599 + * + * for length in self.view.shape[:self.view.ndim]: + * result *= length # <<<<<<<<<<<<<< + * + * self._size = result + */ + __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 599, 
__pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); + __pyx_t_6 = 0; + } + + /* "View.MemoryView":601 + * result *= length + * + * self._size = result # <<<<<<<<<<<<<< + * + * return self._size + */ + __Pyx_INCREF(__pyx_v_result); + __Pyx_GIVEREF(__pyx_v_result); + __Pyx_GOTREF(__pyx_v_self->_size); + __Pyx_DECREF(__pyx_v_self->_size); + __pyx_v_self->_size = __pyx_v_result; + + /* "View.MemoryView":595 + * @property + * def size(self): + * if self._size is None: # <<<<<<<<<<<<<< + * result = 1 + * + */ + } + + /* "View.MemoryView":603 + * self._size = result + * + * return self._size # <<<<<<<<<<<<<< + * + * def __len__(self): + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self->_size); + __pyx_r = __pyx_v_self->_size; + goto __pyx_L0; + + /* "View.MemoryView":594 + * + * @property + * def size(self): # <<<<<<<<<<<<<< + * if self._size is None: + * result = 1 + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_6); + __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_result); + __Pyx_XDECREF(__pyx_v_length); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":605 + * return self._size + * + * def __len__(self): # <<<<<<<<<<<<<< + * if self.view.ndim >= 1: + * return self.view.shape[0] + */ + +/* Python wrapper */ +static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ +static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { + Py_ssize_t __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { + Py_ssize_t __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("__len__", 0); + + /* "View.MemoryView":606 + * + * def __len__(self): + * if self.view.ndim >= 1: # <<<<<<<<<<<<<< + * return self.view.shape[0] + * + */ + __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":607 + * def __len__(self): + * if self.view.ndim >= 1: + * return self.view.shape[0] # <<<<<<<<<<<<<< + * + * return 0 + */ + __pyx_r = (__pyx_v_self->view.shape[0]); + goto __pyx_L0; + + /* "View.MemoryView":606 + * + * def __len__(self): + * if self.view.ndim >= 1: # <<<<<<<<<<<<<< + * return self.view.shape[0] + * + */ + } + + /* "View.MemoryView":609 + * return self.view.shape[0] + * + * return 0 # <<<<<<<<<<<<<< + * + * def __repr__(self): + */ + __pyx_r = 0; + goto __pyx_L0; + + /* "View.MemoryView":605 + * return self._size + * + * def __len__(self): # <<<<<<<<<<<<<< + * if self.view.ndim >= 1: + * return self.view.shape[0] + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":611 + * return 0 + * + * def __repr__(self): # <<<<<<<<<<<<<< + * return "" % (self.base.__class__.__name__, + * id(self)) + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + 
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__repr__", 0); + + /* "View.MemoryView":612 + * + * def __repr__(self): + * return "" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< + * id(self)) + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "View.MemoryView":613 + * def __repr__(self): + * return "" % (self.base.__class__.__name__, + * id(self)) # <<<<<<<<<<<<<< + * + * def __str__(self): + */ + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 613, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + + /* "View.MemoryView":612 + * + * def __repr__(self): + * return "" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< + * id(self)) + * + */ + __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 612, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_2); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); + __pyx_t_1 = 0; + __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":611 + * return 0 + * + * def __repr__(self): # <<<<<<<<<<<<<< + * return "" % (self.base.__class__.__name__, + * id(self)) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":615 + * id(self)) + * + * def __str__(self): # <<<<<<<<<<<<<< + * return "" % (self.base.__class__.__name__,) + * + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ 
+ __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__str__", 0); + + /* "View.MemoryView":616 + * + * def __str__(self): + * return "" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); + __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "View.MemoryView":615 + * id(self)) + * + * def __str__(self): # <<<<<<<<<<<<<< + * return "" % (self.base.__class__.__name__,) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":619 + * + * + * def is_c_contig(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice *mslice + * cdef __Pyx_memviewslice tmp + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { + __Pyx_memviewslice *__pyx_v_mslice; + __Pyx_memviewslice __pyx_v_tmp; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice *__pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("is_c_contig", 0); + + /* "View.MemoryView":622 + * cdef __Pyx_memviewslice *mslice + * cdef __Pyx_memviewslice tmp + * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< + * return slice_is_contig(mslice[0], 'C', self.view.ndim) + * + */ + __pyx_t_1 = 
__pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 622, __pyx_L1_error) + __pyx_v_mslice = __pyx_t_1; + + /* "View.MemoryView":623 + * cdef __Pyx_memviewslice tmp + * mslice = get_slice_from_memview(self, &tmp) + * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< + * + * def is_f_contig(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 623, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":619 + * + * + * def is_c_contig(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice *mslice + * cdef __Pyx_memviewslice tmp + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":625 + * return slice_is_contig(mslice[0], 'C', self.view.ndim) + * + * def is_f_contig(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice *mslice + * cdef __Pyx_memviewslice tmp + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { + __Pyx_memviewslice *__pyx_v_mslice; + __Pyx_memviewslice __pyx_v_tmp; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice *__pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("is_f_contig", 0); + + /* "View.MemoryView":628 + * cdef __Pyx_memviewslice *mslice + * cdef __Pyx_memviewslice tmp + * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< + * return slice_is_contig(mslice[0], 'F', self.view.ndim) + * + */ + __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 628, __pyx_L1_error) + __pyx_v_mslice = __pyx_t_1; + + /* "View.MemoryView":629 + * cdef __Pyx_memviewslice tmp + * mslice = get_slice_from_memview(self, &tmp) + * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< + * + * def copy(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":625 + * return slice_is_contig(mslice[0], 'C', self.view.ndim) + * + * def is_f_contig(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice *mslice + * cdef __Pyx_memviewslice tmp + */ + 
+ /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":631 + * return slice_is_contig(mslice[0], 'F', self.view.ndim) + * + * def copy(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice mslice + * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("copy (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { + __Pyx_memviewslice __pyx_v_mslice; + int __pyx_v_flags; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("copy", 0); + + /* "View.MemoryView":633 + * def copy(self): + * cdef __Pyx_memviewslice mslice + * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< + * + * slice_copy(self, &mslice) + */ + __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); + + /* "View.MemoryView":635 + * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS + * + * slice_copy(self, &mslice) # <<<<<<<<<<<<<< + * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, + * self.view.itemsize, + */ + __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); + + /* "View.MemoryView":636 + * + * slice_copy(self, &mslice) + * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< + * self.view.itemsize, + * flags|PyBUF_C_CONTIGUOUS, + */ + __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 636, __pyx_L1_error) + __pyx_v_mslice = __pyx_t_1; + + /* "View.MemoryView":641 + * self.dtype_is_object) + * + * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< + * + * def copy_fortran(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 641, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":631 + * return slice_is_contig(mslice[0], 'F', self.view.ndim) + * + * def copy(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice mslice + * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":643 + * return 
memoryview_copy_from_slice(self, &mslice) + * + * def copy_fortran(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice src, dst + * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { + __Pyx_memviewslice __pyx_v_src; + __Pyx_memviewslice __pyx_v_dst; + int __pyx_v_flags; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("copy_fortran", 0); + + /* "View.MemoryView":645 + * def copy_fortran(self): + * cdef __Pyx_memviewslice src, dst + * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< + * + * slice_copy(self, &src) + */ + __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); + + /* "View.MemoryView":647 + * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS + * + * slice_copy(self, &src) # <<<<<<<<<<<<<< + * dst = slice_copy_contig(&src, "fortran", self.view.ndim, + * self.view.itemsize, + */ + __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); + + /* "View.MemoryView":648 + * + * slice_copy(self, &src) + * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< + * self.view.itemsize, + * flags|PyBUF_F_CONTIGUOUS, + */ + __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 648, __pyx_L1_error) + __pyx_v_dst = __pyx_t_1; + + /* "View.MemoryView":653 + * self.dtype_is_object) + * + * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":643 + * return memoryview_copy_from_slice(self, &mslice) + * + * def copy_fortran(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice src, dst + * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject 
*unused); /*proto*/ +static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__reduce_cython__", 0); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 2, __pyx_L1_error) + + /* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ +static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) + 
__Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 4, __pyx_L1_error) + + /* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":657 + * + * @cname('__pyx_memoryview_new') + * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< + * cdef memoryview result = memoryview(o, flags, dtype_is_object) + * result.typeinfo = typeinfo + */ + +static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { + struct __pyx_memoryview_obj *__pyx_v_result = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); + + /* "View.MemoryView":658 + * @cname('__pyx_memoryview_new') + * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): + * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< + * result.typeinfo = typeinfo + * return result + */ + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 658, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(__pyx_v_o); + __Pyx_GIVEREF(__pyx_v_o); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_2); + PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); + __pyx_t_1 = 0; + __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); + __pyx_t_2 = 0; + + /* "View.MemoryView":659 + * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): + * cdef memoryview result = memoryview(o, flags, dtype_is_object) + * result.typeinfo = typeinfo # <<<<<<<<<<<<<< + * return result + * + */ + __pyx_v_result->typeinfo = __pyx_v_typeinfo; + + /* "View.MemoryView":660 + * cdef memoryview result = memoryview(o, flags, dtype_is_object) + * result.typeinfo = typeinfo + * return result # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_check') + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_result)); + __pyx_r = ((PyObject *)__pyx_v_result); + goto __pyx_L0; + + /* "View.MemoryView":657 + * + * @cname('__pyx_memoryview_new') + * cdef memoryview_cwrapper(object o, int flags, bint 
dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< + * cdef memoryview result = memoryview(o, flags, dtype_is_object) + * result.typeinfo = typeinfo + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_result); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":663 + * + * @cname('__pyx_memoryview_check') + * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< + * return isinstance(o, memoryview) + * + */ + +static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("memoryview_check", 0); + + /* "View.MemoryView":664 + * @cname('__pyx_memoryview_check') + * cdef inline bint memoryview_check(object o): + * return isinstance(o, memoryview) # <<<<<<<<<<<<<< + * + * cdef tuple _unellipsify(object index, int ndim): + */ + __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); + __pyx_r = __pyx_t_1; + goto __pyx_L0; + + /* "View.MemoryView":663 + * + * @cname('__pyx_memoryview_check') + * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< + * return isinstance(o, memoryview) + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":666 + * return isinstance(o, memoryview) + * + * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< + * """ + * Replace all ellipses with full slices and fill incomplete indices with + */ + +static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { + PyObject *__pyx_v_tup = NULL; + PyObject *__pyx_v_result = NULL; + int __pyx_v_have_slices; + int __pyx_v_seen_ellipsis; + CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; + PyObject *__pyx_v_item = NULL; + Py_ssize_t __pyx_v_nslices; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + Py_ssize_t __pyx_t_5; + PyObject *(*__pyx_t_6)(PyObject *); + PyObject *__pyx_t_7 = NULL; + Py_ssize_t __pyx_t_8; + int __pyx_t_9; + int __pyx_t_10; + PyObject *__pyx_t_11 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("_unellipsify", 0); + + /* "View.MemoryView":671 + * full slices. + * """ + * if not isinstance(index, tuple): # <<<<<<<<<<<<<< + * tup = (index,) + * else: + */ + __pyx_t_1 = PyTuple_Check(__pyx_v_index); + __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":672 + * """ + * if not isinstance(index, tuple): + * tup = (index,) # <<<<<<<<<<<<<< + * else: + * tup = index + */ + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(__pyx_v_index); + __Pyx_GIVEREF(__pyx_v_index); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); + __pyx_v_tup = __pyx_t_3; + __pyx_t_3 = 0; + + /* "View.MemoryView":671 + * full slices. 
+ * """ + * if not isinstance(index, tuple): # <<<<<<<<<<<<<< + * tup = (index,) + * else: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":674 + * tup = (index,) + * else: + * tup = index # <<<<<<<<<<<<<< + * + * result = [] + */ + /*else*/ { + __Pyx_INCREF(__pyx_v_index); + __pyx_v_tup = __pyx_v_index; + } + __pyx_L3:; + + /* "View.MemoryView":676 + * tup = index + * + * result = [] # <<<<<<<<<<<<<< + * have_slices = False + * seen_ellipsis = False + */ + __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 676, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_v_result = ((PyObject*)__pyx_t_3); + __pyx_t_3 = 0; + + /* "View.MemoryView":677 + * + * result = [] + * have_slices = False # <<<<<<<<<<<<<< + * seen_ellipsis = False + * for idx, item in enumerate(tup): + */ + __pyx_v_have_slices = 0; + + /* "View.MemoryView":678 + * result = [] + * have_slices = False + * seen_ellipsis = False # <<<<<<<<<<<<<< + * for idx, item in enumerate(tup): + * if item is Ellipsis: + */ + __pyx_v_seen_ellipsis = 0; + + /* "View.MemoryView":679 + * have_slices = False + * seen_ellipsis = False + * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< + * if item is Ellipsis: + * if not seen_ellipsis: + */ + __Pyx_INCREF(__pyx_int_0); + __pyx_t_3 = __pyx_int_0; + if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { + __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; + __pyx_t_6 = NULL; + } else { + __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 679, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 679, __pyx_L1_error) + } + for (;;) { + if (likely(!__pyx_t_6)) { + if (likely(PyList_CheckExact(__pyx_t_4))) { + if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) + #else + __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + #endif + } else { + if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) + #else + __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + #endif + } + } else { + __pyx_t_7 = __pyx_t_6(__pyx_t_4); + if (unlikely(!__pyx_t_7)) { + PyObject* exc_type = PyErr_Occurred(); + if (exc_type) { + if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); + else __PYX_ERR(1, 679, __pyx_L1_error) + } + break; + } + __Pyx_GOTREF(__pyx_t_7); + } + __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); + __pyx_t_7 = 0; + __Pyx_INCREF(__pyx_t_3); + __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); + __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_DECREF(__pyx_t_3); + __pyx_t_3 = __pyx_t_7; + __pyx_t_7 = 0; + + /* "View.MemoryView":680 + * seen_ellipsis = False + * for idx, item in enumerate(tup): + * if item is Ellipsis: # <<<<<<<<<<<<<< + * if not seen_ellipsis: + * 
result.extend([slice(None)] * (ndim - len(tup) + 1)) + */ + __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); + __pyx_t_1 = (__pyx_t_2 != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":681 + * for idx, item in enumerate(tup): + * if item is Ellipsis: + * if not seen_ellipsis: # <<<<<<<<<<<<<< + * result.extend([slice(None)] * (ndim - len(tup) + 1)) + * seen_ellipsis = True + */ + __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":682 + * if item is Ellipsis: + * if not seen_ellipsis: + * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< + * seen_ellipsis = True + * else: + */ + __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error) + __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 682, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + { Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { + __Pyx_INCREF(__pyx_slice__19); + __Pyx_GIVEREF(__pyx_slice__19); + PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__19); + } + } + __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 682, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + + /* "View.MemoryView":683 + * if not seen_ellipsis: + * result.extend([slice(None)] * (ndim - len(tup) + 1)) + * seen_ellipsis = True # <<<<<<<<<<<<<< + * else: + * result.append(slice(None)) + */ + __pyx_v_seen_ellipsis = 1; + + /* "View.MemoryView":681 + * for idx, item in enumerate(tup): + * if item is Ellipsis: + * if not seen_ellipsis: # <<<<<<<<<<<<<< + * result.extend([slice(None)] * (ndim - len(tup) + 1)) + * seen_ellipsis = True + */ + goto __pyx_L7; + } + + /* "View.MemoryView":685 + * seen_ellipsis = True + * else: + * result.append(slice(None)) # <<<<<<<<<<<<<< + * have_slices = True + * else: + */ + /*else*/ { + __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__19); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 685, __pyx_L1_error) + } + __pyx_L7:; + + /* "View.MemoryView":686 + * else: + * result.append(slice(None)) + * have_slices = True # <<<<<<<<<<<<<< + * else: + * if not isinstance(item, slice) and not PyIndex_Check(item): + */ + __pyx_v_have_slices = 1; + + /* "View.MemoryView":680 + * seen_ellipsis = False + * for idx, item in enumerate(tup): + * if item is Ellipsis: # <<<<<<<<<<<<<< + * if not seen_ellipsis: + * result.extend([slice(None)] * (ndim - len(tup) + 1)) + */ + goto __pyx_L6; + } + + /* "View.MemoryView":688 + * have_slices = True + * else: + * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< + * raise TypeError("Cannot index with type '%s'" % type(item)) + * + */ + /*else*/ { + __pyx_t_2 = PySlice_Check(__pyx_v_item); + __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); + if (__pyx_t_10) { + } else { + __pyx_t_1 = __pyx_t_10; + goto __pyx_L9_bool_binop_done; + } + __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); + __pyx_t_1 = __pyx_t_10; + __pyx_L9_bool_binop_done:; + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":689 + * else: + * if not isinstance(item, slice) and not PyIndex_Check(item): + * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< + * + * have_slices = have_slices or isinstance(item, slice) + */ + __pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject 
*)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 689, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 689, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_11); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_Raise(__pyx_t_11, 0, 0, 0); + __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; + __PYX_ERR(1, 689, __pyx_L1_error) + + /* "View.MemoryView":688 + * have_slices = True + * else: + * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< + * raise TypeError("Cannot index with type '%s'" % type(item)) + * + */ + } + + /* "View.MemoryView":691 + * raise TypeError("Cannot index with type '%s'" % type(item)) + * + * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< + * result.append(item) + * + */ + __pyx_t_10 = (__pyx_v_have_slices != 0); + if (!__pyx_t_10) { + } else { + __pyx_t_1 = __pyx_t_10; + goto __pyx_L11_bool_binop_done; + } + __pyx_t_10 = PySlice_Check(__pyx_v_item); + __pyx_t_2 = (__pyx_t_10 != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L11_bool_binop_done:; + __pyx_v_have_slices = __pyx_t_1; + + /* "View.MemoryView":692 + * + * have_slices = have_slices or isinstance(item, slice) + * result.append(item) # <<<<<<<<<<<<<< + * + * nslices = ndim - len(result) + */ + __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error) + } + __pyx_L6:; + + /* "View.MemoryView":679 + * have_slices = False + * seen_ellipsis = False + * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< + * if item is Ellipsis: + * if not seen_ellipsis: + */ + } + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "View.MemoryView":694 + * result.append(item) + * + * nslices = ndim - len(result) # <<<<<<<<<<<<<< + * if nslices: + * result.extend([slice(None)] * nslices) + */ + __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 694, __pyx_L1_error) + __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); + + /* "View.MemoryView":695 + * + * nslices = ndim - len(result) + * if nslices: # <<<<<<<<<<<<<< + * result.extend([slice(None)] * nslices) + * + */ + __pyx_t_1 = (__pyx_v_nslices != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":696 + * nslices = ndim - len(result) + * if nslices: + * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< + * + * return have_slices or nslices, tuple(result) + */ + __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 696, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + { Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { + __Pyx_INCREF(__pyx_slice__19); + __Pyx_GIVEREF(__pyx_slice__19); + PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__19); + } + } + __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 696, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "View.MemoryView":695 + * + * nslices = ndim - len(result) + * if nslices: # <<<<<<<<<<<<<< + * result.extend([slice(None)] * nslices) + * + */ + } + + /* "View.MemoryView":698 + * result.extend([slice(None)] * nslices) + * + * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< + * + * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): + */ + __Pyx_XDECREF(__pyx_r); + if (!__pyx_v_have_slices) { + } else { + __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = __pyx_t_4; + __pyx_t_4 = 0; + goto __pyx_L14_bool_binop_done; + } + __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = __pyx_t_4; + __pyx_t_4 = 0; + __pyx_L14_bool_binop_done:; + __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 698, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_11); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4); + __pyx_t_3 = 0; + __pyx_t_4 = 0; + __pyx_r = ((PyObject*)__pyx_t_11); + __pyx_t_11 = 0; + goto __pyx_L0; + + /* "View.MemoryView":666 + * return isinstance(o, memoryview) + * + * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< + * """ + * Replace all ellipses with full slices and fill incomplete indices with + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_11); + __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_tup); + __Pyx_XDECREF(__pyx_v_result); + __Pyx_XDECREF(__pyx_v_idx); + __Pyx_XDECREF(__pyx_v_item); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":700 + * return have_slices or nslices, tuple(result) + * + * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: + */ + +static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { + Py_ssize_t __pyx_v_suboffset; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + Py_ssize_t *__pyx_t_1; + Py_ssize_t *__pyx_t_2; + Py_ssize_t *__pyx_t_3; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); + + /* "View.MemoryView":701 + * + * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): + * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< + * if suboffset >= 0: + * raise ValueError("Indirect dimensions not supported") + */ + 
__pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); + for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { + __pyx_t_1 = __pyx_t_3; + __pyx_v_suboffset = (__pyx_t_1[0]); + + /* "View.MemoryView":702 + * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: # <<<<<<<<<<<<<< + * raise ValueError("Indirect dimensions not supported") + * + */ + __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); + if (unlikely(__pyx_t_4)) { + + /* "View.MemoryView":703 + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: + * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 703, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_Raise(__pyx_t_5, 0, 0, 0); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __PYX_ERR(1, 703, __pyx_L1_error) + + /* "View.MemoryView":702 + * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: # <<<<<<<<<<<<<< + * raise ValueError("Indirect dimensions not supported") + * + */ + } + } + + /* "View.MemoryView":700 + * return have_slices or nslices, tuple(result) + * + * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":710 + * + * @cname('__pyx_memview_slice') + * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< + * cdef int new_ndim = 0, suboffset_dim = -1, dim + * cdef bint negative_step + */ + +static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { + int __pyx_v_new_ndim; + int __pyx_v_suboffset_dim; + int __pyx_v_dim; + __Pyx_memviewslice __pyx_v_src; + __Pyx_memviewslice __pyx_v_dst; + __Pyx_memviewslice *__pyx_v_p_src; + struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; + __Pyx_memviewslice *__pyx_v_p_dst; + int *__pyx_v_p_suboffset_dim; + Py_ssize_t __pyx_v_start; + Py_ssize_t __pyx_v_stop; + Py_ssize_t __pyx_v_step; + int __pyx_v_have_start; + int __pyx_v_have_stop; + int __pyx_v_have_step; + PyObject *__pyx_v_index = NULL; + struct __pyx_memoryview_obj *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + struct __pyx_memoryview_obj *__pyx_t_4; + char *__pyx_t_5; + int __pyx_t_6; + Py_ssize_t __pyx_t_7; + PyObject *(*__pyx_t_8)(PyObject *); + PyObject *__pyx_t_9 = NULL; + Py_ssize_t __pyx_t_10; + int __pyx_t_11; + Py_ssize_t __pyx_t_12; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("memview_slice", 0); + + /* "View.MemoryView":711 + * @cname('__pyx_memview_slice') + * cdef memoryview memview_slice(memoryview memview, object indices): + * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< + * cdef bint negative_step + * cdef __Pyx_memviewslice src, dst + */ + __pyx_v_new_ndim = 0; + __pyx_v_suboffset_dim = -1; + + /* 
"View.MemoryView":718 + * + * + * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< + * + * cdef _memoryviewslice memviewsliceobj + */ + (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)))); + + /* "View.MemoryView":722 + * cdef _memoryviewslice memviewsliceobj + * + * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< + * + * if isinstance(memview, _memoryviewslice): + */ + #ifndef CYTHON_WITHOUT_ASSERTIONS + if (unlikely(!Py_OptimizeFlag)) { + if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { + PyErr_SetNone(PyExc_AssertionError); + __PYX_ERR(1, 722, __pyx_L1_error) + } + } + #endif + + /* "View.MemoryView":724 + * assert memview.view.ndim > 0 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * memviewsliceobj = memview + * p_src = &memviewsliceobj.from_slice + */ + __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":725 + * + * if isinstance(memview, _memoryviewslice): + * memviewsliceobj = memview # <<<<<<<<<<<<<< + * p_src = &memviewsliceobj.from_slice + * else: + */ + if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 725, __pyx_L1_error) + __pyx_t_3 = ((PyObject *)__pyx_v_memview); + __Pyx_INCREF(__pyx_t_3); + __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); + __pyx_t_3 = 0; + + /* "View.MemoryView":726 + * if isinstance(memview, _memoryviewslice): + * memviewsliceobj = memview + * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< + * else: + * slice_copy(memview, &src) + */ + __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); + + /* "View.MemoryView":724 + * assert memview.view.ndim > 0 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * memviewsliceobj = memview + * p_src = &memviewsliceobj.from_slice + */ + goto __pyx_L3; + } + + /* "View.MemoryView":728 + * p_src = &memviewsliceobj.from_slice + * else: + * slice_copy(memview, &src) # <<<<<<<<<<<<<< + * p_src = &src + * + */ + /*else*/ { + __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); + + /* "View.MemoryView":729 + * else: + * slice_copy(memview, &src) + * p_src = &src # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_p_src = (&__pyx_v_src); + } + __pyx_L3:; + + /* "View.MemoryView":735 + * + * + * dst.memview = p_src.memview # <<<<<<<<<<<<<< + * dst.data = p_src.data + * + */ + __pyx_t_4 = __pyx_v_p_src->memview; + __pyx_v_dst.memview = __pyx_t_4; + + /* "View.MemoryView":736 + * + * dst.memview = p_src.memview + * dst.data = p_src.data # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_5 = __pyx_v_p_src->data; + __pyx_v_dst.data = __pyx_t_5; + + /* "View.MemoryView":741 + * + * + * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< + * cdef int *p_suboffset_dim = &suboffset_dim + * cdef Py_ssize_t start, stop, step + */ + __pyx_v_p_dst = (&__pyx_v_dst); + + /* "View.MemoryView":742 + * + * cdef __Pyx_memviewslice *p_dst = &dst + * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< + * cdef Py_ssize_t start, stop, step + * cdef bint have_start, have_stop, have_step + */ + __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); + + /* "View.MemoryView":746 + * cdef bint have_start, have_stop, have_step + * + * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< + * if PyIndex_Check(index): + * slice_memviewslice( + */ + __pyx_t_6 = 0; + if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { + 
__pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; + __pyx_t_8 = NULL; + } else { + __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 746, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 746, __pyx_L1_error) + } + for (;;) { + if (likely(!__pyx_t_8)) { + if (likely(PyList_CheckExact(__pyx_t_3))) { + if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) + #else + __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + #endif + } else { + if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) + #else + __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + #endif + } + } else { + __pyx_t_9 = __pyx_t_8(__pyx_t_3); + if (unlikely(!__pyx_t_9)) { + PyObject* exc_type = PyErr_Occurred(); + if (exc_type) { + if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); + else __PYX_ERR(1, 746, __pyx_L1_error) + } + break; + } + __Pyx_GOTREF(__pyx_t_9); + } + __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); + __pyx_t_9 = 0; + __pyx_v_dim = __pyx_t_6; + __pyx_t_6 = (__pyx_t_6 + 1); + + /* "View.MemoryView":747 + * + * for dim, index in enumerate(indices): + * if PyIndex_Check(index): # <<<<<<<<<<<<<< + * slice_memviewslice( + * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + */ + __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":751 + * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + * dim, new_ndim, p_suboffset_dim, + * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< + * 0, 0, 0, # have_{start,stop,step} + * False) + */ + __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 751, __pyx_L1_error) + + /* "View.MemoryView":748 + * for dim, index in enumerate(indices): + * if PyIndex_Check(index): + * slice_memviewslice( # <<<<<<<<<<<<<< + * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + * dim, new_ndim, p_suboffset_dim, + */ + __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 748, __pyx_L1_error) + + /* "View.MemoryView":747 + * + * for dim, index in enumerate(indices): + * if PyIndex_Check(index): # <<<<<<<<<<<<<< + * slice_memviewslice( + * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + */ + goto __pyx_L6; + } + + /* "View.MemoryView":754 + * 0, 0, 0, # have_{start,stop,step} + * False) + * elif index is None: # <<<<<<<<<<<<<< + * p_dst.shape[new_ndim] = 1 + * p_dst.strides[new_ndim] = 0 + */ + __pyx_t_2 = (__pyx_v_index == Py_None); + __pyx_t_1 = 
(__pyx_t_2 != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":755 + * False) + * elif index is None: + * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< + * p_dst.strides[new_ndim] = 0 + * p_dst.suboffsets[new_ndim] = -1 + */ + (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; + + /* "View.MemoryView":756 + * elif index is None: + * p_dst.shape[new_ndim] = 1 + * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< + * p_dst.suboffsets[new_ndim] = -1 + * new_ndim += 1 + */ + (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; + + /* "View.MemoryView":757 + * p_dst.shape[new_ndim] = 1 + * p_dst.strides[new_ndim] = 0 + * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< + * new_ndim += 1 + * else: + */ + (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; + + /* "View.MemoryView":758 + * p_dst.strides[new_ndim] = 0 + * p_dst.suboffsets[new_ndim] = -1 + * new_ndim += 1 # <<<<<<<<<<<<<< + * else: + * start = index.start or 0 + */ + __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); + + /* "View.MemoryView":754 + * 0, 0, 0, # have_{start,stop,step} + * False) + * elif index is None: # <<<<<<<<<<<<<< + * p_dst.shape[new_ndim] = 1 + * p_dst.strides[new_ndim] = 0 + */ + goto __pyx_L6; + } + + /* "View.MemoryView":760 + * new_ndim += 1 + * else: + * start = index.start or 0 # <<<<<<<<<<<<<< + * stop = index.stop or 0 + * step = index.step or 0 + */ + /*else*/ { + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 760, __pyx_L1_error) + if (!__pyx_t_1) { + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + } else { + __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 760, __pyx_L1_error) + __pyx_t_10 = __pyx_t_12; + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + goto __pyx_L7_bool_binop_done; + } + __pyx_t_10 = 0; + __pyx_L7_bool_binop_done:; + __pyx_v_start = __pyx_t_10; + + /* "View.MemoryView":761 + * else: + * start = index.start or 0 + * stop = index.stop or 0 # <<<<<<<<<<<<<< + * step = index.step or 0 + * + */ + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 761, __pyx_L1_error) + if (!__pyx_t_1) { + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + } else { + __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 761, __pyx_L1_error) + __pyx_t_10 = __pyx_t_12; + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + goto __pyx_L9_bool_binop_done; + } + __pyx_t_10 = 0; + __pyx_L9_bool_binop_done:; + __pyx_v_stop = __pyx_t_10; + + /* "View.MemoryView":762 + * start = index.start or 0 + * stop = index.stop or 0 + * step = index.step or 0 # <<<<<<<<<<<<<< + * + * have_start = index.start is not None + */ + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 762, __pyx_L1_error) + if (!__pyx_t_1) { + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + } else { + __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error) + __pyx_t_10 = __pyx_t_12; + 
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + goto __pyx_L11_bool_binop_done; + } + __pyx_t_10 = 0; + __pyx_L11_bool_binop_done:; + __pyx_v_step = __pyx_t_10; + + /* "View.MemoryView":764 + * step = index.step or 0 + * + * have_start = index.start is not None # <<<<<<<<<<<<<< + * have_stop = index.stop is not None + * have_step = index.step is not None + */ + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 764, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_1 = (__pyx_t_9 != Py_None); + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + __pyx_v_have_start = __pyx_t_1; + + /* "View.MemoryView":765 + * + * have_start = index.start is not None + * have_stop = index.stop is not None # <<<<<<<<<<<<<< + * have_step = index.step is not None + * + */ + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 765, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_1 = (__pyx_t_9 != Py_None); + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + __pyx_v_have_stop = __pyx_t_1; + + /* "View.MemoryView":766 + * have_start = index.start is not None + * have_stop = index.stop is not None + * have_step = index.step is not None # <<<<<<<<<<<<<< + * + * slice_memviewslice( + */ + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 766, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_1 = (__pyx_t_9 != Py_None); + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + __pyx_v_have_step = __pyx_t_1; + + /* "View.MemoryView":768 + * have_step = index.step is not None + * + * slice_memviewslice( # <<<<<<<<<<<<<< + * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + * dim, new_ndim, p_suboffset_dim, + */ + __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 768, __pyx_L1_error) + + /* "View.MemoryView":774 + * have_start, have_stop, have_step, + * True) + * new_ndim += 1 # <<<<<<<<<<<<<< + * + * if isinstance(memview, _memoryviewslice): + */ + __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); + } + __pyx_L6:; + + /* "View.MemoryView":746 + * cdef bint have_start, have_stop, have_step + * + * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< + * if PyIndex_Check(index): + * slice_memviewslice( + */ + } + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "View.MemoryView":776 + * new_ndim += 1 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * return memoryview_fromslice(dst, new_ndim, + * memviewsliceobj.to_object_func, + */ + __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":777 + * + * if isinstance(memview, _memoryviewslice): + * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< + * memviewsliceobj.to_object_func, + * memviewsliceobj.to_dtype_func, + */ + __Pyx_XDECREF(((PyObject *)__pyx_r)); + + /* "View.MemoryView":778 + * if isinstance(memview, _memoryviewslice): + * return memoryview_fromslice(dst, new_ndim, + * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< + * memviewsliceobj.to_dtype_func, + * memview.dtype_is_object) + */ + if (unlikely(!__pyx_v_memviewsliceobj)) 
{ __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 778, __pyx_L1_error) } + + /* "View.MemoryView":779 + * return memoryview_fromslice(dst, new_ndim, + * memviewsliceobj.to_object_func, + * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< + * memview.dtype_is_object) + * else: + */ + if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 779, __pyx_L1_error) } + + /* "View.MemoryView":777 + * + * if isinstance(memview, _memoryviewslice): + * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< + * memviewsliceobj.to_object_func, + * memviewsliceobj.to_dtype_func, + */ + __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 777, __pyx_L1_error) + __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "View.MemoryView":776 + * new_ndim += 1 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * return memoryview_fromslice(dst, new_ndim, + * memviewsliceobj.to_object_func, + */ + } + + /* "View.MemoryView":782 + * memview.dtype_is_object) + * else: + * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< + * memview.dtype_is_object) + * + */ + /*else*/ { + __Pyx_XDECREF(((PyObject *)__pyx_r)); + + /* "View.MemoryView":783 + * else: + * return memoryview_fromslice(dst, new_ndim, NULL, NULL, + * memview.dtype_is_object) # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 782, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + + /* "View.MemoryView":782 + * memview.dtype_is_object) + * else: + * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< + * memview.dtype_is_object) + * + */ + if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 782, __pyx_L1_error) + __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); + __pyx_t_3 = 0; + goto __pyx_L0; + } + + /* "View.MemoryView":710 + * + * @cname('__pyx_memview_slice') + * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< + * cdef int new_ndim = 0, suboffset_dim = -1, dim + * cdef bint negative_step + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_9); + __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); + __Pyx_XDECREF(__pyx_v_index); + __Pyx_XGIVEREF((PyObject *)__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":807 + * + * @cname('__pyx_memoryview_slice_memviewslice') + * cdef int slice_memviewslice( # <<<<<<<<<<<<<< + * __Pyx_memviewslice *dst, + * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, + */ + +static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int 
__pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { + Py_ssize_t __pyx_v_new_shape; + int __pyx_v_negative_step; + int __pyx_r; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + + /* "View.MemoryView":827 + * cdef bint negative_step + * + * if not is_slice: # <<<<<<<<<<<<<< + * + * if start < 0: + */ + __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":829 + * if not is_slice: + * + * if start < 0: # <<<<<<<<<<<<<< + * start += shape + * if not 0 <= start < shape: + */ + __pyx_t_1 = ((__pyx_v_start < 0) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":830 + * + * if start < 0: + * start += shape # <<<<<<<<<<<<<< + * if not 0 <= start < shape: + * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) + */ + __pyx_v_start = (__pyx_v_start + __pyx_v_shape); + + /* "View.MemoryView":829 + * if not is_slice: + * + * if start < 0: # <<<<<<<<<<<<<< + * start += shape + * if not 0 <= start < shape: + */ + } + + /* "View.MemoryView":831 + * if start < 0: + * start += shape + * if not 0 <= start < shape: # <<<<<<<<<<<<<< + * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) + * else: + */ + __pyx_t_1 = (0 <= __pyx_v_start); + if (__pyx_t_1) { + __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); + } + __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":832 + * start += shape + * if not 0 <= start < shape: + * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< + * else: + * + */ + __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 832, __pyx_L1_error) + + /* "View.MemoryView":831 + * if start < 0: + * start += shape + * if not 0 <= start < shape: # <<<<<<<<<<<<<< + * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) + * else: + */ + } + + /* "View.MemoryView":827 + * cdef bint negative_step + * + * if not is_slice: # <<<<<<<<<<<<<< + * + * if start < 0: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":835 + * else: + * + * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< + * + * if have_step and step == 0: + */ + /*else*/ { + __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); + if (__pyx_t_1) { + } else { + __pyx_t_2 = __pyx_t_1; + goto __pyx_L6_bool_binop_done; + } + __pyx_t_1 = ((__pyx_v_step < 0) != 0); + __pyx_t_2 = __pyx_t_1; + __pyx_L6_bool_binop_done:; + __pyx_v_negative_step = __pyx_t_2; + + /* "View.MemoryView":837 + * negative_step = have_step != 0 and step < 0 + * + * if have_step and step == 0: # <<<<<<<<<<<<<< + * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) + * + */ + __pyx_t_1 = (__pyx_v_have_step != 0); + if (__pyx_t_1) { + } else { + __pyx_t_2 = __pyx_t_1; + goto __pyx_L9_bool_binop_done; + } + __pyx_t_1 = ((__pyx_v_step == 0) != 0); + __pyx_t_2 = __pyx_t_1; + __pyx_L9_bool_binop_done:; + if (__pyx_t_2) { + + /* "View.MemoryView":838 + * + * if have_step and step == 0: + * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 838, __pyx_L1_error) + + /* "View.MemoryView":837 + * negative_step = have_step != 0 and step < 0 + * + * if have_step and step == 0: # <<<<<<<<<<<<<< + * _err_dim(ValueError, 
"Step may not be zero (axis %d)", dim) + * + */ + } + + /* "View.MemoryView":841 + * + * + * if have_start: # <<<<<<<<<<<<<< + * if start < 0: + * start += shape + */ + __pyx_t_2 = (__pyx_v_have_start != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":842 + * + * if have_start: + * if start < 0: # <<<<<<<<<<<<<< + * start += shape + * if start < 0: + */ + __pyx_t_2 = ((__pyx_v_start < 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":843 + * if have_start: + * if start < 0: + * start += shape # <<<<<<<<<<<<<< + * if start < 0: + * start = 0 + */ + __pyx_v_start = (__pyx_v_start + __pyx_v_shape); + + /* "View.MemoryView":844 + * if start < 0: + * start += shape + * if start < 0: # <<<<<<<<<<<<<< + * start = 0 + * elif start >= shape: + */ + __pyx_t_2 = ((__pyx_v_start < 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":845 + * start += shape + * if start < 0: + * start = 0 # <<<<<<<<<<<<<< + * elif start >= shape: + * if negative_step: + */ + __pyx_v_start = 0; + + /* "View.MemoryView":844 + * if start < 0: + * start += shape + * if start < 0: # <<<<<<<<<<<<<< + * start = 0 + * elif start >= shape: + */ + } + + /* "View.MemoryView":842 + * + * if have_start: + * if start < 0: # <<<<<<<<<<<<<< + * start += shape + * if start < 0: + */ + goto __pyx_L12; + } + + /* "View.MemoryView":846 + * if start < 0: + * start = 0 + * elif start >= shape: # <<<<<<<<<<<<<< + * if negative_step: + * start = shape - 1 + */ + __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":847 + * start = 0 + * elif start >= shape: + * if negative_step: # <<<<<<<<<<<<<< + * start = shape - 1 + * else: + */ + __pyx_t_2 = (__pyx_v_negative_step != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":848 + * elif start >= shape: + * if negative_step: + * start = shape - 1 # <<<<<<<<<<<<<< + * else: + * start = shape + */ + __pyx_v_start = (__pyx_v_shape - 1); + + /* "View.MemoryView":847 + * start = 0 + * elif start >= shape: + * if negative_step: # <<<<<<<<<<<<<< + * start = shape - 1 + * else: + */ + goto __pyx_L14; + } + + /* "View.MemoryView":850 + * start = shape - 1 + * else: + * start = shape # <<<<<<<<<<<<<< + * else: + * if negative_step: + */ + /*else*/ { + __pyx_v_start = __pyx_v_shape; + } + __pyx_L14:; + + /* "View.MemoryView":846 + * if start < 0: + * start = 0 + * elif start >= shape: # <<<<<<<<<<<<<< + * if negative_step: + * start = shape - 1 + */ + } + __pyx_L12:; + + /* "View.MemoryView":841 + * + * + * if have_start: # <<<<<<<<<<<<<< + * if start < 0: + * start += shape + */ + goto __pyx_L11; + } + + /* "View.MemoryView":852 + * start = shape + * else: + * if negative_step: # <<<<<<<<<<<<<< + * start = shape - 1 + * else: + */ + /*else*/ { + __pyx_t_2 = (__pyx_v_negative_step != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":853 + * else: + * if negative_step: + * start = shape - 1 # <<<<<<<<<<<<<< + * else: + * start = 0 + */ + __pyx_v_start = (__pyx_v_shape - 1); + + /* "View.MemoryView":852 + * start = shape + * else: + * if negative_step: # <<<<<<<<<<<<<< + * start = shape - 1 + * else: + */ + goto __pyx_L15; + } + + /* "View.MemoryView":855 + * start = shape - 1 + * else: + * start = 0 # <<<<<<<<<<<<<< + * + * if have_stop: + */ + /*else*/ { + __pyx_v_start = 0; + } + __pyx_L15:; + } + __pyx_L11:; + + /* "View.MemoryView":857 + * start = 0 + * + * if have_stop: # <<<<<<<<<<<<<< + * if stop < 0: + * stop += shape + */ + __pyx_t_2 = (__pyx_v_have_stop != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":858 + * + * if have_stop: + * if stop < 0: # 
<<<<<<<<<<<<<< + * stop += shape + * if stop < 0: + */ + __pyx_t_2 = ((__pyx_v_stop < 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":859 + * if have_stop: + * if stop < 0: + * stop += shape # <<<<<<<<<<<<<< + * if stop < 0: + * stop = 0 + */ + __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); + + /* "View.MemoryView":860 + * if stop < 0: + * stop += shape + * if stop < 0: # <<<<<<<<<<<<<< + * stop = 0 + * elif stop > shape: + */ + __pyx_t_2 = ((__pyx_v_stop < 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":861 + * stop += shape + * if stop < 0: + * stop = 0 # <<<<<<<<<<<<<< + * elif stop > shape: + * stop = shape + */ + __pyx_v_stop = 0; + + /* "View.MemoryView":860 + * if stop < 0: + * stop += shape + * if stop < 0: # <<<<<<<<<<<<<< + * stop = 0 + * elif stop > shape: + */ + } + + /* "View.MemoryView":858 + * + * if have_stop: + * if stop < 0: # <<<<<<<<<<<<<< + * stop += shape + * if stop < 0: + */ + goto __pyx_L17; + } + + /* "View.MemoryView":862 + * if stop < 0: + * stop = 0 + * elif stop > shape: # <<<<<<<<<<<<<< + * stop = shape + * else: + */ + __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":863 + * stop = 0 + * elif stop > shape: + * stop = shape # <<<<<<<<<<<<<< + * else: + * if negative_step: + */ + __pyx_v_stop = __pyx_v_shape; + + /* "View.MemoryView":862 + * if stop < 0: + * stop = 0 + * elif stop > shape: # <<<<<<<<<<<<<< + * stop = shape + * else: + */ + } + __pyx_L17:; + + /* "View.MemoryView":857 + * start = 0 + * + * if have_stop: # <<<<<<<<<<<<<< + * if stop < 0: + * stop += shape + */ + goto __pyx_L16; + } + + /* "View.MemoryView":865 + * stop = shape + * else: + * if negative_step: # <<<<<<<<<<<<<< + * stop = -1 + * else: + */ + /*else*/ { + __pyx_t_2 = (__pyx_v_negative_step != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":866 + * else: + * if negative_step: + * stop = -1 # <<<<<<<<<<<<<< + * else: + * stop = shape + */ + __pyx_v_stop = -1L; + + /* "View.MemoryView":865 + * stop = shape + * else: + * if negative_step: # <<<<<<<<<<<<<< + * stop = -1 + * else: + */ + goto __pyx_L19; + } + + /* "View.MemoryView":868 + * stop = -1 + * else: + * stop = shape # <<<<<<<<<<<<<< + * + * if not have_step: + */ + /*else*/ { + __pyx_v_stop = __pyx_v_shape; + } + __pyx_L19:; + } + __pyx_L16:; + + /* "View.MemoryView":870 + * stop = shape + * + * if not have_step: # <<<<<<<<<<<<<< + * step = 1 + * + */ + __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":871 + * + * if not have_step: + * step = 1 # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_step = 1; + + /* "View.MemoryView":870 + * stop = shape + * + * if not have_step: # <<<<<<<<<<<<<< + * step = 1 + * + */ + } + + /* "View.MemoryView":875 + * + * with cython.cdivision(True): + * new_shape = (stop - start) // step # <<<<<<<<<<<<<< + * + * if (stop - start) - step * new_shape: + */ + __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); + + /* "View.MemoryView":877 + * new_shape = (stop - start) // step + * + * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< + * new_shape += 1 + * + */ + __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":878 + * + * if (stop - start) - step * new_shape: + * new_shape += 1 # <<<<<<<<<<<<<< + * + * if new_shape < 0: + */ + __pyx_v_new_shape = (__pyx_v_new_shape + 1); + + /* "View.MemoryView":877 + * new_shape = (stop - start) // step + * + * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< + * 
new_shape += 1 + * + */ + } + + /* "View.MemoryView":880 + * new_shape += 1 + * + * if new_shape < 0: # <<<<<<<<<<<<<< + * new_shape = 0 + * + */ + __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":881 + * + * if new_shape < 0: + * new_shape = 0 # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_new_shape = 0; + + /* "View.MemoryView":880 + * new_shape += 1 + * + * if new_shape < 0: # <<<<<<<<<<<<<< + * new_shape = 0 + * + */ + } + + /* "View.MemoryView":884 + * + * + * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< + * dst.shape[new_ndim] = new_shape + * dst.suboffsets[new_ndim] = suboffset + */ + (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); + + /* "View.MemoryView":885 + * + * dst.strides[new_ndim] = stride * step + * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< + * dst.suboffsets[new_ndim] = suboffset + * + */ + (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; + + /* "View.MemoryView":886 + * dst.strides[new_ndim] = stride * step + * dst.shape[new_ndim] = new_shape + * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< + * + * + */ + (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; + } + __pyx_L3:; + + /* "View.MemoryView":889 + * + * + * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< + * dst.data += start * stride + * else: + */ + __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":890 + * + * if suboffset_dim[0] < 0: + * dst.data += start * stride # <<<<<<<<<<<<<< + * else: + * dst.suboffsets[suboffset_dim[0]] += start * stride + */ + __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); + + /* "View.MemoryView":889 + * + * + * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< + * dst.data += start * stride + * else: + */ + goto __pyx_L23; + } + + /* "View.MemoryView":892 + * dst.data += start * stride + * else: + * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< + * + * if suboffset >= 0: + */ + /*else*/ { + __pyx_t_3 = (__pyx_v_suboffset_dim[0]); + (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); + } + __pyx_L23:; + + /* "View.MemoryView":894 + * dst.suboffsets[suboffset_dim[0]] += start * stride + * + * if suboffset >= 0: # <<<<<<<<<<<<<< + * if not is_slice: + * if new_ndim == 0: + */ + __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":895 + * + * if suboffset >= 0: + * if not is_slice: # <<<<<<<<<<<<<< + * if new_ndim == 0: + * dst.data = ( dst.data)[0] + suboffset + */ + __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":896 + * if suboffset >= 0: + * if not is_slice: + * if new_ndim == 0: # <<<<<<<<<<<<<< + * dst.data = ( dst.data)[0] + suboffset + * else: + */ + __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":897 + * if not is_slice: + * if new_ndim == 0: + * dst.data = ( dst.data)[0] + suboffset # <<<<<<<<<<<<<< + * else: + * _err_dim(IndexError, "All dimensions preceding dimension %d " + */ + __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); + + /* "View.MemoryView":896 + * if suboffset >= 0: + * if not is_slice: + * if new_ndim == 0: # <<<<<<<<<<<<<< + * dst.data = ( dst.data)[0] + suboffset + * else: + */ + goto __pyx_L26; + } + + /* "View.MemoryView":899 + * dst.data = ( dst.data)[0] + suboffset + * else: + * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< + * 
"must be indexed and not sliced", dim) + * else: + */ + /*else*/ { + + /* "View.MemoryView":900 + * else: + * _err_dim(IndexError, "All dimensions preceding dimension %d " + * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< + * else: + * suboffset_dim[0] = new_ndim + */ + __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 899, __pyx_L1_error) + } + __pyx_L26:; + + /* "View.MemoryView":895 + * + * if suboffset >= 0: + * if not is_slice: # <<<<<<<<<<<<<< + * if new_ndim == 0: + * dst.data = ( dst.data)[0] + suboffset + */ + goto __pyx_L25; + } + + /* "View.MemoryView":902 + * "must be indexed and not sliced", dim) + * else: + * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< + * + * return 0 + */ + /*else*/ { + (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; + } + __pyx_L25:; + + /* "View.MemoryView":894 + * dst.suboffsets[suboffset_dim[0]] += start * stride + * + * if suboffset >= 0: # <<<<<<<<<<<<<< + * if not is_slice: + * if new_ndim == 0: + */ + } + + /* "View.MemoryView":904 + * suboffset_dim[0] = new_ndim + * + * return 0 # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = 0; + goto __pyx_L0; + + /* "View.MemoryView":807 + * + * @cname('__pyx_memoryview_slice_memviewslice') + * cdef int slice_memviewslice( # <<<<<<<<<<<<<< + * __Pyx_memviewslice *dst, + * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, + */ + + /* function exit code */ + __pyx_L1_error:; + { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + } + __pyx_r = -1; + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":910 + * + * @cname('__pyx_pybuffer_index') + * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< + * Py_ssize_t dim) except NULL: + * cdef Py_ssize_t shape, stride, suboffset = -1 + */ + +static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { + Py_ssize_t __pyx_v_shape; + Py_ssize_t __pyx_v_stride; + Py_ssize_t __pyx_v_suboffset; + Py_ssize_t __pyx_v_itemsize; + char *__pyx_v_resultp; + char *__pyx_r; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("pybuffer_index", 0); + + /* "View.MemoryView":912 + * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, + * Py_ssize_t dim) except NULL: + * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< + * cdef Py_ssize_t itemsize = view.itemsize + * cdef char *resultp + */ + __pyx_v_suboffset = -1L; + + /* "View.MemoryView":913 + * Py_ssize_t dim) except NULL: + * cdef Py_ssize_t shape, stride, suboffset = -1 + * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< + * cdef char *resultp + * + */ + __pyx_t_1 = __pyx_v_view->itemsize; + __pyx_v_itemsize = __pyx_t_1; + + /* "View.MemoryView":916 + * cdef char *resultp + * + * if view.ndim == 0: # <<<<<<<<<<<<<< + * shape = view.len / itemsize + * stride = itemsize + */ + __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":917 + * + * if view.ndim == 
0: + * shape = view.len / itemsize # <<<<<<<<<<<<<< + * stride = itemsize + * else: + */ + if (unlikely(__pyx_v_itemsize == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); + __PYX_ERR(1, 917, __pyx_L1_error) + } + else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { + PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); + __PYX_ERR(1, 917, __pyx_L1_error) + } + __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize); + + /* "View.MemoryView":918 + * if view.ndim == 0: + * shape = view.len / itemsize + * stride = itemsize # <<<<<<<<<<<<<< + * else: + * shape = view.shape[dim] + */ + __pyx_v_stride = __pyx_v_itemsize; + + /* "View.MemoryView":916 + * cdef char *resultp + * + * if view.ndim == 0: # <<<<<<<<<<<<<< + * shape = view.len / itemsize + * stride = itemsize + */ + goto __pyx_L3; + } + + /* "View.MemoryView":920 + * stride = itemsize + * else: + * shape = view.shape[dim] # <<<<<<<<<<<<<< + * stride = view.strides[dim] + * if view.suboffsets != NULL: + */ + /*else*/ { + __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); + + /* "View.MemoryView":921 + * else: + * shape = view.shape[dim] + * stride = view.strides[dim] # <<<<<<<<<<<<<< + * if view.suboffsets != NULL: + * suboffset = view.suboffsets[dim] + */ + __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); + + /* "View.MemoryView":922 + * shape = view.shape[dim] + * stride = view.strides[dim] + * if view.suboffsets != NULL: # <<<<<<<<<<<<<< + * suboffset = view.suboffsets[dim] + * + */ + __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":923 + * stride = view.strides[dim] + * if view.suboffsets != NULL: + * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< + * + * if index < 0: + */ + __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); + + /* "View.MemoryView":922 + * shape = view.shape[dim] + * stride = view.strides[dim] + * if view.suboffsets != NULL: # <<<<<<<<<<<<<< + * suboffset = view.suboffsets[dim] + * + */ + } + } + __pyx_L3:; + + /* "View.MemoryView":925 + * suboffset = view.suboffsets[dim] + * + * if index < 0: # <<<<<<<<<<<<<< + * index += view.shape[dim] + * if index < 0: + */ + __pyx_t_2 = ((__pyx_v_index < 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":926 + * + * if index < 0: + * index += view.shape[dim] # <<<<<<<<<<<<<< + * if index < 0: + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) + */ + __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); + + /* "View.MemoryView":927 + * if index < 0: + * index += view.shape[dim] + * if index < 0: # <<<<<<<<<<<<<< + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) + * + */ + __pyx_t_2 = ((__pyx_v_index < 0) != 0); + if (unlikely(__pyx_t_2)) { + + /* "View.MemoryView":928 + * index += view.shape[dim] + * if index < 0: + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< + * + * if index >= shape: + */ + __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 928, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = 
__Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 928, __pyx_L1_error) + + /* "View.MemoryView":927 + * if index < 0: + * index += view.shape[dim] + * if index < 0: # <<<<<<<<<<<<<< + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) + * + */ + } + + /* "View.MemoryView":925 + * suboffset = view.suboffsets[dim] + * + * if index < 0: # <<<<<<<<<<<<<< + * index += view.shape[dim] + * if index < 0: + */ + } + + /* "View.MemoryView":930 + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) + * + * if index >= shape: # <<<<<<<<<<<<<< + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) + * + */ + __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); + if (unlikely(__pyx_t_2)) { + + /* "View.MemoryView":931 + * + * if index >= shape: + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< + * + * resultp = bufp + index * stride + */ + __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 931, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 931, __pyx_L1_error) + + /* "View.MemoryView":930 + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) + * + * if index >= shape: # <<<<<<<<<<<<<< + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) + * + */ + } + + /* "View.MemoryView":933 + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) + * + * resultp = bufp + index * stride # <<<<<<<<<<<<<< + * if suboffset >= 0: + * resultp = ( resultp)[0] + suboffset + */ + __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); + + /* "View.MemoryView":934 + * + * resultp = bufp + index * stride + * if suboffset >= 0: # <<<<<<<<<<<<<< + * resultp = ( resultp)[0] + suboffset + * + */ + __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":935 + * resultp = bufp + index * stride + * if suboffset >= 0: + * resultp = ( resultp)[0] + suboffset # <<<<<<<<<<<<<< + * + * return resultp + */ + __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); + + /* "View.MemoryView":934 + * + * resultp = bufp + index * stride + * if suboffset >= 0: # <<<<<<<<<<<<<< + * resultp = ( resultp)[0] + suboffset + * + */ + } + + /* "View.MemoryView":937 + * resultp = ( resultp)[0] + suboffset + * + * return resultp # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = __pyx_v_resultp; + goto __pyx_L0; + + /* "View.MemoryView":910 + * + * @cname('__pyx_pybuffer_index') + * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< + * Py_ssize_t dim) except NULL: + * cdef Py_ssize_t shape, stride, suboffset = -1 + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + 
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":943 + * + * @cname('__pyx_memslice_transpose') + * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< + * cdef int ndim = memslice.memview.view.ndim + * + */ + +static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { + int __pyx_v_ndim; + Py_ssize_t *__pyx_v_shape; + Py_ssize_t *__pyx_v_strides; + int __pyx_v_i; + int __pyx_v_j; + int __pyx_r; + int __pyx_t_1; + Py_ssize_t *__pyx_t_2; + long __pyx_t_3; + long __pyx_t_4; + Py_ssize_t __pyx_t_5; + Py_ssize_t __pyx_t_6; + int __pyx_t_7; + int __pyx_t_8; + int __pyx_t_9; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + + /* "View.MemoryView":944 + * @cname('__pyx_memslice_transpose') + * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: + * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< + * + * cdef Py_ssize_t *shape = memslice.shape + */ + __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; + __pyx_v_ndim = __pyx_t_1; + + /* "View.MemoryView":946 + * cdef int ndim = memslice.memview.view.ndim + * + * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< + * cdef Py_ssize_t *strides = memslice.strides + * + */ + __pyx_t_2 = __pyx_v_memslice->shape; + __pyx_v_shape = __pyx_t_2; + + /* "View.MemoryView":947 + * + * cdef Py_ssize_t *shape = memslice.shape + * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_2 = __pyx_v_memslice->strides; + __pyx_v_strides = __pyx_t_2; + + /* "View.MemoryView":951 + * + * cdef int i, j + * for i in range(ndim / 2): # <<<<<<<<<<<<<< + * j = ndim - 1 - i + * strides[i], strides[j] = strides[j], strides[i] + */ + __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2); + __pyx_t_4 = __pyx_t_3; + for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) { + __pyx_v_i = __pyx_t_1; + + /* "View.MemoryView":952 + * cdef int i, j + * for i in range(ndim / 2): + * j = ndim - 1 - i # <<<<<<<<<<<<<< + * strides[i], strides[j] = strides[j], strides[i] + * shape[i], shape[j] = shape[j], shape[i] + */ + __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); + + /* "View.MemoryView":953 + * for i in range(ndim / 2): + * j = ndim - 1 - i + * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< + * shape[i], shape[j] = shape[j], shape[i] + * + */ + __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]); + __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]); + (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5; + (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6; + + /* "View.MemoryView":954 + * j = ndim - 1 - i + * strides[i], strides[j] = strides[j], strides[i] + * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< + * + * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: + */ + __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]); + __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]); + (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6; + (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5; + + /* "View.MemoryView":956 + * shape[i], shape[j] = shape[j], shape[i] + * + * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< + * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") + * + */ + __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); + if (!__pyx_t_8) { + } else { + __pyx_t_7 = __pyx_t_8; + goto __pyx_L6_bool_binop_done; + } + __pyx_t_8 = 
(((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); + __pyx_t_7 = __pyx_t_8; + __pyx_L6_bool_binop_done:; + if (__pyx_t_7) { + + /* "View.MemoryView":957 + * + * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: + * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< + * + * return 1 + */ + __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L1_error) + + /* "View.MemoryView":956 + * shape[i], shape[j] = shape[j], shape[i] + * + * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< + * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") + * + */ + } + } + + /* "View.MemoryView":959 + * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") + * + * return 1 # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = 1; + goto __pyx_L0; + + /* "View.MemoryView":943 + * + * @cname('__pyx_memslice_transpose') + * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< + * cdef int ndim = memslice.memview.view.ndim + * + */ + + /* function exit code */ + __pyx_L1_error:; + { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + } + __pyx_r = 0; + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":976 + * cdef int (*to_dtype_func)(char *, object) except 0 + * + * def __dealloc__(self): # <<<<<<<<<<<<<< + * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) + * + */ + +/* Python wrapper */ +static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ +static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); + __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__dealloc__", 0); + + /* "View.MemoryView":977 + * + * def __dealloc__(self): + * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< + * + * cdef convert_item_to_object(self, char *itemp): + */ + __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); + + /* "View.MemoryView":976 + * cdef int (*to_dtype_func)(char *, object) except 0 + * + * def __dealloc__(self): # <<<<<<<<<<<<<< + * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) + * + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "View.MemoryView":979 + * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) + * + * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< + * if self.to_object_func != NULL: + * return self.to_object_func(itemp) + */ + +static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + 
__Pyx_RefNannySetupContext("convert_item_to_object", 0); + + /* "View.MemoryView":980 + * + * cdef convert_item_to_object(self, char *itemp): + * if self.to_object_func != NULL: # <<<<<<<<<<<<<< + * return self.to_object_func(itemp) + * else: + */ + __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":981 + * cdef convert_item_to_object(self, char *itemp): + * if self.to_object_func != NULL: + * return self.to_object_func(itemp) # <<<<<<<<<<<<<< + * else: + * return memoryview.convert_item_to_object(self, itemp) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 981, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":980 + * + * cdef convert_item_to_object(self, char *itemp): + * if self.to_object_func != NULL: # <<<<<<<<<<<<<< + * return self.to_object_func(itemp) + * else: + */ + } + + /* "View.MemoryView":983 + * return self.to_object_func(itemp) + * else: + * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< + * + * cdef assign_item_from_object(self, char *itemp, object value): + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 983, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + } + + /* "View.MemoryView":979 + * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) + * + * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< + * if self.to_object_func != NULL: + * return self.to_object_func(itemp) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":985 + * return memoryview.convert_item_to_object(self, itemp) + * + * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< + * if self.to_dtype_func != NULL: + * self.to_dtype_func(itemp, value) + */ + +static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("assign_item_from_object", 0); + + /* "View.MemoryView":986 + * + * cdef assign_item_from_object(self, char *itemp, object value): + * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< + * self.to_dtype_func(itemp, value) + * else: + */ + __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":987 + * cdef assign_item_from_object(self, char *itemp, object value): + * if self.to_dtype_func != NULL: + * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< + * else: + * memoryview.assign_item_from_object(self, itemp, value) + */ + __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 987, __pyx_L1_error) + + /* "View.MemoryView":986 + * + * cdef assign_item_from_object(self, char *itemp, object value): + * if 
self.to_dtype_func != NULL: # <<<<<<<<<<<<<< + * self.to_dtype_func(itemp, value) + * else: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":989 + * self.to_dtype_func(itemp, value) + * else: + * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< + * + * @property + */ + /*else*/ { + __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 989, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __pyx_L3:; + + /* "View.MemoryView":985 + * return memoryview.convert_item_to_object(self, itemp) + * + * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< + * if self.to_dtype_func != NULL: + * self.to_dtype_func(itemp, value) + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":992 + * + * @property + * def base(self): # <<<<<<<<<<<<<< + * return self.from_object + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":993 + * @property + * def base(self): + * return self.from_object # <<<<<<<<<<<<<< + * + * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self->from_object); + __pyx_r = __pyx_v_self->from_object; + goto __pyx_L0; + + /* "View.MemoryView":992 + * + * @property + * def base(self): # <<<<<<<<<<<<<< + * return self.from_object + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject 
*__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__reduce_cython__", 0); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 2, __pyx_L1_error) + + /* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ +static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 4, __pyx_L1_error) + + /* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + + /* function exit code 
*/ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":999 + * + * @cname('__pyx_memoryview_fromslice') + * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< + * int ndim, + * object (*to_object_func)(char *), + */ + +static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { + struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; + Py_ssize_t __pyx_v_suboffset; + PyObject *__pyx_v_length = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + __Pyx_TypeInfo *__pyx_t_4; + Py_buffer __pyx_t_5; + Py_ssize_t *__pyx_t_6; + Py_ssize_t *__pyx_t_7; + Py_ssize_t *__pyx_t_8; + Py_ssize_t __pyx_t_9; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("memoryview_fromslice", 0); + + /* "View.MemoryView":1007 + * cdef _memoryviewslice result + * + * if memviewslice.memview == Py_None: # <<<<<<<<<<<<<< + * return None + * + */ + __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1008 + * + * if memviewslice.memview == Py_None: + * return None # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + + /* "View.MemoryView":1007 + * cdef _memoryviewslice result + * + * if memviewslice.memview == Py_None: # <<<<<<<<<<<<<< + * return None + * + */ + } + + /* "View.MemoryView":1013 + * + * + * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< + * + * result.from_slice = memviewslice + */ + __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1013, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(Py_None); + PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); + __Pyx_INCREF(__pyx_int_0); + __Pyx_GIVEREF(__pyx_int_0); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); + __Pyx_GIVEREF(__pyx_t_2); + PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); + __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); + __pyx_t_2 = 0; + + /* "View.MemoryView":1015 + * result = _memoryviewslice(None, 0, dtype_is_object) + * + * result.from_slice = memviewslice # <<<<<<<<<<<<<< + * __PYX_INC_MEMVIEW(&memviewslice, 1) + * + */ + __pyx_v_result->from_slice = __pyx_v_memviewslice; + + /* "View.MemoryView":1016 + * + * result.from_slice = memviewslice + * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< + * + * result.from_object = ( memviewslice.memview).base + */ + __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); + + /* "View.MemoryView":1018 + * __PYX_INC_MEMVIEW(&memviewslice, 1) + * + * result.from_object = ( memviewslice.memview).base # <<<<<<<<<<<<<< + * result.typeinfo = 
memviewslice.memview.typeinfo + * + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1018, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GIVEREF(__pyx_t_2); + __Pyx_GOTREF(__pyx_v_result->from_object); + __Pyx_DECREF(__pyx_v_result->from_object); + __pyx_v_result->from_object = __pyx_t_2; + __pyx_t_2 = 0; + + /* "View.MemoryView":1019 + * + * result.from_object = ( memviewslice.memview).base + * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< + * + * result.view = memviewslice.memview.view + */ + __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; + __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; + + /* "View.MemoryView":1021 + * result.typeinfo = memviewslice.memview.typeinfo + * + * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< + * result.view.buf = memviewslice.data + * result.view.ndim = ndim + */ + __pyx_t_5 = __pyx_v_memviewslice.memview->view; + __pyx_v_result->__pyx_base.view = __pyx_t_5; + + /* "View.MemoryView":1022 + * + * result.view = memviewslice.memview.view + * result.view.buf = memviewslice.data # <<<<<<<<<<<<<< + * result.view.ndim = ndim + * (<__pyx_buffer *> &result.view).obj = Py_None + */ + __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); + + /* "View.MemoryView":1023 + * result.view = memviewslice.memview.view + * result.view.buf = memviewslice.data + * result.view.ndim = ndim # <<<<<<<<<<<<<< + * (<__pyx_buffer *> &result.view).obj = Py_None + * Py_INCREF(Py_None) + */ + __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; + + /* "View.MemoryView":1024 + * result.view.buf = memviewslice.data + * result.view.ndim = ndim + * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< + * Py_INCREF(Py_None) + * + */ + ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; + + /* "View.MemoryView":1025 + * result.view.ndim = ndim + * (<__pyx_buffer *> &result.view).obj = Py_None + * Py_INCREF(Py_None) # <<<<<<<<<<<<<< + * + * if (memviewslice.memview).flags & PyBUF_WRITABLE: + */ + Py_INCREF(Py_None); + + /* "View.MemoryView":1027 + * Py_INCREF(Py_None) + * + * if (memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< + * result.flags = PyBUF_RECORDS + * else: + */ + __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1028 + * + * if (memviewslice.memview).flags & PyBUF_WRITABLE: + * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< + * else: + * result.flags = PyBUF_RECORDS_RO + */ + __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; + + /* "View.MemoryView":1027 + * Py_INCREF(Py_None) + * + * if (memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< + * result.flags = PyBUF_RECORDS + * else: + */ + goto __pyx_L4; + } + + /* "View.MemoryView":1030 + * result.flags = PyBUF_RECORDS + * else: + * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<< + * + * result.view.shape = result.from_slice.shape + */ + /*else*/ { + __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO; + } + __pyx_L4:; + + /* "View.MemoryView":1032 + * result.flags = PyBUF_RECORDS_RO + * + * result.view.shape = result.from_slice.shape # <<<<<<<<<<<<<< + * result.view.strides = result.from_slice.strides + * + */ + __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); + + /* "View.MemoryView":1033 + * + * result.view.shape = result.from_slice.shape + * result.view.strides = 
result.from_slice.strides # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); + + /* "View.MemoryView":1036 + * + * + * result.view.suboffsets = NULL # <<<<<<<<<<<<<< + * for suboffset in result.from_slice.suboffsets[:ndim]: + * if suboffset >= 0: + */ + __pyx_v_result->__pyx_base.view.suboffsets = NULL; + + /* "View.MemoryView":1037 + * + * result.view.suboffsets = NULL + * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< + * if suboffset >= 0: + * result.view.suboffsets = result.from_slice.suboffsets + */ + __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); + for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { + __pyx_t_6 = __pyx_t_8; + __pyx_v_suboffset = (__pyx_t_6[0]); + + /* "View.MemoryView":1038 + * result.view.suboffsets = NULL + * for suboffset in result.from_slice.suboffsets[:ndim]: + * if suboffset >= 0: # <<<<<<<<<<<<<< + * result.view.suboffsets = result.from_slice.suboffsets + * break + */ + __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1039 + * for suboffset in result.from_slice.suboffsets[:ndim]: + * if suboffset >= 0: + * result.view.suboffsets = result.from_slice.suboffsets # <<<<<<<<<<<<<< + * break + * + */ + __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); + + /* "View.MemoryView":1040 + * if suboffset >= 0: + * result.view.suboffsets = result.from_slice.suboffsets + * break # <<<<<<<<<<<<<< + * + * result.view.len = result.view.itemsize + */ + goto __pyx_L6_break; + + /* "View.MemoryView":1038 + * result.view.suboffsets = NULL + * for suboffset in result.from_slice.suboffsets[:ndim]: + * if suboffset >= 0: # <<<<<<<<<<<<<< + * result.view.suboffsets = result.from_slice.suboffsets + * break + */ + } + } + __pyx_L6_break:; + + /* "View.MemoryView":1042 + * break + * + * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< + * for length in result.view.shape[:ndim]: + * result.view.len *= length + */ + __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; + __pyx_v_result->__pyx_base.view.len = __pyx_t_9; + + /* "View.MemoryView":1043 + * + * result.view.len = result.view.itemsize + * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< + * result.view.len *= length + * + */ + __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); + for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { + __pyx_t_6 = __pyx_t_8; + __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1043, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); + __pyx_t_2 = 0; + + /* "View.MemoryView":1044 + * result.view.len = result.view.itemsize + * for length in result.view.shape[:ndim]: + * result.view.len *= length # <<<<<<<<<<<<<< + * + * result.to_object_func = to_object_func + */ + __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1044, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1044, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1044, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + 
__pyx_v_result->__pyx_base.view.len = __pyx_t_9; + } + + /* "View.MemoryView":1046 + * result.view.len *= length + * + * result.to_object_func = to_object_func # <<<<<<<<<<<<<< + * result.to_dtype_func = to_dtype_func + * + */ + __pyx_v_result->to_object_func = __pyx_v_to_object_func; + + /* "View.MemoryView":1047 + * + * result.to_object_func = to_object_func + * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< + * + * return result + */ + __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; + + /* "View.MemoryView":1049 + * result.to_dtype_func = to_dtype_func + * + * return result # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_get_slice_from_memoryview') + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_result)); + __pyx_r = ((PyObject *)__pyx_v_result); + goto __pyx_L0; + + /* "View.MemoryView":999 + * + * @cname('__pyx_memoryview_fromslice') + * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< + * int ndim, + * object (*to_object_func)(char *), + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_result); + __Pyx_XDECREF(__pyx_v_length); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":1052 + * + * @cname('__pyx_memoryview_get_slice_from_memoryview') + * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *mslice) except NULL: + * cdef _memoryviewslice obj + */ + +static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { + struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; + __Pyx_memviewslice *__pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("get_slice_from_memview", 0); + + /* "View.MemoryView":1055 + * __Pyx_memviewslice *mslice) except NULL: + * cdef _memoryviewslice obj + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * obj = memview + * return &obj.from_slice + */ + __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1056 + * cdef _memoryviewslice obj + * if isinstance(memview, _memoryviewslice): + * obj = memview # <<<<<<<<<<<<<< + * return &obj.from_slice + * else: + */ + if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1056, __pyx_L1_error) + __pyx_t_3 = ((PyObject *)__pyx_v_memview); + __Pyx_INCREF(__pyx_t_3); + __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); + __pyx_t_3 = 0; + + /* "View.MemoryView":1057 + * if isinstance(memview, _memoryviewslice): + * obj = memview + * return &obj.from_slice # <<<<<<<<<<<<<< + * else: + * slice_copy(memview, mslice) + */ + __pyx_r = (&__pyx_v_obj->from_slice); + goto __pyx_L0; + + /* "View.MemoryView":1055 + * __Pyx_memviewslice *mslice) except NULL: + * cdef _memoryviewslice obj + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * obj = memview + * return &obj.from_slice + */ + } + + /* "View.MemoryView":1059 + * return 
&obj.from_slice + * else: + * slice_copy(memview, mslice) # <<<<<<<<<<<<<< + * return mslice + * + */ + /*else*/ { + __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); + + /* "View.MemoryView":1060 + * else: + * slice_copy(memview, mslice) + * return mslice # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_slice_copy') + */ + __pyx_r = __pyx_v_mslice; + goto __pyx_L0; + } + + /* "View.MemoryView":1052 + * + * @cname('__pyx_memoryview_get_slice_from_memoryview') + * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *mslice) except NULL: + * cdef _memoryviewslice obj + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_obj); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":1063 + * + * @cname('__pyx_memoryview_slice_copy') + * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< + * cdef int dim + * cdef (Py_ssize_t*) shape, strides, suboffsets + */ + +static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { + int __pyx_v_dim; + Py_ssize_t *__pyx_v_shape; + Py_ssize_t *__pyx_v_strides; + Py_ssize_t *__pyx_v_suboffsets; + __Pyx_RefNannyDeclarations + Py_ssize_t *__pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + Py_ssize_t __pyx_t_5; + __Pyx_RefNannySetupContext("slice_copy", 0); + + /* "View.MemoryView":1067 + * cdef (Py_ssize_t*) shape, strides, suboffsets + * + * shape = memview.view.shape # <<<<<<<<<<<<<< + * strides = memview.view.strides + * suboffsets = memview.view.suboffsets + */ + __pyx_t_1 = __pyx_v_memview->view.shape; + __pyx_v_shape = __pyx_t_1; + + /* "View.MemoryView":1068 + * + * shape = memview.view.shape + * strides = memview.view.strides # <<<<<<<<<<<<<< + * suboffsets = memview.view.suboffsets + * + */ + __pyx_t_1 = __pyx_v_memview->view.strides; + __pyx_v_strides = __pyx_t_1; + + /* "View.MemoryView":1069 + * shape = memview.view.shape + * strides = memview.view.strides + * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< + * + * dst.memview = <__pyx_memoryview *> memview + */ + __pyx_t_1 = __pyx_v_memview->view.suboffsets; + __pyx_v_suboffsets = __pyx_t_1; + + /* "View.MemoryView":1071 + * suboffsets = memview.view.suboffsets + * + * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< + * dst.data = memview.view.buf + * + */ + __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); + + /* "View.MemoryView":1072 + * + * dst.memview = <__pyx_memoryview *> memview + * dst.data = memview.view.buf # <<<<<<<<<<<<<< + * + * for dim in range(memview.view.ndim): + */ + __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); + + /* "View.MemoryView":1074 + * dst.data = memview.view.buf + * + * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< + * dst.shape[dim] = shape[dim] + * dst.strides[dim] = strides[dim] + */ + __pyx_t_2 = __pyx_v_memview->view.ndim; + __pyx_t_3 = __pyx_t_2; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_dim = __pyx_t_4; + + /* "View.MemoryView":1075 + * + * for dim in range(memview.view.ndim): + * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< + * dst.strides[dim] = strides[dim] + * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 + */ + (__pyx_v_dst->shape[__pyx_v_dim]) = 
(__pyx_v_shape[__pyx_v_dim]); + + /* "View.MemoryView":1076 + * for dim in range(memview.view.ndim): + * dst.shape[dim] = shape[dim] + * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< + * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 + * + */ + (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); + + /* "View.MemoryView":1077 + * dst.shape[dim] = shape[dim] + * dst.strides[dim] = strides[dim] + * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_copy_object') + */ + if ((__pyx_v_suboffsets != 0)) { + __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]); + } else { + __pyx_t_5 = -1L; + } + (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5; + } + + /* "View.MemoryView":1063 + * + * @cname('__pyx_memoryview_slice_copy') + * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< + * cdef int dim + * cdef (Py_ssize_t*) shape, strides, suboffsets + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "View.MemoryView":1080 + * + * @cname('__pyx_memoryview_copy_object') + * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< + * "Create a new memoryview object" + * cdef __Pyx_memviewslice memviewslice + */ + +static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { + __Pyx_memviewslice __pyx_v_memviewslice; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("memoryview_copy", 0); + + /* "View.MemoryView":1083 + * "Create a new memoryview object" + * cdef __Pyx_memviewslice memviewslice + * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< + * return memoryview_copy_from_slice(memview, &memviewslice) + * + */ + __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); + + /* "View.MemoryView":1084 + * cdef __Pyx_memviewslice memviewslice + * slice_copy(memview, &memviewslice) + * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_copy_object_from_slice') + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1084, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "View.MemoryView":1080 + * + * @cname('__pyx_memoryview_copy_object') + * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< + * "Create a new memoryview object" + * cdef __Pyx_memviewslice memviewslice + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":1087 + * + * @cname('__pyx_memoryview_copy_object_from_slice') + * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< + * """ + * Create a new memoryview object from a given memoryview object and slice. 
+ */ + +static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { + PyObject *(*__pyx_v_to_object_func)(char *); + int (*__pyx_v_to_dtype_func)(char *, PyObject *); + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *(*__pyx_t_3)(char *); + int (*__pyx_t_4)(char *, PyObject *); + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); + + /* "View.MemoryView":1094 + * cdef int (*to_dtype_func)(char *, object) except 0 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * to_object_func = (<_memoryviewslice> memview).to_object_func + * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func + */ + __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1095 + * + * if isinstance(memview, _memoryviewslice): + * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< + * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func + * else: + */ + __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; + __pyx_v_to_object_func = __pyx_t_3; + + /* "View.MemoryView":1096 + * if isinstance(memview, _memoryviewslice): + * to_object_func = (<_memoryviewslice> memview).to_object_func + * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< + * else: + * to_object_func = NULL + */ + __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; + __pyx_v_to_dtype_func = __pyx_t_4; + + /* "View.MemoryView":1094 + * cdef int (*to_dtype_func)(char *, object) except 0 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * to_object_func = (<_memoryviewslice> memview).to_object_func + * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func + */ + goto __pyx_L3; + } + + /* "View.MemoryView":1098 + * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func + * else: + * to_object_func = NULL # <<<<<<<<<<<<<< + * to_dtype_func = NULL + * + */ + /*else*/ { + __pyx_v_to_object_func = NULL; + + /* "View.MemoryView":1099 + * else: + * to_object_func = NULL + * to_dtype_func = NULL # <<<<<<<<<<<<<< + * + * return memoryview_fromslice(memviewslice[0], memview.view.ndim, + */ + __pyx_v_to_dtype_func = NULL; + } + __pyx_L3:; + + /* "View.MemoryView":1101 + * to_dtype_func = NULL + * + * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< + * to_object_func, to_dtype_func, + * memview.dtype_is_object) + */ + __Pyx_XDECREF(__pyx_r); + + /* "View.MemoryView":1103 + * return memoryview_fromslice(memviewslice[0], memview.view.ndim, + * to_object_func, to_dtype_func, + * memview.dtype_is_object) # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1101, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_r = __pyx_t_5; + __pyx_t_5 = 0; + goto __pyx_L0; + + /* "View.MemoryView":1087 + * + * @cname('__pyx_memoryview_copy_object_from_slice') + * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< + * """ + * Create a new memoryview object from a given memoryview 
object and slice. + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":1109 + * + * + * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< + * if arg < 0: + * return -arg + */ + +static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { + Py_ssize_t __pyx_r; + int __pyx_t_1; + + /* "View.MemoryView":1110 + * + * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: + * if arg < 0: # <<<<<<<<<<<<<< + * return -arg + * else: + */ + __pyx_t_1 = ((__pyx_v_arg < 0) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1111 + * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: + * if arg < 0: + * return -arg # <<<<<<<<<<<<<< + * else: + * return arg + */ + __pyx_r = (-__pyx_v_arg); + goto __pyx_L0; + + /* "View.MemoryView":1110 + * + * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: + * if arg < 0: # <<<<<<<<<<<<<< + * return -arg + * else: + */ + } + + /* "View.MemoryView":1113 + * return -arg + * else: + * return arg # <<<<<<<<<<<<<< + * + * @cname('__pyx_get_best_slice_order') + */ + /*else*/ { + __pyx_r = __pyx_v_arg; + goto __pyx_L0; + } + + /* "View.MemoryView":1109 + * + * + * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< + * if arg < 0: + * return -arg + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1116 + * + * @cname('__pyx_get_best_slice_order') + * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< + * """ + * Figure out the best memory access order for a given slice. 
+ */ + +static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { + int __pyx_v_i; + Py_ssize_t __pyx_v_c_stride; + Py_ssize_t __pyx_v_f_stride; + char __pyx_r; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + + /* "View.MemoryView":1121 + * """ + * cdef int i + * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< + * cdef Py_ssize_t f_stride = 0 + * + */ + __pyx_v_c_stride = 0; + + /* "View.MemoryView":1122 + * cdef int i + * cdef Py_ssize_t c_stride = 0 + * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< + * + * for i in range(ndim - 1, -1, -1): + */ + __pyx_v_f_stride = 0; + + /* "View.MemoryView":1124 + * cdef Py_ssize_t f_stride = 0 + * + * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< + * if mslice.shape[i] > 1: + * c_stride = mslice.strides[i] + */ + for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { + __pyx_v_i = __pyx_t_1; + + /* "View.MemoryView":1125 + * + * for i in range(ndim - 1, -1, -1): + * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< + * c_stride = mslice.strides[i] + * break + */ + __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1126 + * for i in range(ndim - 1, -1, -1): + * if mslice.shape[i] > 1: + * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< + * break + * + */ + __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); + + /* "View.MemoryView":1127 + * if mslice.shape[i] > 1: + * c_stride = mslice.strides[i] + * break # <<<<<<<<<<<<<< + * + * for i in range(ndim): + */ + goto __pyx_L4_break; + + /* "View.MemoryView":1125 + * + * for i in range(ndim - 1, -1, -1): + * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< + * c_stride = mslice.strides[i] + * break + */ + } + } + __pyx_L4_break:; + + /* "View.MemoryView":1129 + * break + * + * for i in range(ndim): # <<<<<<<<<<<<<< + * if mslice.shape[i] > 1: + * f_stride = mslice.strides[i] + */ + __pyx_t_1 = __pyx_v_ndim; + __pyx_t_3 = __pyx_t_1; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_i = __pyx_t_4; + + /* "View.MemoryView":1130 + * + * for i in range(ndim): + * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< + * f_stride = mslice.strides[i] + * break + */ + __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1131 + * for i in range(ndim): + * if mslice.shape[i] > 1: + * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< + * break + * + */ + __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); + + /* "View.MemoryView":1132 + * if mslice.shape[i] > 1: + * f_stride = mslice.strides[i] + * break # <<<<<<<<<<<<<< + * + * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): + */ + goto __pyx_L7_break; + + /* "View.MemoryView":1130 + * + * for i in range(ndim): + * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< + * f_stride = mslice.strides[i] + * break + */ + } + } + __pyx_L7_break:; + + /* "View.MemoryView":1134 + * break + * + * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< + * return 'C' + * else: + */ + __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1135 + * + * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): + * return 'C' # <<<<<<<<<<<<<< + * else: + * return 'F' + */ + __pyx_r = 'C'; + goto __pyx_L0; + + /* "View.MemoryView":1134 + * break + * + * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< + * return 'C' + * else: + */ + } + + /* "View.MemoryView":1137 + * return 'C' + * else: + 
* return 'F' # <<<<<<<<<<<<<< + * + * @cython.cdivision(True) + */ + /*else*/ { + __pyx_r = 'F'; + goto __pyx_L0; + } + + /* "View.MemoryView":1116 + * + * @cname('__pyx_get_best_slice_order') + * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< + * """ + * Figure out the best memory access order for a given slice. + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1140 + * + * @cython.cdivision(True) + * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< + * char *dst_data, Py_ssize_t *dst_strides, + * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, + */ + +static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { + CYTHON_UNUSED Py_ssize_t __pyx_v_i; + CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; + Py_ssize_t __pyx_v_dst_extent; + Py_ssize_t __pyx_v_src_stride; + Py_ssize_t __pyx_v_dst_stride; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + Py_ssize_t __pyx_t_4; + Py_ssize_t __pyx_t_5; + Py_ssize_t __pyx_t_6; + + /* "View.MemoryView":1147 + * + * cdef Py_ssize_t i + * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< + * cdef Py_ssize_t dst_extent = dst_shape[0] + * cdef Py_ssize_t src_stride = src_strides[0] + */ + __pyx_v_src_extent = (__pyx_v_src_shape[0]); + + /* "View.MemoryView":1148 + * cdef Py_ssize_t i + * cdef Py_ssize_t src_extent = src_shape[0] + * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< + * cdef Py_ssize_t src_stride = src_strides[0] + * cdef Py_ssize_t dst_stride = dst_strides[0] + */ + __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); + + /* "View.MemoryView":1149 + * cdef Py_ssize_t src_extent = src_shape[0] + * cdef Py_ssize_t dst_extent = dst_shape[0] + * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< + * cdef Py_ssize_t dst_stride = dst_strides[0] + * + */ + __pyx_v_src_stride = (__pyx_v_src_strides[0]); + + /* "View.MemoryView":1150 + * cdef Py_ssize_t dst_extent = dst_shape[0] + * cdef Py_ssize_t src_stride = src_strides[0] + * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< + * + * if ndim == 1: + */ + __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); + + /* "View.MemoryView":1152 + * cdef Py_ssize_t dst_stride = dst_strides[0] + * + * if ndim == 1: # <<<<<<<<<<<<<< + * if (src_stride > 0 and dst_stride > 0 and + * src_stride == itemsize == dst_stride): + */ + __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1153 + * + * if ndim == 1: + * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< + * src_stride == itemsize == dst_stride): + * memcpy(dst_data, src_data, itemsize * dst_extent) + */ + __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L5_bool_binop_done; + } + __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L5_bool_binop_done; + } + + /* "View.MemoryView":1154 + * if ndim == 1: + * if (src_stride > 0 and dst_stride > 0 and + * src_stride == itemsize == dst_stride): # <<<<<<<<<<<<<< + * memcpy(dst_data, src_data, itemsize * dst_extent) + * else: + */ + __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); + if (__pyx_t_2) { + __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); + } + __pyx_t_3 = (__pyx_t_2 != 
0); + __pyx_t_1 = __pyx_t_3; + __pyx_L5_bool_binop_done:; + + /* "View.MemoryView":1153 + * + * if ndim == 1: + * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< + * src_stride == itemsize == dst_stride): + * memcpy(dst_data, src_data, itemsize * dst_extent) + */ + if (__pyx_t_1) { + + /* "View.MemoryView":1155 + * if (src_stride > 0 and dst_stride > 0 and + * src_stride == itemsize == dst_stride): + * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< + * else: + * for i in range(dst_extent): + */ + (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent))); + + /* "View.MemoryView":1153 + * + * if ndim == 1: + * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< + * src_stride == itemsize == dst_stride): + * memcpy(dst_data, src_data, itemsize * dst_extent) + */ + goto __pyx_L4; + } + + /* "View.MemoryView":1157 + * memcpy(dst_data, src_data, itemsize * dst_extent) + * else: + * for i in range(dst_extent): # <<<<<<<<<<<<<< + * memcpy(dst_data, src_data, itemsize) + * src_data += src_stride + */ + /*else*/ { + __pyx_t_4 = __pyx_v_dst_extent; + __pyx_t_5 = __pyx_t_4; + for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { + __pyx_v_i = __pyx_t_6; + + /* "View.MemoryView":1158 + * else: + * for i in range(dst_extent): + * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< + * src_data += src_stride + * dst_data += dst_stride + */ + (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize)); + + /* "View.MemoryView":1159 + * for i in range(dst_extent): + * memcpy(dst_data, src_data, itemsize) + * src_data += src_stride # <<<<<<<<<<<<<< + * dst_data += dst_stride + * else: + */ + __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); + + /* "View.MemoryView":1160 + * memcpy(dst_data, src_data, itemsize) + * src_data += src_stride + * dst_data += dst_stride # <<<<<<<<<<<<<< + * else: + * for i in range(dst_extent): + */ + __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); + } + } + __pyx_L4:; + + /* "View.MemoryView":1152 + * cdef Py_ssize_t dst_stride = dst_strides[0] + * + * if ndim == 1: # <<<<<<<<<<<<<< + * if (src_stride > 0 and dst_stride > 0 and + * src_stride == itemsize == dst_stride): + */ + goto __pyx_L3; + } + + /* "View.MemoryView":1162 + * dst_data += dst_stride + * else: + * for i in range(dst_extent): # <<<<<<<<<<<<<< + * _copy_strided_to_strided(src_data, src_strides + 1, + * dst_data, dst_strides + 1, + */ + /*else*/ { + __pyx_t_4 = __pyx_v_dst_extent; + __pyx_t_5 = __pyx_t_4; + for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { + __pyx_v_i = __pyx_t_6; + + /* "View.MemoryView":1163 + * else: + * for i in range(dst_extent): + * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< + * dst_data, dst_strides + 1, + * src_shape + 1, dst_shape + 1, + */ + _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); + + /* "View.MemoryView":1167 + * src_shape + 1, dst_shape + 1, + * ndim - 1, itemsize) + * src_data += src_stride # <<<<<<<<<<<<<< + * dst_data += dst_stride + * + */ + __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); + + /* "View.MemoryView":1168 + * ndim - 1, itemsize) + * src_data += src_stride + * dst_data += dst_stride # <<<<<<<<<<<<<< + * + * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, + */ + __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); + } + } + __pyx_L3:; + + 
/* "View.MemoryView":1140 + * + * @cython.cdivision(True) + * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< + * char *dst_data, Py_ssize_t *dst_strides, + * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, + */ + + /* function exit code */ +} + +/* "View.MemoryView":1170 + * dst_data += dst_stride + * + * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *dst, + * int ndim, size_t itemsize) nogil: + */ + +static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { + + /* "View.MemoryView":1173 + * __Pyx_memviewslice *dst, + * int ndim, size_t itemsize) nogil: + * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< + * src.shape, dst.shape, ndim, itemsize) + * + */ + _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); + + /* "View.MemoryView":1170 + * dst_data += dst_stride + * + * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *dst, + * int ndim, size_t itemsize) nogil: + */ + + /* function exit code */ +} + +/* "View.MemoryView":1177 + * + * @cname('__pyx_memoryview_slice_get_size') + * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< + * "Return the size of the memory occupied by the slice in number of bytes" + * cdef Py_ssize_t shape, size = src.memview.view.itemsize + */ + +static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { + Py_ssize_t __pyx_v_shape; + Py_ssize_t __pyx_v_size; + Py_ssize_t __pyx_r; + Py_ssize_t __pyx_t_1; + Py_ssize_t *__pyx_t_2; + Py_ssize_t *__pyx_t_3; + Py_ssize_t *__pyx_t_4; + + /* "View.MemoryView":1179 + * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: + * "Return the size of the memory occupied by the slice in number of bytes" + * cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<< + * + * for shape in src.shape[:ndim]: + */ + __pyx_t_1 = __pyx_v_src->memview->view.itemsize; + __pyx_v_size = __pyx_t_1; + + /* "View.MemoryView":1181 + * cdef Py_ssize_t shape, size = src.memview.view.itemsize + * + * for shape in src.shape[:ndim]: # <<<<<<<<<<<<<< + * size *= shape + * + */ + __pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim); + for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { + __pyx_t_2 = __pyx_t_4; + __pyx_v_shape = (__pyx_t_2[0]); + + /* "View.MemoryView":1182 + * + * for shape in src.shape[:ndim]: + * size *= shape # <<<<<<<<<<<<<< + * + * return size + */ + __pyx_v_size = (__pyx_v_size * __pyx_v_shape); + } + + /* "View.MemoryView":1184 + * size *= shape + * + * return size # <<<<<<<<<<<<<< + * + * @cname('__pyx_fill_contig_strides_array') + */ + __pyx_r = __pyx_v_size; + goto __pyx_L0; + + /* "View.MemoryView":1177 + * + * @cname('__pyx_memoryview_slice_get_size') + * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< + * "Return the size of the memory occupied by the slice in number of bytes" + * cdef Py_ssize_t shape, size = src.memview.view.itemsize + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1187 + * + * @cname('__pyx_fill_contig_strides_array') + * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< + * 
Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, + * int ndim, char order) nogil: + */ + +static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { + int __pyx_v_idx; + Py_ssize_t __pyx_r; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + + /* "View.MemoryView":1196 + * cdef int idx + * + * if order == 'F': # <<<<<<<<<<<<<< + * for idx in range(ndim): + * strides[idx] = stride + */ + __pyx_t_1 = ((__pyx_v_order == 'F') != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1197 + * + * if order == 'F': + * for idx in range(ndim): # <<<<<<<<<<<<<< + * strides[idx] = stride + * stride *= shape[idx] + */ + __pyx_t_2 = __pyx_v_ndim; + __pyx_t_3 = __pyx_t_2; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_idx = __pyx_t_4; + + /* "View.MemoryView":1198 + * if order == 'F': + * for idx in range(ndim): + * strides[idx] = stride # <<<<<<<<<<<<<< + * stride *= shape[idx] + * else: + */ + (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; + + /* "View.MemoryView":1199 + * for idx in range(ndim): + * strides[idx] = stride + * stride *= shape[idx] # <<<<<<<<<<<<<< + * else: + * for idx in range(ndim - 1, -1, -1): + */ + __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); + } + + /* "View.MemoryView":1196 + * cdef int idx + * + * if order == 'F': # <<<<<<<<<<<<<< + * for idx in range(ndim): + * strides[idx] = stride + */ + goto __pyx_L3; + } + + /* "View.MemoryView":1201 + * stride *= shape[idx] + * else: + * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< + * strides[idx] = stride + * stride *= shape[idx] + */ + /*else*/ { + for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { + __pyx_v_idx = __pyx_t_2; + + /* "View.MemoryView":1202 + * else: + * for idx in range(ndim - 1, -1, -1): + * strides[idx] = stride # <<<<<<<<<<<<<< + * stride *= shape[idx] + * + */ + (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; + + /* "View.MemoryView":1203 + * for idx in range(ndim - 1, -1, -1): + * strides[idx] = stride + * stride *= shape[idx] # <<<<<<<<<<<<<< + * + * return stride + */ + __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); + } + } + __pyx_L3:; + + /* "View.MemoryView":1205 + * stride *= shape[idx] + * + * return stride # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_copy_data_to_temp') + */ + __pyx_r = __pyx_v_stride; + goto __pyx_L0; + + /* "View.MemoryView":1187 + * + * @cname('__pyx_fill_contig_strides_array') + * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< + * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, + * int ndim, char order) nogil: + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1208 + * + * @cname('__pyx_memoryview_copy_data_to_temp') + * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *tmpslice, + * char order, + */ + +static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { + int __pyx_v_i; + void *__pyx_v_result; + size_t __pyx_v_itemsize; + size_t __pyx_v_size; + void *__pyx_r; + Py_ssize_t __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + struct __pyx_memoryview_obj *__pyx_t_4; + int __pyx_t_5; + int __pyx_t_6; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + + /* "View.MemoryView":1219 + * cdef void *result + * + * cdef 
size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< + * cdef size_t size = slice_get_size(src, ndim) + * + */ + __pyx_t_1 = __pyx_v_src->memview->view.itemsize; + __pyx_v_itemsize = __pyx_t_1; + + /* "View.MemoryView":1220 + * + * cdef size_t itemsize = src.memview.view.itemsize + * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< + * + * result = malloc(size) + */ + __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); + + /* "View.MemoryView":1222 + * cdef size_t size = slice_get_size(src, ndim) + * + * result = malloc(size) # <<<<<<<<<<<<<< + * if not result: + * _err(MemoryError, NULL) + */ + __pyx_v_result = malloc(__pyx_v_size); + + /* "View.MemoryView":1223 + * + * result = malloc(size) + * if not result: # <<<<<<<<<<<<<< + * _err(MemoryError, NULL) + * + */ + __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1224 + * result = malloc(size) + * if not result: + * _err(MemoryError, NULL) # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1224, __pyx_L1_error) + + /* "View.MemoryView":1223 + * + * result = malloc(size) + * if not result: # <<<<<<<<<<<<<< + * _err(MemoryError, NULL) + * + */ + } + + /* "View.MemoryView":1227 + * + * + * tmpslice.data = result # <<<<<<<<<<<<<< + * tmpslice.memview = src.memview + * for i in range(ndim): + */ + __pyx_v_tmpslice->data = ((char *)__pyx_v_result); + + /* "View.MemoryView":1228 + * + * tmpslice.data = result + * tmpslice.memview = src.memview # <<<<<<<<<<<<<< + * for i in range(ndim): + * tmpslice.shape[i] = src.shape[i] + */ + __pyx_t_4 = __pyx_v_src->memview; + __pyx_v_tmpslice->memview = __pyx_t_4; + + /* "View.MemoryView":1229 + * tmpslice.data = result + * tmpslice.memview = src.memview + * for i in range(ndim): # <<<<<<<<<<<<<< + * tmpslice.shape[i] = src.shape[i] + * tmpslice.suboffsets[i] = -1 + */ + __pyx_t_3 = __pyx_v_ndim; + __pyx_t_5 = __pyx_t_3; + for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { + __pyx_v_i = __pyx_t_6; + + /* "View.MemoryView":1230 + * tmpslice.memview = src.memview + * for i in range(ndim): + * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< + * tmpslice.suboffsets[i] = -1 + * + */ + (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); + + /* "View.MemoryView":1231 + * for i in range(ndim): + * tmpslice.shape[i] = src.shape[i] + * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< + * + * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, + */ + (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; + } + + /* "View.MemoryView":1233 + * tmpslice.suboffsets[i] = -1 + * + * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< + * ndim, order) + * + */ + (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order)); + + /* "View.MemoryView":1237 + * + * + * for i in range(ndim): # <<<<<<<<<<<<<< + * if tmpslice.shape[i] == 1: + * tmpslice.strides[i] = 0 + */ + __pyx_t_3 = __pyx_v_ndim; + __pyx_t_5 = __pyx_t_3; + for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { + __pyx_v_i = __pyx_t_6; + + /* "View.MemoryView":1238 + * + * for i in range(ndim): + * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< + * tmpslice.strides[i] = 0 + * + */ + __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1239 + * for i 
in range(ndim): + * if tmpslice.shape[i] == 1: + * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< + * + * if slice_is_contig(src[0], order, ndim): + */ + (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; + + /* "View.MemoryView":1238 + * + * for i in range(ndim): + * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< + * tmpslice.strides[i] = 0 + * + */ + } + } + + /* "View.MemoryView":1241 + * tmpslice.strides[i] = 0 + * + * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< + * memcpy(result, src.data, size) + * else: + */ + __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1242 + * + * if slice_is_contig(src[0], order, ndim): + * memcpy(result, src.data, size) # <<<<<<<<<<<<<< + * else: + * copy_strided_to_strided(src, tmpslice, ndim, itemsize) + */ + (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size)); + + /* "View.MemoryView":1241 + * tmpslice.strides[i] = 0 + * + * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< + * memcpy(result, src.data, size) + * else: + */ + goto __pyx_L9; + } + + /* "View.MemoryView":1244 + * memcpy(result, src.data, size) + * else: + * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< + * + * return result + */ + /*else*/ { + copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); + } + __pyx_L9:; + + /* "View.MemoryView":1246 + * copy_strided_to_strided(src, tmpslice, ndim, itemsize) + * + * return result # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = __pyx_v_result; + goto __pyx_L0; + + /* "View.MemoryView":1208 + * + * @cname('__pyx_memoryview_copy_data_to_temp') + * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *tmpslice, + * char order, + */ + + /* function exit code */ + __pyx_L1_error:; + { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + } + __pyx_r = NULL; + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1251 + * + * @cname('__pyx_memoryview_err_extents') + * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< + * Py_ssize_t extent2) except -1 with gil: + * raise ValueError("got differing extents in dimension %d (got %d and %d)" % + */ + +static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_RefNannySetupContext("_err_extents", 0); + + /* "View.MemoryView":1254 + * Py_ssize_t extent2) except -1 with gil: + * raise ValueError("got differing extents in dimension %d (got %d and %d)" % + * (i, extent1, extent2)) # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_err_dim') + */ + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1254, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(1, 1254, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1254, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_2); + PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); + __pyx_t_1 = 0; + __pyx_t_2 = 0; + __pyx_t_3 = 0; + + /* "View.MemoryView":1253 + * cdef int _err_extents(int i, Py_ssize_t extent1, + * Py_ssize_t extent2) except -1 with gil: + * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< + * (i, extent1, extent2)) + * + */ + __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1253, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1253, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 1253, __pyx_L1_error) + + /* "View.MemoryView":1251 + * + * @cname('__pyx_memoryview_err_extents') + * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< + * Py_ssize_t extent2) except -1 with gil: + * raise ValueError("got differing extents in dimension %d (got %d and %d)" % + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __Pyx_RefNannyFinishContext(); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + return __pyx_r; +} + +/* "View.MemoryView":1257 + * + * @cname('__pyx_memoryview_err_dim') + * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< + * raise error(msg.decode('ascii') % dim) + * + */ + +static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_RefNannySetupContext("_err_dim", 0); + __Pyx_INCREF(__pyx_v_error); + + /* "View.MemoryView":1258 + * @cname('__pyx_memoryview_err_dim') + * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: + * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_err') + */ + __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1258, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1258, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1258, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_INCREF(__pyx_v_error); + __pyx_t_3 
= __pyx_v_error; __pyx_t_2 = NULL; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_2)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_2); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + } + } + __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1258, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 1258, __pyx_L1_error) + + /* "View.MemoryView":1257 + * + * @cname('__pyx_memoryview_err_dim') + * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< + * raise error(msg.decode('ascii') % dim) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __Pyx_XDECREF(__pyx_v_error); + __Pyx_RefNannyFinishContext(); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + return __pyx_r; +} + +/* "View.MemoryView":1261 + * + * @cname('__pyx_memoryview_err') + * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< + * if msg != NULL: + * raise error(msg.decode('ascii')) + */ + +static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_RefNannySetupContext("_err", 0); + __Pyx_INCREF(__pyx_v_error); + + /* "View.MemoryView":1262 + * @cname('__pyx_memoryview_err') + * cdef int _err(object error, char *msg) except -1 with gil: + * if msg != NULL: # <<<<<<<<<<<<<< + * raise error(msg.decode('ascii')) + * else: + */ + __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":1263 + * cdef int _err(object error, char *msg) except -1 with gil: + * if msg != NULL: + * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< + * else: + * raise error + */ + __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1263, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(__pyx_v_error); + __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { + __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); + if (likely(__pyx_t_5)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); + __Pyx_INCREF(__pyx_t_5); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_4, function); + } + } + __pyx_t_2 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1263, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(1, 1263, __pyx_L1_error) + + /* "View.MemoryView":1262 + * @cname('__pyx_memoryview_err') + * cdef int _err(object error, char *msg) except -1 with gil: + * if msg != NULL: # <<<<<<<<<<<<<< + * raise error(msg.decode('ascii')) + * else: + */ + } + + /* "View.MemoryView":1265 + * raise error(msg.decode('ascii')) + * else: + * raise error # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_copy_contents') + */ + /*else*/ { + __Pyx_Raise(__pyx_v_error, 0, 0, 0); + __PYX_ERR(1, 1265, __pyx_L1_error) + } + + /* "View.MemoryView":1261 + * + * @cname('__pyx_memoryview_err') + * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< + * if msg != NULL: + * raise error(msg.decode('ascii')) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __Pyx_XDECREF(__pyx_v_error); + __Pyx_RefNannyFinishContext(); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + return __pyx_r; +} + +/* "View.MemoryView":1268 + * + * @cname('__pyx_memoryview_copy_contents') + * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice dst, + * int src_ndim, int dst_ndim, + */ + +static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { + void *__pyx_v_tmpdata; + size_t __pyx_v_itemsize; + int __pyx_v_i; + char __pyx_v_order; + int __pyx_v_broadcasting; + int __pyx_v_direct_copy; + __Pyx_memviewslice __pyx_v_tmp; + int __pyx_v_ndim; + int __pyx_r; + Py_ssize_t __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + int __pyx_t_5; + int __pyx_t_6; + void *__pyx_t_7; + int __pyx_t_8; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + + /* "View.MemoryView":1276 + * Check for overlapping memory and verify the shapes. 
+ * """ + * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< + * cdef size_t itemsize = src.memview.view.itemsize + * cdef int i + */ + __pyx_v_tmpdata = NULL; + + /* "View.MemoryView":1277 + * """ + * cdef void *tmpdata = NULL + * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< + * cdef int i + * cdef char order = get_best_order(&src, src_ndim) + */ + __pyx_t_1 = __pyx_v_src.memview->view.itemsize; + __pyx_v_itemsize = __pyx_t_1; + + /* "View.MemoryView":1279 + * cdef size_t itemsize = src.memview.view.itemsize + * cdef int i + * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< + * cdef bint broadcasting = False + * cdef bint direct_copy = False + */ + __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); + + /* "View.MemoryView":1280 + * cdef int i + * cdef char order = get_best_order(&src, src_ndim) + * cdef bint broadcasting = False # <<<<<<<<<<<<<< + * cdef bint direct_copy = False + * cdef __Pyx_memviewslice tmp + */ + __pyx_v_broadcasting = 0; + + /* "View.MemoryView":1281 + * cdef char order = get_best_order(&src, src_ndim) + * cdef bint broadcasting = False + * cdef bint direct_copy = False # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice tmp + * + */ + __pyx_v_direct_copy = 0; + + /* "View.MemoryView":1284 + * cdef __Pyx_memviewslice tmp + * + * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< + * broadcast_leading(&src, src_ndim, dst_ndim) + * elif dst_ndim < src_ndim: + */ + __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1285 + * + * if src_ndim < dst_ndim: + * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< + * elif dst_ndim < src_ndim: + * broadcast_leading(&dst, dst_ndim, src_ndim) + */ + __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); + + /* "View.MemoryView":1284 + * cdef __Pyx_memviewslice tmp + * + * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< + * broadcast_leading(&src, src_ndim, dst_ndim) + * elif dst_ndim < src_ndim: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":1286 + * if src_ndim < dst_ndim: + * broadcast_leading(&src, src_ndim, dst_ndim) + * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< + * broadcast_leading(&dst, dst_ndim, src_ndim) + * + */ + __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1287 + * broadcast_leading(&src, src_ndim, dst_ndim) + * elif dst_ndim < src_ndim: + * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< + * + * cdef int ndim = max(src_ndim, dst_ndim) + */ + __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); + + /* "View.MemoryView":1286 + * if src_ndim < dst_ndim: + * broadcast_leading(&src, src_ndim, dst_ndim) + * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< + * broadcast_leading(&dst, dst_ndim, src_ndim) + * + */ + } + __pyx_L3:; + + /* "View.MemoryView":1289 + * broadcast_leading(&dst, dst_ndim, src_ndim) + * + * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< + * + * for i in range(ndim): + */ + __pyx_t_3 = __pyx_v_dst_ndim; + __pyx_t_4 = __pyx_v_src_ndim; + if (((__pyx_t_3 > __pyx_t_4) != 0)) { + __pyx_t_5 = __pyx_t_3; + } else { + __pyx_t_5 = __pyx_t_4; + } + __pyx_v_ndim = __pyx_t_5; + + /* "View.MemoryView":1291 + * cdef int ndim = max(src_ndim, dst_ndim) + * + * for i in range(ndim): # <<<<<<<<<<<<<< + * if src.shape[i] != dst.shape[i]: + * if src.shape[i] == 1: + */ + __pyx_t_5 = __pyx_v_ndim; + __pyx_t_3 = __pyx_t_5; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + 
__pyx_v_i = __pyx_t_4; + + /* "View.MemoryView":1292 + * + * for i in range(ndim): + * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< + * if src.shape[i] == 1: + * broadcasting = True + */ + __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1293 + * for i in range(ndim): + * if src.shape[i] != dst.shape[i]: + * if src.shape[i] == 1: # <<<<<<<<<<<<<< + * broadcasting = True + * src.strides[i] = 0 + */ + __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1294 + * if src.shape[i] != dst.shape[i]: + * if src.shape[i] == 1: + * broadcasting = True # <<<<<<<<<<<<<< + * src.strides[i] = 0 + * else: + */ + __pyx_v_broadcasting = 1; + + /* "View.MemoryView":1295 + * if src.shape[i] == 1: + * broadcasting = True + * src.strides[i] = 0 # <<<<<<<<<<<<<< + * else: + * _err_extents(i, dst.shape[i], src.shape[i]) + */ + (__pyx_v_src.strides[__pyx_v_i]) = 0; + + /* "View.MemoryView":1293 + * for i in range(ndim): + * if src.shape[i] != dst.shape[i]: + * if src.shape[i] == 1: # <<<<<<<<<<<<<< + * broadcasting = True + * src.strides[i] = 0 + */ + goto __pyx_L7; + } + + /* "View.MemoryView":1297 + * src.strides[i] = 0 + * else: + * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< + * + * if src.suboffsets[i] >= 0: + */ + /*else*/ { + __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error) + } + __pyx_L7:; + + /* "View.MemoryView":1292 + * + * for i in range(ndim): + * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< + * if src.shape[i] == 1: + * broadcasting = True + */ + } + + /* "View.MemoryView":1299 + * _err_extents(i, dst.shape[i], src.shape[i]) + * + * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< + * _err_dim(ValueError, "Dimension %d is not direct", i) + * + */ + __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1300 + * + * if src.suboffsets[i] >= 0: + * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< + * + * if slices_overlap(&src, &dst, ndim, itemsize): + */ + __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1300, __pyx_L1_error) + + /* "View.MemoryView":1299 + * _err_extents(i, dst.shape[i], src.shape[i]) + * + * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< + * _err_dim(ValueError, "Dimension %d is not direct", i) + * + */ + } + } + + /* "View.MemoryView":1302 + * _err_dim(ValueError, "Dimension %d is not direct", i) + * + * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< + * + * if not slice_is_contig(src, order, ndim): + */ + __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1304 + * if slices_overlap(&src, &dst, ndim, itemsize): + * + * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< + * order = get_best_order(&dst, ndim) + * + */ + __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1305 + * + * if not slice_is_contig(src, order, ndim): + * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< + * + * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) + */ + __pyx_v_order = 
__pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); + + /* "View.MemoryView":1304 + * if slices_overlap(&src, &dst, ndim, itemsize): + * + * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< + * order = get_best_order(&dst, ndim) + * + */ + } + + /* "View.MemoryView":1307 + * order = get_best_order(&dst, ndim) + * + * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< + * src = tmp + * + */ + __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(1, 1307, __pyx_L1_error) + __pyx_v_tmpdata = __pyx_t_7; + + /* "View.MemoryView":1308 + * + * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) + * src = tmp # <<<<<<<<<<<<<< + * + * if not broadcasting: + */ + __pyx_v_src = __pyx_v_tmp; + + /* "View.MemoryView":1302 + * _err_dim(ValueError, "Dimension %d is not direct", i) + * + * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< + * + * if not slice_is_contig(src, order, ndim): + */ + } + + /* "View.MemoryView":1310 + * src = tmp + * + * if not broadcasting: # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1313 + * + * + * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< + * direct_copy = slice_is_contig(dst, 'C', ndim) + * elif slice_is_contig(src, 'F', ndim): + */ + __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1314 + * + * if slice_is_contig(src, 'C', ndim): + * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< + * elif slice_is_contig(src, 'F', ndim): + * direct_copy = slice_is_contig(dst, 'F', ndim) + */ + __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); + + /* "View.MemoryView":1313 + * + * + * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< + * direct_copy = slice_is_contig(dst, 'C', ndim) + * elif slice_is_contig(src, 'F', ndim): + */ + goto __pyx_L12; + } + + /* "View.MemoryView":1315 + * if slice_is_contig(src, 'C', ndim): + * direct_copy = slice_is_contig(dst, 'C', ndim) + * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< + * direct_copy = slice_is_contig(dst, 'F', ndim) + * + */ + __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1316 + * direct_copy = slice_is_contig(dst, 'C', ndim) + * elif slice_is_contig(src, 'F', ndim): + * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< + * + * if direct_copy: + */ + __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); + + /* "View.MemoryView":1315 + * if slice_is_contig(src, 'C', ndim): + * direct_copy = slice_is_contig(dst, 'C', ndim) + * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< + * direct_copy = slice_is_contig(dst, 'F', ndim) + * + */ + } + __pyx_L12:; + + /* "View.MemoryView":1318 + * direct_copy = slice_is_contig(dst, 'F', ndim) + * + * if direct_copy: # <<<<<<<<<<<<<< + * + * refcount_copying(&dst, dtype_is_object, ndim, False) + */ + __pyx_t_2 = (__pyx_v_direct_copy != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1320 + * if direct_copy: + * + * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< + * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) + * refcount_copying(&dst, dtype_is_object, ndim, True) + */ + __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); + + 
/* "View.MemoryView":1321 + * + * refcount_copying(&dst, dtype_is_object, ndim, False) + * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< + * refcount_copying(&dst, dtype_is_object, ndim, True) + * free(tmpdata) + */ + (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim))); + + /* "View.MemoryView":1322 + * refcount_copying(&dst, dtype_is_object, ndim, False) + * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) + * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< + * free(tmpdata) + * return 0 + */ + __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); + + /* "View.MemoryView":1323 + * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) + * refcount_copying(&dst, dtype_is_object, ndim, True) + * free(tmpdata) # <<<<<<<<<<<<<< + * return 0 + * + */ + free(__pyx_v_tmpdata); + + /* "View.MemoryView":1324 + * refcount_copying(&dst, dtype_is_object, ndim, True) + * free(tmpdata) + * return 0 # <<<<<<<<<<<<<< + * + * if order == 'F' == get_best_order(&dst, ndim): + */ + __pyx_r = 0; + goto __pyx_L0; + + /* "View.MemoryView":1318 + * direct_copy = slice_is_contig(dst, 'F', ndim) + * + * if direct_copy: # <<<<<<<<<<<<<< + * + * refcount_copying(&dst, dtype_is_object, ndim, False) + */ + } + + /* "View.MemoryView":1310 + * src = tmp + * + * if not broadcasting: # <<<<<<<<<<<<<< + * + * + */ + } + + /* "View.MemoryView":1326 + * return 0 + * + * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_2 = (__pyx_v_order == 'F'); + if (__pyx_t_2) { + __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); + } + __pyx_t_8 = (__pyx_t_2 != 0); + if (__pyx_t_8) { + + /* "View.MemoryView":1329 + * + * + * transpose_memslice(&src) # <<<<<<<<<<<<<< + * transpose_memslice(&dst) + * + */ + __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1329, __pyx_L1_error) + + /* "View.MemoryView":1330 + * + * transpose_memslice(&src) + * transpose_memslice(&dst) # <<<<<<<<<<<<<< + * + * refcount_copying(&dst, dtype_is_object, ndim, False) + */ + __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1330, __pyx_L1_error) + + /* "View.MemoryView":1326 + * return 0 + * + * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< + * + * + */ + } + + /* "View.MemoryView":1332 + * transpose_memslice(&dst) + * + * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< + * copy_strided_to_strided(&src, &dst, ndim, itemsize) + * refcount_copying(&dst, dtype_is_object, ndim, True) + */ + __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); + + /* "View.MemoryView":1333 + * + * refcount_copying(&dst, dtype_is_object, ndim, False) + * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< + * refcount_copying(&dst, dtype_is_object, ndim, True) + * + */ + copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); + + /* "View.MemoryView":1334 + * refcount_copying(&dst, dtype_is_object, ndim, False) + * copy_strided_to_strided(&src, &dst, ndim, itemsize) + * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< + * + * free(tmpdata) + */ + __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); + + /* "View.MemoryView":1336 + * refcount_copying(&dst, dtype_is_object, ndim, 
True) + * + * free(tmpdata) # <<<<<<<<<<<<<< + * return 0 + * + */ + free(__pyx_v_tmpdata); + + /* "View.MemoryView":1337 + * + * free(tmpdata) + * return 0 # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_broadcast_leading') + */ + __pyx_r = 0; + goto __pyx_L0; + + /* "View.MemoryView":1268 + * + * @cname('__pyx_memoryview_copy_contents') + * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice dst, + * int src_ndim, int dst_ndim, + */ + + /* function exit code */ + __pyx_L1_error:; + { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + } + __pyx_r = -1; + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1340 + * + * @cname('__pyx_memoryview_broadcast_leading') + * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< + * int ndim, + * int ndim_other) nogil: + */ + +static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { + int __pyx_v_i; + int __pyx_v_offset; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + + /* "View.MemoryView":1344 + * int ndim_other) nogil: + * cdef int i + * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< + * + * for i in range(ndim - 1, -1, -1): + */ + __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); + + /* "View.MemoryView":1346 + * cdef int offset = ndim_other - ndim + * + * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< + * mslice.shape[i + offset] = mslice.shape[i] + * mslice.strides[i + offset] = mslice.strides[i] + */ + for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { + __pyx_v_i = __pyx_t_1; + + /* "View.MemoryView":1347 + * + * for i in range(ndim - 1, -1, -1): + * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< + * mslice.strides[i + offset] = mslice.strides[i] + * mslice.suboffsets[i + offset] = mslice.suboffsets[i] + */ + (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); + + /* "View.MemoryView":1348 + * for i in range(ndim - 1, -1, -1): + * mslice.shape[i + offset] = mslice.shape[i] + * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< + * mslice.suboffsets[i + offset] = mslice.suboffsets[i] + * + */ + (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); + + /* "View.MemoryView":1349 + * mslice.shape[i + offset] = mslice.shape[i] + * mslice.strides[i + offset] = mslice.strides[i] + * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< + * + * for i in range(offset): + */ + (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); + } + + /* "View.MemoryView":1351 + * mslice.suboffsets[i + offset] = mslice.suboffsets[i] + * + * for i in range(offset): # <<<<<<<<<<<<<< + * mslice.shape[i] = 1 + * mslice.strides[i] = mslice.strides[0] + */ + __pyx_t_1 = __pyx_v_offset; + __pyx_t_2 = __pyx_t_1; + for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { + __pyx_v_i = __pyx_t_3; + + /* "View.MemoryView":1352 + * + * for i in range(offset): + * mslice.shape[i] = 1 # <<<<<<<<<<<<<< + * mslice.strides[i] = mslice.strides[0] + * mslice.suboffsets[i] = -1 + */ + (__pyx_v_mslice->shape[__pyx_v_i]) = 1; + + /* "View.MemoryView":1353 + * for i in range(offset): + * mslice.shape[i] = 1 
+ * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< + * mslice.suboffsets[i] = -1 + * + */ + (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); + + /* "View.MemoryView":1354 + * mslice.shape[i] = 1 + * mslice.strides[i] = mslice.strides[0] + * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< + * + * + */ + (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; + } + + /* "View.MemoryView":1340 + * + * @cname('__pyx_memoryview_broadcast_leading') + * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< + * int ndim, + * int ndim_other) nogil: + */ + + /* function exit code */ +} + +/* "View.MemoryView":1362 + * + * @cname('__pyx_memoryview_refcount_copying') + * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< + * int ndim, bint inc) nogil: + * + */ + +static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { + int __pyx_t_1; + + /* "View.MemoryView":1366 + * + * + * if dtype_is_object: # <<<<<<<<<<<<<< + * refcount_objects_in_slice_with_gil(dst.data, dst.shape, + * dst.strides, ndim, inc) + */ + __pyx_t_1 = (__pyx_v_dtype_is_object != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1367 + * + * if dtype_is_object: + * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<< + * dst.strides, ndim, inc) + * + */ + __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); + + /* "View.MemoryView":1366 + * + * + * if dtype_is_object: # <<<<<<<<<<<<<< + * refcount_objects_in_slice_with_gil(dst.data, dst.shape, + * dst.strides, ndim, inc) + */ + } + + /* "View.MemoryView":1362 + * + * @cname('__pyx_memoryview_refcount_copying') + * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< + * int ndim, bint inc) nogil: + * + */ + + /* function exit code */ +} + +/* "View.MemoryView":1371 + * + * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') + * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * Py_ssize_t *strides, int ndim, + * bint inc) with gil: + */ + +static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { + __Pyx_RefNannyDeclarations + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); + + /* "View.MemoryView":1374 + * Py_ssize_t *strides, int ndim, + * bint inc) with gil: + * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_refcount_objects_in_slice') + */ + __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); + + /* "View.MemoryView":1371 + * + * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') + * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * Py_ssize_t *strides, int ndim, + * bint inc) with gil: + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif +} + +/* "View.MemoryView":1377 + * + * @cname('__pyx_memoryview_refcount_objects_in_slice') + * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * 
Py_ssize_t *strides, int ndim, bint inc): + * cdef Py_ssize_t i + */ + +static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { + CYTHON_UNUSED Py_ssize_t __pyx_v_i; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + Py_ssize_t __pyx_t_2; + Py_ssize_t __pyx_t_3; + int __pyx_t_4; + __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); + + /* "View.MemoryView":1381 + * cdef Py_ssize_t i + * + * for i in range(shape[0]): # <<<<<<<<<<<<<< + * if ndim == 1: + * if inc: + */ + __pyx_t_1 = (__pyx_v_shape[0]); + __pyx_t_2 = __pyx_t_1; + for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { + __pyx_v_i = __pyx_t_3; + + /* "View.MemoryView":1382 + * + * for i in range(shape[0]): + * if ndim == 1: # <<<<<<<<<<<<<< + * if inc: + * Py_INCREF(( data)[0]) + */ + __pyx_t_4 = ((__pyx_v_ndim == 1) != 0); + if (__pyx_t_4) { + + /* "View.MemoryView":1383 + * for i in range(shape[0]): + * if ndim == 1: + * if inc: # <<<<<<<<<<<<<< + * Py_INCREF(( data)[0]) + * else: + */ + __pyx_t_4 = (__pyx_v_inc != 0); + if (__pyx_t_4) { + + /* "View.MemoryView":1384 + * if ndim == 1: + * if inc: + * Py_INCREF(( data)[0]) # <<<<<<<<<<<<<< + * else: + * Py_DECREF(( data)[0]) + */ + Py_INCREF((((PyObject **)__pyx_v_data)[0])); + + /* "View.MemoryView":1383 + * for i in range(shape[0]): + * if ndim == 1: + * if inc: # <<<<<<<<<<<<<< + * Py_INCREF(( data)[0]) + * else: + */ + goto __pyx_L6; + } + + /* "View.MemoryView":1386 + * Py_INCREF(( data)[0]) + * else: + * Py_DECREF(( data)[0]) # <<<<<<<<<<<<<< + * else: + * refcount_objects_in_slice(data, shape + 1, strides + 1, + */ + /*else*/ { + Py_DECREF((((PyObject **)__pyx_v_data)[0])); + } + __pyx_L6:; + + /* "View.MemoryView":1382 + * + * for i in range(shape[0]): + * if ndim == 1: # <<<<<<<<<<<<<< + * if inc: + * Py_INCREF(( data)[0]) + */ + goto __pyx_L5; + } + + /* "View.MemoryView":1388 + * Py_DECREF(( data)[0]) + * else: + * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< + * ndim - 1, inc) + * + */ + /*else*/ { + + /* "View.MemoryView":1389 + * else: + * refcount_objects_in_slice(data, shape + 1, strides + 1, + * ndim - 1, inc) # <<<<<<<<<<<<<< + * + * data += strides[0] + */ + __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); + } + __pyx_L5:; + + /* "View.MemoryView":1391 + * ndim - 1, inc) + * + * data += strides[0] # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); + } + + /* "View.MemoryView":1377 + * + * @cname('__pyx_memoryview_refcount_objects_in_slice') + * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * Py_ssize_t *strides, int ndim, bint inc): + * cdef Py_ssize_t i + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "View.MemoryView":1397 + * + * @cname('__pyx_memoryview_slice_assign_scalar') + * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< + * size_t itemsize, void *item, + * bint dtype_is_object) nogil: + */ + +static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { + + /* "View.MemoryView":1400 + * size_t itemsize, void *item, + * bint dtype_is_object) nogil: + * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< + * _slice_assign_scalar(dst.data, dst.shape, 
dst.strides, ndim, + * itemsize, item) + */ + __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); + + /* "View.MemoryView":1401 + * bint dtype_is_object) nogil: + * refcount_copying(dst, dtype_is_object, ndim, False) + * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< + * itemsize, item) + * refcount_copying(dst, dtype_is_object, ndim, True) + */ + __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); + + /* "View.MemoryView":1403 + * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, + * itemsize, item) + * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< + * + * + */ + __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); + + /* "View.MemoryView":1397 + * + * @cname('__pyx_memoryview_slice_assign_scalar') + * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< + * size_t itemsize, void *item, + * bint dtype_is_object) nogil: + */ + + /* function exit code */ +} + +/* "View.MemoryView":1407 + * + * @cname('__pyx_memoryview__slice_assign_scalar') + * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * Py_ssize_t *strides, int ndim, + * size_t itemsize, void *item) nogil: + */ + +static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { + CYTHON_UNUSED Py_ssize_t __pyx_v_i; + Py_ssize_t __pyx_v_stride; + Py_ssize_t __pyx_v_extent; + int __pyx_t_1; + Py_ssize_t __pyx_t_2; + Py_ssize_t __pyx_t_3; + Py_ssize_t __pyx_t_4; + + /* "View.MemoryView":1411 + * size_t itemsize, void *item) nogil: + * cdef Py_ssize_t i + * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< + * cdef Py_ssize_t extent = shape[0] + * + */ + __pyx_v_stride = (__pyx_v_strides[0]); + + /* "View.MemoryView":1412 + * cdef Py_ssize_t i + * cdef Py_ssize_t stride = strides[0] + * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< + * + * if ndim == 1: + */ + __pyx_v_extent = (__pyx_v_shape[0]); + + /* "View.MemoryView":1414 + * cdef Py_ssize_t extent = shape[0] + * + * if ndim == 1: # <<<<<<<<<<<<<< + * for i in range(extent): + * memcpy(data, item, itemsize) + */ + __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1415 + * + * if ndim == 1: + * for i in range(extent): # <<<<<<<<<<<<<< + * memcpy(data, item, itemsize) + * data += stride + */ + __pyx_t_2 = __pyx_v_extent; + __pyx_t_3 = __pyx_t_2; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_i = __pyx_t_4; + + /* "View.MemoryView":1416 + * if ndim == 1: + * for i in range(extent): + * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< + * data += stride + * else: + */ + (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize)); + + /* "View.MemoryView":1417 + * for i in range(extent): + * memcpy(data, item, itemsize) + * data += stride # <<<<<<<<<<<<<< + * else: + * for i in range(extent): + */ + __pyx_v_data = (__pyx_v_data + __pyx_v_stride); + } + + /* "View.MemoryView":1414 + * cdef Py_ssize_t extent = shape[0] + * + * if ndim == 1: # <<<<<<<<<<<<<< + * for i in range(extent): + * memcpy(data, item, itemsize) + */ + goto __pyx_L3; + } + + /* "View.MemoryView":1419 + * data += stride + * else: + * for i in range(extent): # <<<<<<<<<<<<<< + * _slice_assign_scalar(data, shape + 1, strides + 1, + * ndim - 1, 
itemsize, item) + */ + /*else*/ { + __pyx_t_2 = __pyx_v_extent; + __pyx_t_3 = __pyx_t_2; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_i = __pyx_t_4; + + /* "View.MemoryView":1420 + * else: + * for i in range(extent): + * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< + * ndim - 1, itemsize, item) + * data += stride + */ + __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); + + /* "View.MemoryView":1422 + * _slice_assign_scalar(data, shape + 1, strides + 1, + * ndim - 1, itemsize, item) + * data += stride # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_data = (__pyx_v_data + __pyx_v_stride); + } + } + __pyx_L3:; + + /* "View.MemoryView":1407 + * + * @cname('__pyx_memoryview__slice_assign_scalar') + * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * Py_ssize_t *strides, int ndim, + * size_t itemsize, void *item) nogil: + */ + + /* function exit code */ +} + +/* "(tree fragment)":1 + * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< + * cdef object __pyx_PickleError + * cdef object __pyx_result + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v___pyx_type = 0; + long __pyx_v___pyx_checksum; + PyObject *__pyx_v___pyx_state = 0; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; + PyObject* values[3] = {0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error) + } + } else if 
(PyTuple_GET_SIZE(__pyx_args) != 3) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + } + __pyx_v___pyx_type = values[0]; + __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) + __pyx_v___pyx_state = values[2]; + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_v___pyx_PickleError = 0; + PyObject *__pyx_v___pyx_result = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + int __pyx_t_6; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); + + /* "(tree fragment)":4 + * cdef object __pyx_PickleError + * cdef object __pyx_result + * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< + * from pickle import PickleError as __pyx_PickleError + * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) + */ + __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0); + if (__pyx_t_1) { + + /* "(tree fragment)":5 + * cdef object __pyx_result + * if __pyx_checksum != 0xb068931: + * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< + * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) + * __pyx_result = Enum.__new__(__pyx_type) + */ + __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_INCREF(__pyx_n_s_PickleError); + __Pyx_GIVEREF(__pyx_n_s_PickleError); + PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); + __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_INCREF(__pyx_t_2); + __pyx_v___pyx_PickleError = __pyx_t_2; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "(tree fragment)":6 + * if __pyx_checksum != 0xb068931: + * from pickle import PickleError as __pyx_PickleError + * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<< + * __pyx_result = Enum.__new__(__pyx_type) + * if __pyx_state is not None: + */ + __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_INCREF(__pyx_v___pyx_PickleError); + __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_5)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_5); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + } + } + __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 6, __pyx_L1_error) + + /* "(tree fragment)":4 + * cdef object __pyx_PickleError + * cdef object __pyx_result + * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< + * from pickle import PickleError as __pyx_PickleError + * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) + */ + } + + /* "(tree fragment)":7 + * from pickle import PickleError as __pyx_PickleError + * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) + * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< + * if __pyx_state is not None: + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_4)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_4); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + } + } + __pyx_t_3 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_v___pyx_result = __pyx_t_3; + __pyx_t_3 = 0; + + /* "(tree fragment)":8 + * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) + * __pyx_result = Enum.__new__(__pyx_type) + * if __pyx_state is not None: # <<<<<<<<<<<<<< + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) + * return __pyx_result + */ + __pyx_t_1 = (__pyx_v___pyx_state != Py_None); + __pyx_t_6 = (__pyx_t_1 != 0); + if (__pyx_t_6) { + + /* "(tree fragment)":9 + * __pyx_result = Enum.__new__(__pyx_type) + * if __pyx_state is not None: + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) # <<<<<<<<<<<<<< + * return __pyx_result + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): + */ + if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error) + __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "(tree fragment)":8 + * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) + * __pyx_result = Enum.__new__(__pyx_type) + * if __pyx_state is not None: # <<<<<<<<<<<<<< + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) + * return __pyx_result + */ + } + + /* "(tree fragment)":10 + * if __pyx_state is not None: + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) + * return __pyx_result # <<<<<<<<<<<<<< + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): + * __pyx_result.name = __pyx_state[0] + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v___pyx_result); + __pyx_r = __pyx_v___pyx_result; + goto __pyx_L0; + + /* "(tree fragment)":1 + * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< + * cdef object __pyx_PickleError + * cdef object __pyx_result + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v___pyx_PickleError); + __Pyx_XDECREF(__pyx_v___pyx_result); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":11 + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) + * return __pyx_result + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< + * __pyx_result.name = __pyx_state[0] + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): + */ + +static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + Py_ssize_t __pyx_t_3; + int __pyx_t_4; + int __pyx_t_5; + PyObject 
*__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); + + /* "(tree fragment)":12 + * return __pyx_result + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): + * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): + * __pyx_result.__dict__.update(__pyx_state[1]) + */ + if (unlikely(__pyx_v___pyx_state == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); + __PYX_ERR(1, 12, __pyx_L1_error) + } + __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_1); + __Pyx_GOTREF(__pyx_v___pyx_result->name); + __Pyx_DECREF(__pyx_v___pyx_result->name); + __pyx_v___pyx_result->name = __pyx_t_1; + __pyx_t_1 = 0; + + /* "(tree fragment)":13 + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): + * __pyx_result.name = __pyx_state[0] + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< + * __pyx_result.__dict__.update(__pyx_state[1]) + */ + if (unlikely(__pyx_v___pyx_state == Py_None)) { + PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); + __PYX_ERR(1, 13, __pyx_L1_error) + } + __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error) + __pyx_t_4 = ((__pyx_t_3 > 1) != 0); + if (__pyx_t_4) { + } else { + __pyx_t_2 = __pyx_t_4; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error) + __pyx_t_5 = (__pyx_t_4 != 0); + __pyx_t_2 = __pyx_t_5; + __pyx_L4_bool_binop_done:; + if (__pyx_t_2) { + + /* "(tree fragment)":14 + * __pyx_result.name = __pyx_state[0] + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): + * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< + */ + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (unlikely(__pyx_v___pyx_state == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); + __PYX_ERR(1, 14, __pyx_L1_error) + } + __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_8 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { + __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); + if (likely(__pyx_t_8)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); + __Pyx_INCREF(__pyx_t_8); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_7, function); + } + } + __pyx_t_1 = (__pyx_t_8) ? 
__Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); + __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "(tree fragment)":13 + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): + * __pyx_result.name = __pyx_state[0] + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< + * __pyx_result.__dict__.update(__pyx_state[1]) + */ + } + + /* "(tree fragment)":11 + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) + * return __pyx_result + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< + * __pyx_result.name = __pyx_state[0] + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_tp_new_5libmr_MR(PyTypeObject *t, PyObject *a, PyObject *k) { + PyObject *o; + if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { + o = (*t->tp_alloc)(t, 0); + } else { + o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); + } + if (unlikely(!o)) return 0; + if (unlikely(__pyx_pw_5libmr_2MR_1__cinit__(o, a, k) < 0)) goto bad; + return o; + bad: + Py_DECREF(o); o = 0; + return NULL; +} + +static void __pyx_tp_dealloc_5libmr_MR(PyObject *o) { + #if CYTHON_USE_TP_FINALIZE + if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { + if (PyObject_CallFinalizerFromDealloc(o)) return; + } + #endif + { + PyObject *etype, *eval, *etb; + PyErr_Fetch(&etype, &eval, &etb); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); + __pyx_pw_5libmr_2MR_3__dealloc__(o); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); + PyErr_Restore(etype, eval, etb); + } + (*Py_TYPE(o)->tp_free)(o); +} + +static PyObject *__pyx_getprop_5libmr_2MR_is_valid(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_5libmr_2MR_8is_valid_1__get__(o); +} + +static PyObject *__pyx_getprop_5libmr_2MR_tailsize(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_5libmr_2MR_8tailsize_1__get__(o); +} + +static int __pyx_setprop_5libmr_2MR_tailsize(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) { + if (v) { + return __pyx_pw_5libmr_2MR_8tailsize_3__set__(o, v); + } + else { + PyErr_SetString(PyExc_NotImplementedError, "__del__"); + return -1; + } +} + +static PyObject *__pyx_getprop_5libmr_2MR_translate_amount(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_5libmr_2MR_16translate_amount_1__get__(o); +} + +static int __pyx_setprop_5libmr_2MR_translate_amount(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) { + if (v) { + return __pyx_pw_5libmr_2MR_16translate_amount_3__set__(o, v); + } + else { + PyErr_SetString(PyExc_NotImplementedError, "__del__"); + return -1; + } +} + +static PyObject *__pyx_getprop_5libmr_2MR_sign(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_5libmr_2MR_4sign_1__get__(o); 
+} + +static int __pyx_setprop_5libmr_2MR_sign(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) { + if (v) { + return __pyx_pw_5libmr_2MR_4sign_3__set__(o, v); + } + else { + PyErr_SetString(PyExc_NotImplementedError, "__del__"); + return -1; + } +} + +static PyObject *__pyx_getprop_5libmr_2MR_small_score(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_5libmr_2MR_11small_score_1__get__(o); +} + +static int __pyx_setprop_5libmr_2MR_small_score(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) { + if (v) { + return __pyx_pw_5libmr_2MR_11small_score_3__set__(o, v); + } + else { + PyErr_SetString(PyExc_NotImplementedError, "__del__"); + return -1; + } +} + +static PyObject *__pyx_getprop_5libmr_2MR_verbose(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_5libmr_2MR_7verbose_1__get__(o); +} + +static int __pyx_setprop_5libmr_2MR_verbose(PyObject *o, PyObject *v, CYTHON_UNUSED void *x) { + if (v) { + return __pyx_pw_5libmr_2MR_7verbose_3__set__(o, v); + } + else { + PyErr_SetString(PyExc_NotImplementedError, "__del__"); + return -1; + } +} + +static PyMethodDef __pyx_methods_5libmr_MR[] = { + {"fit_low", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5libmr_2MR_5fit_low, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5libmr_2MR_4fit_low}, + {"fit_high", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5libmr_2MR_7fit_high, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5libmr_2MR_6fit_high}, + {"mr_save", (PyCFunction)__pyx_pw_5libmr_2MR_9mr_save, METH_O, __pyx_doc_5libmr_2MR_8mr_save}, + {"mr_load", (PyCFunction)__pyx_pw_5libmr_2MR_11mr_load, METH_O, __pyx_doc_5libmr_2MR_10mr_load}, + {"fit_svm", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5libmr_2MR_13fit_svm, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5libmr_2MR_12fit_svm}, + {"reset", (PyCFunction)__pyx_pw_5libmr_2MR_15reset, METH_NOARGS, 0}, + {"predict_match", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5libmr_2MR_17predict_match, METH_VARARGS|METH_KEYWORDS, __pyx_doc_5libmr_2MR_16predict_match}, + {"w_score", (PyCFunction)__pyx_pw_5libmr_2MR_19w_score, METH_O, __pyx_doc_5libmr_2MR_18w_score}, + {"cdf", (PyCFunction)__pyx_pw_5libmr_2MR_21cdf, METH_O, __pyx_doc_5libmr_2MR_20cdf}, + {"inv", (PyCFunction)__pyx_pw_5libmr_2MR_23inv, METH_O, __pyx_doc_5libmr_2MR_22inv}, + {"w_score_vector", (PyCFunction)__pyx_pw_5libmr_2MR_25w_score_vector, METH_O, __pyx_doc_5libmr_2MR_24w_score_vector}, + {"__reduce_cython__", (PyCFunction)__pyx_pw_5libmr_2MR_31__reduce_cython__, METH_NOARGS, 0}, + {"__setstate_cython__", (PyCFunction)__pyx_pw_5libmr_2MR_33__setstate_cython__, METH_O, 0}, + {0, 0, 0, 0} +}; + +static struct PyGetSetDef __pyx_getsets_5libmr_MR[] = { + {(char *)"is_valid", __pyx_getprop_5libmr_2MR_is_valid, 0, (char *)0, 0}, + {(char *)"tailsize", __pyx_getprop_5libmr_2MR_tailsize, __pyx_setprop_5libmr_2MR_tailsize, (char *)0, 0}, + {(char *)"translate_amount", __pyx_getprop_5libmr_2MR_translate_amount, __pyx_setprop_5libmr_2MR_translate_amount, (char *)0, 0}, + {(char *)"sign", __pyx_getprop_5libmr_2MR_sign, __pyx_setprop_5libmr_2MR_sign, (char *)0, 0}, + {(char *)"small_score", __pyx_getprop_5libmr_2MR_small_score, __pyx_setprop_5libmr_2MR_small_score, (char *)0, 0}, + {(char *)"verbose", __pyx_getprop_5libmr_2MR_verbose, __pyx_setprop_5libmr_2MR_verbose, (char *)0, 0}, + {0, 0, 0, 0, 0} +}; + +static PyTypeObject __pyx_type_5libmr_MR = { + PyVarObject_HEAD_INIT(0, 0) + "libmr.MR", /*tp_name*/ + sizeof(struct __pyx_obj_5libmr_MR), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc_5libmr_MR, /*tp_dealloc*/ + #if 
PY_VERSION_HEX < 0x030800b4 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 + 0, /*tp_vectorcall_offset*/ + #endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + __pyx_pw_5libmr_2MR_29__repr__, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + __pyx_pw_5libmr_2MR_27__str__, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ + 0, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods_5libmr_MR, /*tp_methods*/ + 0, /*tp_members*/ + __pyx_getsets_5libmr_MR, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new_5libmr_MR, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ + #endif + #if PY_VERSION_HEX >= 0x030800b1 + 0, /*tp_vectorcall*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 + 0, /*tp_print*/ + #endif +}; +static struct __pyx_vtabstruct_array __pyx_vtable_array; + +static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { + struct __pyx_array_obj *p; + PyObject *o; + if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { + o = (*t->tp_alloc)(t, 0); + } else { + o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); + } + if (unlikely(!o)) return 0; + p = ((struct __pyx_array_obj *)o); + p->__pyx_vtab = __pyx_vtabptr_array; + p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); + p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); + if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; + return o; + bad: + Py_DECREF(o); o = 0; + return NULL; +} + +static void __pyx_tp_dealloc_array(PyObject *o) { + struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; + #if CYTHON_USE_TP_FINALIZE + if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { + if (PyObject_CallFinalizerFromDealloc(o)) return; + } + #endif + { + PyObject *etype, *eval, *etb; + PyErr_Fetch(&etype, &eval, &etb); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); + __pyx_array___dealloc__(o); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); + PyErr_Restore(etype, eval, etb); + } + Py_CLEAR(p->mode); + Py_CLEAR(p->_format); + (*Py_TYPE(o)->tp_free)(o); +} +static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { + PyObject *r; + PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; + r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); + Py_DECREF(x); + return r; +} + +static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { + if (v) { + return __pyx_array___setitem__(o, i, v); + } + else { + PyErr_Format(PyExc_NotImplementedError, + "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); + return -1; + } +} + +static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { + PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n); + if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { + 
PyErr_Clear(); + v = __pyx_array___getattr__(o, n); + } + return v; +} + +static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); +} + +static PyMethodDef __pyx_methods_array[] = { + {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, + {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, + {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, + {0, 0, 0, 0} +}; + +static struct PyGetSetDef __pyx_getsets_array[] = { + {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, + {0, 0, 0, 0, 0} +}; + +static PySequenceMethods __pyx_tp_as_sequence_array = { + __pyx_array___len__, /*sq_length*/ + 0, /*sq_concat*/ + 0, /*sq_repeat*/ + __pyx_sq_item_array, /*sq_item*/ + 0, /*sq_slice*/ + 0, /*sq_ass_item*/ + 0, /*sq_ass_slice*/ + 0, /*sq_contains*/ + 0, /*sq_inplace_concat*/ + 0, /*sq_inplace_repeat*/ +}; + +static PyMappingMethods __pyx_tp_as_mapping_array = { + __pyx_array___len__, /*mp_length*/ + __pyx_array___getitem__, /*mp_subscript*/ + __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ +}; + +static PyBufferProcs __pyx_tp_as_buffer_array = { + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getreadbuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getwritebuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getsegcount*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getcharbuffer*/ + #endif + __pyx_array_getbuffer, /*bf_getbuffer*/ + 0, /*bf_releasebuffer*/ +}; + +static PyTypeObject __pyx_type___pyx_array = { + PyVarObject_HEAD_INIT(0, 0) + "libmr.array", /*tp_name*/ + sizeof(struct __pyx_array_obj), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc_array, /*tp_dealloc*/ + #if PY_VERSION_HEX < 0x030800b4 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 + 0, /*tp_vectorcall_offset*/ + #endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ + &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + __pyx_tp_getattro_array, /*tp_getattro*/ + 0, /*tp_setattro*/ + &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ + 0, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods_array, /*tp_methods*/ + 0, /*tp_members*/ + __pyx_getsets_array, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new_array, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ + #endif + #if PY_VERSION_HEX >= 0x030800b1 + 0, /*tp_vectorcall*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 + 0, /*tp_print*/ + #endif +}; + +static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { + struct __pyx_MemviewEnum_obj *p; + PyObject *o; + if (likely((t->tp_flags & 
Py_TPFLAGS_IS_ABSTRACT) == 0)) { + o = (*t->tp_alloc)(t, 0); + } else { + o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); + } + if (unlikely(!o)) return 0; + p = ((struct __pyx_MemviewEnum_obj *)o); + p->name = Py_None; Py_INCREF(Py_None); + return o; +} + +static void __pyx_tp_dealloc_Enum(PyObject *o) { + struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; + #if CYTHON_USE_TP_FINALIZE + if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { + if (PyObject_CallFinalizerFromDealloc(o)) return; + } + #endif + PyObject_GC_UnTrack(o); + Py_CLEAR(p->name); + (*Py_TYPE(o)->tp_free)(o); +} + +static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { + int e; + struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; + if (p->name) { + e = (*v)(p->name, a); if (e) return e; + } + return 0; +} + +static int __pyx_tp_clear_Enum(PyObject *o) { + PyObject* tmp; + struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; + tmp = ((PyObject*)p->name); + p->name = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + return 0; +} + +static PyMethodDef __pyx_methods_Enum[] = { + {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, + {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, + {0, 0, 0, 0} +}; + +static PyTypeObject __pyx_type___pyx_MemviewEnum = { + PyVarObject_HEAD_INIT(0, 0) + "libmr.Enum", /*tp_name*/ + sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc_Enum, /*tp_dealloc*/ + #if PY_VERSION_HEX < 0x030800b4 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 + 0, /*tp_vectorcall_offset*/ + #endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + __pyx_MemviewEnum___repr__, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + 0, /*tp_doc*/ + __pyx_tp_traverse_Enum, /*tp_traverse*/ + __pyx_tp_clear_Enum, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods_Enum, /*tp_methods*/ + 0, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + __pyx_MemviewEnum___init__, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new_Enum, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ + #endif + #if PY_VERSION_HEX >= 0x030800b1 + 0, /*tp_vectorcall*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 + 0, /*tp_print*/ + #endif +}; +static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; + +static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { + struct __pyx_memoryview_obj *p; + PyObject *o; + if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { + o = (*t->tp_alloc)(t, 0); + } else { + o = (PyObject *) PyBaseObject_Type.tp_new(t, 
__pyx_empty_tuple, 0); + } + if (unlikely(!o)) return 0; + p = ((struct __pyx_memoryview_obj *)o); + p->__pyx_vtab = __pyx_vtabptr_memoryview; + p->obj = Py_None; Py_INCREF(Py_None); + p->_size = Py_None; Py_INCREF(Py_None); + p->_array_interface = Py_None; Py_INCREF(Py_None); + p->view.obj = NULL; + if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; + return o; + bad: + Py_DECREF(o); o = 0; + return NULL; +} + +static void __pyx_tp_dealloc_memoryview(PyObject *o) { + struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; + #if CYTHON_USE_TP_FINALIZE + if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { + if (PyObject_CallFinalizerFromDealloc(o)) return; + } + #endif + PyObject_GC_UnTrack(o); + { + PyObject *etype, *eval, *etb; + PyErr_Fetch(&etype, &eval, &etb); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); + __pyx_memoryview___dealloc__(o); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); + PyErr_Restore(etype, eval, etb); + } + Py_CLEAR(p->obj); + Py_CLEAR(p->_size); + Py_CLEAR(p->_array_interface); + (*Py_TYPE(o)->tp_free)(o); +} + +static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { + int e; + struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; + if (p->obj) { + e = (*v)(p->obj, a); if (e) return e; + } + if (p->_size) { + e = (*v)(p->_size, a); if (e) return e; + } + if (p->_array_interface) { + e = (*v)(p->_array_interface, a); if (e) return e; + } + if (p->view.obj) { + e = (*v)(p->view.obj, a); if (e) return e; + } + return 0; +} + +static int __pyx_tp_clear_memoryview(PyObject *o) { + PyObject* tmp; + struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; + tmp = ((PyObject*)p->obj); + p->obj = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_size); + p->_size = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_array_interface); + p->_array_interface = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + Py_CLEAR(p->view.obj); + return 0; +} +static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { + PyObject *r; + PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; + r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); + Py_DECREF(x); + return r; +} + +static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { + if (v) { + return __pyx_memoryview___setitem__(o, i, v); + } + else { + PyErr_Format(PyExc_NotImplementedError, + "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); + return -1; + } +} + +static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { + return 
__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); +} + +static PyMethodDef __pyx_methods_memoryview[] = { + {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, + {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, + {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, + {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, + {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, + {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, + {0, 0, 0, 0} +}; + +static struct PyGetSetDef __pyx_getsets_memoryview[] = { + {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, + {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, + {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, + {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, + {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, + {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, + {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, + {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, + {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, + {0, 0, 0, 0, 0} +}; + +static PySequenceMethods __pyx_tp_as_sequence_memoryview = { + __pyx_memoryview___len__, /*sq_length*/ + 0, /*sq_concat*/ + 0, /*sq_repeat*/ + __pyx_sq_item_memoryview, /*sq_item*/ + 0, /*sq_slice*/ + 0, /*sq_ass_item*/ + 0, /*sq_ass_slice*/ + 0, /*sq_contains*/ + 0, /*sq_inplace_concat*/ + 0, /*sq_inplace_repeat*/ +}; + +static PyMappingMethods __pyx_tp_as_mapping_memoryview = { + __pyx_memoryview___len__, /*mp_length*/ + __pyx_memoryview___getitem__, /*mp_subscript*/ + __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ +}; + +static PyBufferProcs __pyx_tp_as_buffer_memoryview = { + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getreadbuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getwritebuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getsegcount*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getcharbuffer*/ + #endif + __pyx_memoryview_getbuffer, /*bf_getbuffer*/ + 0, /*bf_releasebuffer*/ +}; + +static PyTypeObject __pyx_type___pyx_memoryview = { + PyVarObject_HEAD_INIT(0, 0) + "libmr.memoryview", /*tp_name*/ + sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ + #if PY_VERSION_HEX < 0x030800b4 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 + 0, /*tp_vectorcall_offset*/ + #endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + __pyx_memoryview___repr__, /*tp_repr*/ + 0, /*tp_as_number*/ + &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ + &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ + 
0, /*tp_hash*/ + 0, /*tp_call*/ + __pyx_memoryview___str__, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + 0, /*tp_doc*/ + __pyx_tp_traverse_memoryview, /*tp_traverse*/ + __pyx_tp_clear_memoryview, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods_memoryview, /*tp_methods*/ + 0, /*tp_members*/ + __pyx_getsets_memoryview, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new_memoryview, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ + #endif + #if PY_VERSION_HEX >= 0x030800b1 + 0, /*tp_vectorcall*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 + 0, /*tp_print*/ + #endif +}; +static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; + +static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { + struct __pyx_memoryviewslice_obj *p; + PyObject *o = __pyx_tp_new_memoryview(t, a, k); + if (unlikely(!o)) return 0; + p = ((struct __pyx_memoryviewslice_obj *)o); + p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; + p->from_object = Py_None; Py_INCREF(Py_None); + p->from_slice.memview = NULL; + return o; +} + +static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { + struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; + #if CYTHON_USE_TP_FINALIZE + if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { + if (PyObject_CallFinalizerFromDealloc(o)) return; + } + #endif + PyObject_GC_UnTrack(o); + { + PyObject *etype, *eval, *etb; + PyErr_Fetch(&etype, &eval, &etb); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); + __pyx_memoryviewslice___dealloc__(o); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); + PyErr_Restore(etype, eval, etb); + } + Py_CLEAR(p->from_object); + PyObject_GC_Track(o); + __pyx_tp_dealloc_memoryview(o); +} + +static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { + int e; + struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; + e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; + if (p->from_object) { + e = (*v)(p->from_object, a); if (e) return e; + } + return 0; +} + +static int __pyx_tp_clear__memoryviewslice(PyObject *o) { + PyObject* tmp; + struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; + __pyx_tp_clear_memoryview(o); + tmp = ((PyObject*)p->from_object); + p->from_object = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + __PYX_XDEC_MEMVIEW(&p->from_slice, 1); + return 0; +} + +static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); +} + +static PyMethodDef __pyx_methods__memoryviewslice[] = { + {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, + {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 
0}, + {0, 0, 0, 0} +}; + +static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = { + {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, + {0, 0, 0, 0, 0} +}; + +static PyTypeObject __pyx_type___pyx_memoryviewslice = { + PyVarObject_HEAD_INIT(0, 0) + "libmr._memoryviewslice", /*tp_name*/ + sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ + #if PY_VERSION_HEX < 0x030800b4 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 + 0, /*tp_vectorcall_offset*/ + #endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + #if CYTHON_COMPILING_IN_PYPY + __pyx_memoryview___repr__, /*tp_repr*/ + #else + 0, /*tp_repr*/ + #endif + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + #if CYTHON_COMPILING_IN_PYPY + __pyx_memoryview___str__, /*tp_str*/ + #else + 0, /*tp_str*/ + #endif + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + "Internal class for passing memoryview slices to Python", /*tp_doc*/ + __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ + __pyx_tp_clear__memoryviewslice, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods__memoryviewslice, /*tp_methods*/ + 0, /*tp_members*/ + __pyx_getsets__memoryviewslice, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new__memoryviewslice, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ + #endif + #if PY_VERSION_HEX >= 0x030800b1 + 0, /*tp_vectorcall*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 + 0, /*tp_print*/ + #endif +}; + +static PyMethodDef __pyx_methods[] = { + {0, 0, 0, 0} +}; + +#if PY_MAJOR_VERSION >= 3 +#if CYTHON_PEP489_MULTI_PHASE_INIT +static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ +static int __pyx_pymod_exec_libmr(PyObject* module); /*proto*/ +static PyModuleDef_Slot __pyx_moduledef_slots[] = { + {Py_mod_create, (void*)__pyx_pymod_create}, + {Py_mod_exec, (void*)__pyx_pymod_exec_libmr}, + {0, NULL} +}; +#endif + +static struct PyModuleDef __pyx_moduledef = { + PyModuleDef_HEAD_INIT, + "libmr", + 0, /* m_doc */ + #if CYTHON_PEP489_MULTI_PHASE_INIT + 0, /* m_size */ + #else + -1, /* m_size */ + #endif + __pyx_methods /* m_methods */, + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_moduledef_slots, /* m_slots */ + #else + NULL, /* m_reload */ + #endif + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL /* m_free */ +}; +#endif +#ifndef CYTHON_SMALL_CODE +#if defined(__clang__) + #define CYTHON_SMALL_CODE +#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) + #define CYTHON_SMALL_CODE __attribute__((cold)) +#else + #define CYTHON_SMALL_CODE +#endif +#endif + +static __Pyx_StringTabEntry __pyx_string_tab[] = { + {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, + {&__pyx_kp_s_Buffer_view_does_not_expose_stri, 
__pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, + {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, + {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, + {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, + {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, + {&__pyx_kp_s_Data_initizalization_complete_No, __pyx_k_Data_initizalization_complete_No, sizeof(__pyx_k_Data_initizalization_complete_No), 0, 0, 1, 0}, + {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, + {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, + {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, + {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, + {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, + {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, + {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, + {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, + {&__pyx_n_s_MR, __pyx_k_MR, sizeof(__pyx_k_MR), 0, 0, 1, 1}, + {&__pyx_kp_s_MR_object_r, __pyx_k_MR_object_r, sizeof(__pyx_k_MR_object_r), 0, 0, 1, 0}, + {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, + {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, + {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, + {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, + {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, + {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, + {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, + {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, + {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, + {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, + {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, + {&__pyx_n_s_alpha, __pyx_k_alpha, sizeof(__pyx_k_alpha), 0, 0, 1, 1}, + {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, + {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, + {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, + {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, + {&__pyx_n_s_cline_in_traceback, 
__pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, + {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, + {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, + {&__pyx_n_s_d, __pyx_k_d, sizeof(__pyx_k_d), 0, 0, 1, 1}, + {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, + {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, + {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, + {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, + {&__pyx_n_s_end, __pyx_k_end, sizeof(__pyx_k_end), 0, 0, 1, 1}, + {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, + {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, + {&__pyx_n_s_file, __pyx_k_file, sizeof(__pyx_k_file), 0, 0, 1, 1}, + {&__pyx_n_s_fit_size, __pyx_k_fit_size, sizeof(__pyx_k_fit_size), 0, 0, 1, 1}, + {&__pyx_n_s_fit_type, __pyx_k_fit_type, sizeof(__pyx_k_fit_type), 0, 0, 1, 1}, + {&__pyx_n_s_fitting_size, __pyx_k_fitting_size, sizeof(__pyx_k_fitting_size), 0, 0, 1, 1}, + {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, + {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, + {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, + {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, + {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, + {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, + {&__pyx_n_s_has_key, __pyx_k_has_key, sizeof(__pyx_k_has_key), 0, 0, 1, 1}, + {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, + {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, + {&__pyx_n_s_input, __pyx_k_input, sizeof(__pyx_k_input), 0, 0, 1, 1}, + {&__pyx_n_s_inputData, __pyx_k_inputData, sizeof(__pyx_k_inputData), 0, 0, 1, 1}, + {&__pyx_n_s_inputDataSize, __pyx_k_inputDataSize, sizeof(__pyx_k_inputDataSize), 0, 0, 1, 1}, + {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, + {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, + {&__pyx_n_s_label_has_positive_score, __pyx_k_label_has_positive_score, sizeof(__pyx_k_label_has_positive_score), 0, 0, 1, 1}, + {&__pyx_n_s_label_of_interest, __pyx_k_label_of_interest, sizeof(__pyx_k_label_of_interest), 0, 0, 1, 1}, + {&__pyx_n_s_labels, __pyx_k_labels, sizeof(__pyx_k_labels), 0, 0, 1, 1}, + {&__pyx_n_s_libmr, __pyx_k_libmr, sizeof(__pyx_k_libmr), 0, 0, 1, 1}, + {&__pyx_kp_s_libmr_pyx, __pyx_k_libmr_pyx, sizeof(__pyx_k_libmr_pyx), 0, 0, 1, 0}, + {&__pyx_n_s_load_from_string, __pyx_k_load_from_string, sizeof(__pyx_k_load_from_string), 0, 0, 1, 1}, + {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, + {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, + {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, + {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, + {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, + {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, + {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, + 
{&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, + {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, + {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, + {&__pyx_kp_s_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 0, 1, 0}, + {&__pyx_kp_s_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 0, 1, 0}, + {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, + {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, + {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, + {&__pyx_n_s_print, __pyx_k_print, sizeof(__pyx_k_print), 0, 0, 1, 1}, + {&__pyx_n_s_pymr, __pyx_k_pymr, sizeof(__pyx_k_pymr), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, + {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, + {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, + {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, + {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, + {&__pyx_n_s_scores, __pyx_k_scores, sizeof(__pyx_k_scores), 0, 0, 1, 1}, + {&__pyx_n_s_scores_to_drop, __pyx_k_scores_to_drop, sizeof(__pyx_k_scores_to_drop), 0, 0, 1, 1}, + {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, + {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, + {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, + {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, + {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, + {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, + {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, + {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, + {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, + {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, + {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, + {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, + {&__pyx_n_s_svm_data, __pyx_k_svm_data, sizeof(__pyx_k_svm_data), 0, 0, 1, 1}, + {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, + {&__pyx_n_s_threshold, __pyx_k_threshold, sizeof(__pyx_k_threshold), 0, 0, 1, 1}, + {&__pyx_n_s_translate_amount, __pyx_k_translate_amount, 
sizeof(__pyx_k_translate_amount), 0, 0, 1, 1}, + {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, + {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, + {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, + {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, + {&__pyx_n_s_verbose, __pyx_k_verbose, sizeof(__pyx_k_verbose), 0, 0, 1, 1}, + {&__pyx_n_s_x, __pyx_k_x, sizeof(__pyx_k_x), 0, 0, 1, 1}, + {&__pyx_n_s_xrange, __pyx_k_xrange, sizeof(__pyx_k_xrange), 0, 0, 1, 1}, + {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, + {0, 0, 0, 0, 0, 0, 0} +}; +static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { + #if PY_MAJOR_VERSION >= 3 + __pyx_builtin_xrange = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_xrange) __PYX_ERR(0, 115, __pyx_L1_error) + #else + __pyx_builtin_xrange = __Pyx_GetBuiltinName(__pyx_n_s_xrange); if (!__pyx_builtin_xrange) __PYX_ERR(0, 115, __pyx_L1_error) + #endif + __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 179, __pyx_L1_error) + __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) + __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(2, 884, __pyx_L1_error) + __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error) + __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error) + __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error) + __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 404, __pyx_L1_error) + __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 613, __pyx_L1_error) + __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 832, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple_)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple_); + __Pyx_GIVEREF(__pyx_tuple_); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__2); + __Pyx_GIVEREF(__pyx_tuple__2); + + /* 
"../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":884 + * __pyx_import_array() + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_umath() except -1: + */ + __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(2, 884, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__3); + __Pyx_GIVEREF(__pyx_tuple__3); + + /* "../../../../anaconda3/envs/mmaction/lib/python3.7/site-packages/numpy/__init__.pxd":890 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_ufunc() except -1: + */ + __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(2, 890, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__4); + __Pyx_GIVEREF(__pyx_tuple__4); + + /* "View.MemoryView":133 + * + * if not self.ndim: + * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< + * + * if itemsize <= 0: + */ + __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 133, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__5); + __Pyx_GIVEREF(__pyx_tuple__5); + + /* "View.MemoryView":136 + * + * if itemsize <= 0: + * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< + * + * if not isinstance(format, bytes): + */ + __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 136, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__6); + __Pyx_GIVEREF(__pyx_tuple__6); + + /* "View.MemoryView":148 + * + * if not self._shape: + * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< + * + * + */ + __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 148, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__7); + __Pyx_GIVEREF(__pyx_tuple__7); + + /* "View.MemoryView":176 + * self.data = malloc(self.len) + * if not self.data: + * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< + * + * if self.dtype_is_object: + */ + __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 176, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__8); + __Pyx_GIVEREF(__pyx_tuple__8); + + /* "View.MemoryView":192 + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): + * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< + * info.buf = self.data + * info.len = self.len + */ + __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 192, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__9); + __Pyx_GIVEREF(__pyx_tuple__9); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__10); + __Pyx_GIVEREF(__pyx_tuple__10); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def 
__setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__11); + __Pyx_GIVEREF(__pyx_tuple__11); + + /* "View.MemoryView":418 + * def __setitem__(memoryview self, object index, object value): + * if self.view.readonly: + * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< + * + * have_slices, index = _unellipsify(index, self.view.ndim) + */ + __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 418, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__12); + __Pyx_GIVEREF(__pyx_tuple__12); + + /* "View.MemoryView":495 + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: + * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< + * else: + * if len(self.view.format) == 1: + */ + __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 495, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__13); + __Pyx_GIVEREF(__pyx_tuple__13); + + /* "View.MemoryView":520 + * def __getbuffer__(self, Py_buffer *info, int flags): + * if flags & PyBUF_WRITABLE and self.view.readonly: + * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< + * + * if flags & PyBUF_ND: + */ + __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 520, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__14); + __Pyx_GIVEREF(__pyx_tuple__14); + + /* "View.MemoryView":570 + * if self.view.strides == NULL: + * + * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< + * + * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) + */ + __pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 570, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__15); + __Pyx_GIVEREF(__pyx_tuple__15); + + /* "View.MemoryView":577 + * def suboffsets(self): + * if self.view.suboffsets == NULL: + * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< + * + * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) + */ + __pyx_tuple__16 = PyTuple_New(1); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 577, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__16); + __Pyx_INCREF(__pyx_int_neg_1); + __Pyx_GIVEREF(__pyx_int_neg_1); + PyTuple_SET_ITEM(__pyx_tuple__16, 0, __pyx_int_neg_1); + __Pyx_GIVEREF(__pyx_tuple__16); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__17); + __Pyx_GIVEREF(__pyx_tuple__17); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_tuple__18 = PyTuple_Pack(1, 
__pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__18); + __Pyx_GIVEREF(__pyx_tuple__18); + + /* "View.MemoryView":682 + * if item is Ellipsis: + * if not seen_ellipsis: + * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< + * seen_ellipsis = True + * else: + */ + __pyx_slice__19 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__19)) __PYX_ERR(1, 682, __pyx_L1_error) + __Pyx_GOTREF(__pyx_slice__19); + __Pyx_GIVEREF(__pyx_slice__19); + + /* "View.MemoryView":703 + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: + * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< + * + * + */ + __pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(1, 703, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__20); + __Pyx_GIVEREF(__pyx_tuple__20); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__21); + __Pyx_GIVEREF(__pyx_tuple__21); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__22); + __Pyx_GIVEREF(__pyx_tuple__22); + + /* "libmr.pyx":254 + * self.thisptr.verbose = verbose + * + * def load_from_string(str input): # <<<<<<<<<<<<<< + * """ + * Deserialize an MR object. 
This turns a string back into an MR object; it is the inverse of str(MR()) + */ + __pyx_tuple__23 = PyTuple_Pack(2, __pyx_n_s_input, __pyx_n_s_pymr); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(0, 254, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__23); + __Pyx_GIVEREF(__pyx_tuple__23); + __pyx_codeobj__24 = (PyObject*)__Pyx_PyCode_New(1, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_libmr_pyx, __pyx_n_s_load_from_string, 254, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__24)) __PYX_ERR(0, 254, __pyx_L1_error) + + /* "View.MemoryView":286 + * return self.name + * + * cdef generic = Enum("") # <<<<<<<<<<<<<< + * cdef strided = Enum("") # default + * cdef indirect = Enum("") + */ + __pyx_tuple__25 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 286, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__25); + __Pyx_GIVEREF(__pyx_tuple__25); + + /* "View.MemoryView":287 + * + * cdef generic = Enum("") + * cdef strided = Enum("") # default # <<<<<<<<<<<<<< + * cdef indirect = Enum("") + * + */ + __pyx_tuple__26 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(1, 287, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__26); + __Pyx_GIVEREF(__pyx_tuple__26); + + /* "View.MemoryView":288 + * cdef generic = Enum("") + * cdef strided = Enum("") # default + * cdef indirect = Enum("") # <<<<<<<<<<<<<< + * + * + */ + __pyx_tuple__27 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__27)) __PYX_ERR(1, 288, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__27); + __Pyx_GIVEREF(__pyx_tuple__27); + + /* "View.MemoryView":291 + * + * + * cdef contiguous = Enum("") # <<<<<<<<<<<<<< + * cdef indirect_contiguous = Enum("") + * + */ + __pyx_tuple__28 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__28)) __PYX_ERR(1, 291, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__28); + __Pyx_GIVEREF(__pyx_tuple__28); + + /* "View.MemoryView":292 + * + * cdef contiguous = Enum("") + * cdef indirect_contiguous = Enum("") # <<<<<<<<<<<<<< + * + * + */ + __pyx_tuple__29 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__29)) __PYX_ERR(1, 292, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__29); + __Pyx_GIVEREF(__pyx_tuple__29); + + /* "(tree fragment)":1 + * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< + * cdef object __pyx_PickleError + * cdef object __pyx_result + */ + __pyx_tuple__30 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__30)) __PYX_ERR(1, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__30); + __Pyx_GIVEREF(__pyx_tuple__30); + __pyx_codeobj__31 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__30, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__31)) __PYX_ERR(1, 1, __pyx_L1_error) + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { + if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, 
__pyx_L1_error) + __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_3 = PyInt_FromLong(3); if (unlikely(!__pyx_int_3)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_4 = PyInt_FromLong(4); if (unlikely(!__pyx_int_4)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ + +static int __Pyx_modinit_global_init_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); + /*--- Global init code ---*/ + generic = Py_None; Py_INCREF(Py_None); + strided = Py_None; Py_INCREF(Py_None); + indirect = Py_None; Py_INCREF(Py_None); + contiguous = Py_None; Py_INCREF(Py_None); + indirect_contiguous = Py_None; Py_INCREF(Py_None); + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_variable_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); + /*--- Variable export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); + /*--- Function export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_type_init_code(void) { + __Pyx_RefNannyDeclarations + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); + /*--- Type init code ---*/ + if (PyType_Ready(&__pyx_type_5libmr_MR) < 0) __PYX_ERR(0, 95, __pyx_L1_error) + #if PY_VERSION_HEX < 0x030800B1 + __pyx_type_5libmr_MR.tp_print = 0; + #endif + if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_5libmr_MR.tp_dictoffset && __pyx_type_5libmr_MR.tp_getattro == PyObject_GenericGetAttr)) { + __pyx_type_5libmr_MR.tp_getattro = __Pyx_PyObject_GenericGetAttr; + } + #if CYTHON_COMPILING_IN_CPYTHON + { + PyObject *wrapper = PyObject_GetAttrString((PyObject *)&__pyx_type_5libmr_MR, "__str__"); if (unlikely(!wrapper)) __PYX_ERR(0, 95, __pyx_L1_error) + if (Py_TYPE(wrapper) == &PyWrapperDescr_Type) { + __pyx_wrapperbase_5libmr_2MR_26__str__ = *((PyWrapperDescrObject *)wrapper)->d_base; + __pyx_wrapperbase_5libmr_2MR_26__str__.doc = __pyx_doc_5libmr_2MR_26__str__; + ((PyWrapperDescrObject *)wrapper)->d_base = &__pyx_wrapperbase_5libmr_2MR_26__str__; + } + } + #endif + if (PyObject_SetAttr(__pyx_m, __pyx_n_s_MR, (PyObject *)&__pyx_type_5libmr_MR) < 0) __PYX_ERR(0, 95, __pyx_L1_error) + if (__Pyx_setup_reduce((PyObject*)&__pyx_type_5libmr_MR) < 0) __PYX_ERR(0, 95, __pyx_L1_error) + 
__pyx_ptype_5libmr_MR = &__pyx_type_5libmr_MR; + __pyx_vtabptr_array = &__pyx_vtable_array; + __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; + if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) + #if PY_VERSION_HEX < 0x030800B1 + __pyx_type___pyx_array.tp_print = 0; + #endif + if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) + if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) + __pyx_array_type = &__pyx_type___pyx_array; + if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) + #if PY_VERSION_HEX < 0x030800B1 + __pyx_type___pyx_MemviewEnum.tp_print = 0; + #endif + if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) { + __pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr; + } + if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) + __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; + __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; + __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; + __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; + __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; + __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; + __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; + __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; + __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; + if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) + #if PY_VERSION_HEX < 0x030800B1 + __pyx_type___pyx_memoryview.tp_print = 0; + #endif + if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) { + __pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr; + } + if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) + if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) + __pyx_memoryview_type = &__pyx_type___pyx_memoryview; + __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; + __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; + __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; + __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject 
*))__pyx_memoryviewslice_assign_item_from_object; + __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; + if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) + #if PY_VERSION_HEX < 0x030800B1 + __pyx_type___pyx_memoryviewslice.tp_print = 0; + #endif + if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) { + __pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr; + } + if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) + if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) + __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static int __Pyx_modinit_type_import_code(void) { + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); + /*--- Type import code ---*/ + __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 9, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type", + #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000 + sizeof(PyTypeObject), + #else + sizeof(PyHeapTypeObject), + #endif + __Pyx_ImportType_CheckSize_Warn); + if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(3, 9, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = PyImport_ImportModule("numpy"); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 199, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_ptype_5numpy_dtype = __Pyx_ImportType(__pyx_t_1, "numpy", "dtype", sizeof(PyArray_Descr), __Pyx_ImportType_CheckSize_Ignore); + if (!__pyx_ptype_5numpy_dtype) __PYX_ERR(2, 199, __pyx_L1_error) + __pyx_ptype_5numpy_flatiter = __Pyx_ImportType(__pyx_t_1, "numpy", "flatiter", sizeof(PyArrayIterObject), __Pyx_ImportType_CheckSize_Ignore); + if (!__pyx_ptype_5numpy_flatiter) __PYX_ERR(2, 222, __pyx_L1_error) + __pyx_ptype_5numpy_broadcast = __Pyx_ImportType(__pyx_t_1, "numpy", "broadcast", sizeof(PyArrayMultiIterObject), __Pyx_ImportType_CheckSize_Ignore); + if (!__pyx_ptype_5numpy_broadcast) __PYX_ERR(2, 226, __pyx_L1_error) + __pyx_ptype_5numpy_ndarray = __Pyx_ImportType(__pyx_t_1, "numpy", "ndarray", sizeof(PyArrayObject), __Pyx_ImportType_CheckSize_Ignore); + if (!__pyx_ptype_5numpy_ndarray) __PYX_ERR(2, 238, __pyx_L1_error) + __pyx_ptype_5numpy_ufunc = __Pyx_ImportType(__pyx_t_1, "numpy", "ufunc", sizeof(PyUFuncObject), __Pyx_ImportType_CheckSize_Ignore); + if (!__pyx_ptype_5numpy_ufunc) __PYX_ERR(2, 764, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_RefNannyFinishContext(); + return -1; +} + +static int __Pyx_modinit_variable_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); + /*--- Variable import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_import_code(void) { + __Pyx_RefNannyDeclarations + 
__Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); + /*--- Function import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + + +#ifndef CYTHON_NO_PYINIT_EXPORT +#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC +#elif PY_MAJOR_VERSION < 3 +#ifdef __cplusplus +#define __Pyx_PyMODINIT_FUNC extern "C" void +#else +#define __Pyx_PyMODINIT_FUNC void +#endif +#else +#ifdef __cplusplus +#define __Pyx_PyMODINIT_FUNC extern "C" PyObject * +#else +#define __Pyx_PyMODINIT_FUNC PyObject * +#endif +#endif + + +#if PY_MAJOR_VERSION < 3 +__Pyx_PyMODINIT_FUNC initlibmr(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC initlibmr(void) +#else +__Pyx_PyMODINIT_FUNC PyInit_libmr(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC PyInit_libmr(void) +#if CYTHON_PEP489_MULTI_PHASE_INIT +{ + return PyModuleDef_Init(&__pyx_moduledef); +} +static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { + #if PY_VERSION_HEX >= 0x030700A1 + static PY_INT64_T main_interpreter_id = -1; + PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); + if (main_interpreter_id == -1) { + main_interpreter_id = current_id; + return (unlikely(current_id == -1)) ? -1 : 0; + } else if (unlikely(main_interpreter_id != current_id)) + #else + static PyInterpreterState *main_interpreter = NULL; + PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; + if (!main_interpreter) { + main_interpreter = current_interpreter; + } else if (unlikely(main_interpreter != current_interpreter)) + #endif + { + PyErr_SetString( + PyExc_ImportError, + "Interpreter change detected - this module can only be loaded into one interpreter per process."); + return -1; + } + return 0; +} +static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { + PyObject *value = PyObject_GetAttrString(spec, from_name); + int result = 0; + if (likely(value)) { + if (allow_none || value != Py_None) { + result = PyDict_SetItemString(moddict, to_name, value); + } + Py_DECREF(value); + } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Clear(); + } else { + result = -1; + } + return result; +} +static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { + PyObject *module = NULL, *moddict, *modname; + if (__Pyx_check_single_interpreter()) + return NULL; + if (__pyx_m) + return __Pyx_NewRef(__pyx_m); + modname = PyObject_GetAttrString(spec, "name"); + if (unlikely(!modname)) goto bad; + module = PyModule_NewObject(modname); + Py_DECREF(modname); + if (unlikely(!module)) goto bad; + moddict = PyModule_GetDict(module); + if (unlikely(!moddict)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; + return module; +bad: + Py_XDECREF(module); + return NULL; +} + + +static CYTHON_SMALL_CODE int __pyx_pymod_exec_libmr(PyObject *__pyx_pyinit_module) +#endif +#endif +{ + PyObject *__pyx_t_1 = NULL; + static PyThread_type_lock __pyx_t_2[8]; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannyDeclarations + #if CYTHON_PEP489_MULTI_PHASE_INIT + if 
(__pyx_m) { + if (__pyx_m == __pyx_pyinit_module) return 0; + PyErr_SetString(PyExc_RuntimeError, "Module 'libmr' has already been imported. Re-initialisation is not supported."); + return -1; + } + #elif PY_MAJOR_VERSION >= 3 + if (__pyx_m) return __Pyx_NewRef(__pyx_m); + #endif + #if CYTHON_REFNANNY +__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); +if (!__Pyx_RefNanny) { + PyErr_Clear(); + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); + if (!__Pyx_RefNanny) + Py_FatalError("failed to import 'refnanny' module"); +} +#endif + __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_libmr(void)", 0); + if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pxy_PyFrame_Initialize_Offsets + __Pxy_PyFrame_Initialize_Offsets(); + #endif + __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pyx_CyFunction_USED + if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_FusedFunction_USED + if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Coroutine_USED + if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Generator_USED + if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_AsyncGen_USED + if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_StopAsyncIteration_USED + if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + /*--- Library function declarations ---*/ + /*--- Threads initialization code ---*/ + #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS + #ifdef WITH_THREAD /* Python build with threading support? */ + PyEval_InitThreads(); + #endif + #endif + /*--- Module creation code ---*/ + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_m = __pyx_pyinit_module; + Py_INCREF(__pyx_m); + #else + #if PY_MAJOR_VERSION < 3 + __pyx_m = Py_InitModule4("libmr", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); + #else + __pyx_m = PyModule_Create(&__pyx_moduledef); + #endif + if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_d); + __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_b); + __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_cython_runtime); + if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + /*--- Initialize various global constants etc. 
---*/ + if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) + if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + if (__pyx_module_is_main_libmr) { + if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + } + #if PY_MAJOR_VERSION >= 3 + { + PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) + if (!PyDict_GetItemString(modules, "libmr")) { + if (unlikely(PyDict_SetItemString(modules, "libmr", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) + } + } + #endif + /*--- Builtin init code ---*/ + if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Constants init code ---*/ + if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Global type/function init code ---*/ + (void)__Pyx_modinit_global_init_code(); + (void)__Pyx_modinit_variable_export_code(); + (void)__Pyx_modinit_function_export_code(); + if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) + if (unlikely(__Pyx_modinit_type_import_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) + (void)__Pyx_modinit_variable_import_code(); + (void)__Pyx_modinit_function_import_code(); + /*--- Execution code ---*/ + #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) + if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + + /* "libmr.pyx":37 + * from libcpp.string cimport string + * cimport numpy as np + * import numpy as np # <<<<<<<<<<<<<< + * + * cdef extern from "MetaRecognition.h": + */ + __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 37, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 37, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "libmr.pyx":254 + * self.thisptr.verbose = verbose + * + * def load_from_string(str input): # <<<<<<<<<<<<<< + * """ + * Deserialize an MR object. 
This turns a string back into an MR object; it is the inverse of str(MR()) + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5libmr_1load_from_string, NULL, __pyx_n_s_libmr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 254, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_load_from_string, __pyx_t_1) < 0) __PYX_ERR(0, 254, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "libmr.pyx":1 + * # # <<<<<<<<<<<<<< + * # libmr.pyx: + * # + */ + __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "View.MemoryView":209 + * info.obj = self + * + * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< + * + * def __dealloc__(array self): + */ + __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + PyType_Modified(__pyx_array_type); + + /* "View.MemoryView":286 + * return self.name + * + * cdef generic = Enum("") # <<<<<<<<<<<<<< + * cdef strided = Enum("") # default + * cdef indirect = Enum("") + */ + __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__25, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_XGOTREF(generic); + __Pyx_DECREF_SET(generic, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_1); + __pyx_t_1 = 0; + + /* "View.MemoryView":287 + * + * cdef generic = Enum("") + * cdef strided = Enum("") # default # <<<<<<<<<<<<<< + * cdef indirect = Enum("") + * + */ + __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__26, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_XGOTREF(strided); + __Pyx_DECREF_SET(strided, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_1); + __pyx_t_1 = 0; + + /* "View.MemoryView":288 + * cdef generic = Enum("") + * cdef strided = Enum("") # default + * cdef indirect = Enum("") # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__27, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_XGOTREF(indirect); + __Pyx_DECREF_SET(indirect, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_1); + __pyx_t_1 = 0; + + /* "View.MemoryView":291 + * + * + * cdef contiguous = Enum("") # <<<<<<<<<<<<<< + * cdef indirect_contiguous = Enum("") + * + */ + __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__28, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_XGOTREF(contiguous); + __Pyx_DECREF_SET(contiguous, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_1); + __pyx_t_1 = 0; + + /* "View.MemoryView":292 + * + * cdef contiguous = Enum("") + * cdef indirect_contiguous = Enum("") # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__29, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_XGOTREF(indirect_contiguous); + __Pyx_DECREF_SET(indirect_contiguous, 
__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_1); + __pyx_t_1 = 0; + + /* "View.MemoryView":316 + * + * DEF THREAD_LOCKS_PREALLOCATED = 8 + * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< + * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ + * PyThread_allocate_lock(), + */ + __pyx_memoryview_thread_locks_used = 0; + + /* "View.MemoryView":317 + * DEF THREAD_LOCKS_PREALLOCATED = 8 + * cdef int __pyx_memoryview_thread_locks_used = 0 + * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< + * PyThread_allocate_lock(), + * PyThread_allocate_lock(), + */ + __pyx_t_2[0] = PyThread_allocate_lock(); + __pyx_t_2[1] = PyThread_allocate_lock(); + __pyx_t_2[2] = PyThread_allocate_lock(); + __pyx_t_2[3] = PyThread_allocate_lock(); + __pyx_t_2[4] = PyThread_allocate_lock(); + __pyx_t_2[5] = PyThread_allocate_lock(); + __pyx_t_2[6] = PyThread_allocate_lock(); + __pyx_t_2[7] = PyThread_allocate_lock(); + memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); + + /* "View.MemoryView":549 + * info.obj = self + * + * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 549, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 549, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + PyType_Modified(__pyx_memoryview_type); + + /* "View.MemoryView":995 + * return self.from_object + * + * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 995, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 995, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + PyType_Modified(__pyx_memoryviewslice_type); + + /* "(tree fragment)":1 + * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< + * cdef object __pyx_PickleError + * cdef object __pyx_result + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "(tree fragment)":11 + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) + * return __pyx_result + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< + * __pyx_result.name = __pyx_state[0] + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): + */ + + /*--- Wrapped vars code ---*/ + + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + if (__pyx_m) { + if (__pyx_d) { + __Pyx_AddTraceback("init libmr", __pyx_clineno, __pyx_lineno, __pyx_filename); + } + Py_CLEAR(__pyx_m); + } else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_ImportError, "init libmr"); + } + __pyx_L0:; + 
__Pyx_RefNannyFinishContext(); + #if CYTHON_PEP489_MULTI_PHASE_INIT + return (__pyx_m != NULL) ? 0 : -1; + #elif PY_MAJOR_VERSION >= 3 + return __pyx_m; + #else + return; + #endif +} + +/* --- Runtime support code --- */ +/* Refnanny */ +#if CYTHON_REFNANNY +static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { + PyObject *m = NULL, *p = NULL; + void *r = NULL; + m = PyImport_ImportModule(modname); + if (!m) goto end; + p = PyObject_GetAttrString(m, "RefNannyAPI"); + if (!p) goto end; + r = PyLong_AsVoidPtr(p); +end: + Py_XDECREF(p); + Py_XDECREF(m); + return (__Pyx_RefNannyAPIStruct *)r; +} +#endif + +/* PyObjectGetAttrStr */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro)) + return tp->tp_getattro(obj, attr_name); +#if PY_MAJOR_VERSION < 3 + if (likely(tp->tp_getattr)) + return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); +#endif + return PyObject_GetAttr(obj, attr_name); +} +#endif + +/* GetBuiltinName */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name) { + PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); + if (unlikely(!result)) { + PyErr_Format(PyExc_NameError, +#if PY_MAJOR_VERSION >= 3 + "name '%U' is not defined", name); +#else + "name '%.200s' is not defined", PyString_AS_STRING(name)); +#endif + } + return result; +} + +/* RaiseDoubleKeywords */ +static void __Pyx_RaiseDoubleKeywordsError( + const char* func_name, + PyObject* kw_name) +{ + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION >= 3 + "%s() got multiple values for keyword argument '%U'", func_name, kw_name); + #else + "%s() got multiple values for keyword argument '%s'", func_name, + PyString_AsString(kw_name)); + #endif +} + +/* ParseKeywords */ +static int __Pyx_ParseOptionalKeywords( + PyObject *kwds, + PyObject **argnames[], + PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + const char* function_name) +{ + PyObject *key = 0, *value = 0; + Py_ssize_t pos = 0; + PyObject*** name; + PyObject*** first_kw_arg = argnames + num_pos_args; + while (PyDict_Next(kwds, &pos, &key, &value)) { + name = first_kw_arg; + while (*name && (**name != key)) name++; + if (*name) { + values[name-argnames] = value; + continue; + } + name = first_kw_arg; + #if PY_MAJOR_VERSION < 3 + if (likely(PyString_Check(key))) { + while (*name) { + if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) + && _PyString_Eq(**name, key)) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + if ((**argname == key) || ( + (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) + && _PyString_Eq(**argname, key))) { + goto arg_passed_twice; + } + argname++; + } + } + } else + #endif + if (likely(PyUnicode_Check(key))) { + while (*name) { + int cmp = (**name == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : + #endif + PyUnicode_Compare(**name, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + int cmp = (**argname == key) ? 
0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : + #endif + PyUnicode_Compare(**argname, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) goto arg_passed_twice; + argname++; + } + } + } else + goto invalid_keyword_type; + if (kwds2) { + if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; + } else { + goto invalid_keyword; + } + } + return 0; +arg_passed_twice: + __Pyx_RaiseDoubleKeywordsError(function_name, key); + goto bad; +invalid_keyword_type: + PyErr_Format(PyExc_TypeError, + "%.200s() keywords must be strings", function_name); + goto bad; +invalid_keyword: + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION < 3 + "%.200s() got an unexpected keyword argument '%.200s'", + function_name, PyString_AsString(key)); + #else + "%s() got an unexpected keyword argument '%U'", + function_name, key); + #endif +bad: + return -1; +} + +/* RaiseArgTupleInvalid */ +static void __Pyx_RaiseArgtupleInvalid( + const char* func_name, + int exact, + Py_ssize_t num_min, + Py_ssize_t num_max, + Py_ssize_t num_found) +{ + Py_ssize_t num_expected; + const char *more_or_less; + if (num_found < num_min) { + num_expected = num_min; + more_or_less = "at least"; + } else { + num_expected = num_max; + more_or_less = "at most"; + } + if (exact) { + more_or_less = "exactly"; + } + PyErr_Format(PyExc_TypeError, + "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", + func_name, more_or_less, num_expected, + (num_expected == 1) ? "" : "s", num_found); +} + +/* GetItemInt */ +static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { + PyObject *r; + if (!j) return NULL; + r = PyObject_GetItem(o, j); + Py_DECREF(j); + return r; +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + Py_ssize_t wrapped_i = i; + if (wraparound & unlikely(i < 0)) { + wrapped_i += PyList_GET_SIZE(o); + } + if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { + PyObject *r = PyList_GET_ITEM(o, wrapped_i); + Py_INCREF(r); + return r; + } + return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +#else + return PySequence_GetItem(o, i); +#endif +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + Py_ssize_t wrapped_i = i; + if (wraparound & unlikely(i < 0)) { + wrapped_i += PyTuple_GET_SIZE(o); + } + if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { + PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); + Py_INCREF(r); + return r; + } + return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +#else + return PySequence_GetItem(o, i); +#endif +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS + if (is_list || PyList_CheckExact(o)) { + Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyList_GET_SIZE(o); + if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { + PyObject *r = PyList_GET_ITEM(o, n); + Py_INCREF(r); + return r; + } + } + else if (PyTuple_CheckExact(o)) { + Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o); + if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { + PyObject *r = PyTuple_GET_ITEM(o, n); + Py_INCREF(r); + return r; + } + } else { + PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; + if (likely(m && m->sq_item)) { + if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { + Py_ssize_t l = m->sq_length(o); + if (likely(l >= 0)) { + i += l; + } else { + if (!PyErr_ExceptionMatches(PyExc_OverflowError)) + return NULL; + PyErr_Clear(); + } + } + return m->sq_item(o, i); + } + } +#else + if (is_list || PySequence_Check(o)) { + return PySequence_GetItem(o, i); + } +#endif + return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +} + +/* PyCFunctionFastCall */ +#if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { + PyCFunctionObject *func = (PyCFunctionObject*)func_obj; + PyCFunction meth = PyCFunction_GET_FUNCTION(func); + PyObject *self = PyCFunction_GET_SELF(func); + int flags = PyCFunction_GET_FLAGS(func); + assert(PyCFunction_Check(func)); + assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); + assert(nargs >= 0); + assert(nargs == 0 || args != NULL); + /* _PyCFunction_FastCallDict() must not be called with an exception set, + because it may clear it (directly or indirectly) and so the + caller loses its exception */ + assert(!PyErr_Occurred()); + if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { + return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); + } else { + return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); + } +} +#endif + +/* PyFunctionFastCall */ +#if CYTHON_FAST_PYCALL +static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, + PyObject *globals) { + PyFrameObject *f; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject **fastlocals; + Py_ssize_t i; + PyObject *result; + assert(globals != NULL); + /* XXX Perhaps we should create a specialized + PyFrame_New() that doesn't take locals, but does + take builtins without sanity checking them. + */ + assert(tstate != NULL); + f = PyFrame_New(tstate, co, globals, NULL); + if (f == NULL) { + return NULL; + } + fastlocals = __Pyx_PyFrame_GetLocalsplus(f); + for (i = 0; i < na; i++) { + Py_INCREF(*args); + fastlocals[i] = *args++; + } + result = PyEval_EvalFrameEx(f,0); + ++tstate->recursion_depth; + Py_DECREF(f); + --tstate->recursion_depth; + return result; +} +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { + PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); + PyObject *globals = PyFunction_GET_GLOBALS(func); + PyObject *argdefs = PyFunction_GET_DEFAULTS(func); + PyObject *closure; +#if PY_MAJOR_VERSION >= 3 + PyObject *kwdefs; +#endif + PyObject *kwtuple, **k; + PyObject **d; + Py_ssize_t nd; + Py_ssize_t nk; + PyObject *result; + assert(kwargs == NULL || PyDict_Check(kwargs)); + nk = kwargs ? 
PyDict_Size(kwargs) : 0; + if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { + return NULL; + } + if ( +#if PY_MAJOR_VERSION >= 3 + co->co_kwonlyargcount == 0 && +#endif + likely(kwargs == NULL || nk == 0) && + co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { + if (argdefs == NULL && co->co_argcount == nargs) { + result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); + goto done; + } + else if (nargs == 0 && argdefs != NULL + && co->co_argcount == Py_SIZE(argdefs)) { + /* function called with no arguments, but all parameters have + a default value: use default values as arguments .*/ + args = &PyTuple_GET_ITEM(argdefs, 0); + result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); + goto done; + } + } + if (kwargs != NULL) { + Py_ssize_t pos, i; + kwtuple = PyTuple_New(2 * nk); + if (kwtuple == NULL) { + result = NULL; + goto done; + } + k = &PyTuple_GET_ITEM(kwtuple, 0); + pos = i = 0; + while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { + Py_INCREF(k[i]); + Py_INCREF(k[i+1]); + i += 2; + } + nk = i / 2; + } + else { + kwtuple = NULL; + k = NULL; + } + closure = PyFunction_GET_CLOSURE(func); +#if PY_MAJOR_VERSION >= 3 + kwdefs = PyFunction_GET_KW_DEFAULTS(func); +#endif + if (argdefs != NULL) { + d = &PyTuple_GET_ITEM(argdefs, 0); + nd = Py_SIZE(argdefs); + } + else { + d = NULL; + nd = 0; + } +#if PY_MAJOR_VERSION >= 3 + result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, + args, (int)nargs, + k, (int)nk, + d, (int)nd, kwdefs, closure); +#else + result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, + args, (int)nargs, + k, (int)nk, + d, (int)nd, closure); +#endif + Py_XDECREF(kwtuple); +done: + Py_LeaveRecursiveCall(); + return result; +} +#endif +#endif + +/* PyObjectCall */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { + PyObject *result; + ternaryfunc call = func->ob_type->tp_call; + if (unlikely(!call)) + return PyObject_Call(func, arg, kw); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = (*call)(func, arg, kw); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyObjectCall2Args */ +static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { + PyObject *args, *result = NULL; + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(function)) { + PyObject *args[2] = {arg1, arg2}; + return __Pyx_PyFunction_FastCall(function, args, 2); + } + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(function)) { + PyObject *args[2] = {arg1, arg2}; + return __Pyx_PyCFunction_FastCall(function, args, 2); + } + #endif + args = PyTuple_New(2); + if (unlikely(!args)) goto done; + Py_INCREF(arg1); + PyTuple_SET_ITEM(args, 0, arg1); + Py_INCREF(arg2); + PyTuple_SET_ITEM(args, 1, arg2); + Py_INCREF(function); + result = __Pyx_PyObject_Call(function, args, NULL); + Py_DECREF(args); + Py_DECREF(function); +done: + return result; +} + +/* PyObjectCallMethO */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { + PyObject *self, *result; + PyCFunction cfunc; + cfunc = PyCFunction_GET_FUNCTION(func); + self = PyCFunction_GET_SELF(func); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a 
Python object"))) + return NULL; + result = cfunc(self, arg); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyObjectCallOneArg */ +#if CYTHON_COMPILING_IN_CPYTHON +static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { + PyObject *result; + PyObject *args = PyTuple_New(1); + if (unlikely(!args)) return NULL; + Py_INCREF(arg); + PyTuple_SET_ITEM(args, 0, arg); + result = __Pyx_PyObject_Call(func, args, NULL); + Py_DECREF(args); + return result; +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { +#if CYTHON_FAST_PYCALL + if (PyFunction_Check(func)) { + return __Pyx_PyFunction_FastCall(func, &arg, 1); + } +#endif + if (likely(PyCFunction_Check(func))) { + if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { + return __Pyx_PyObject_CallMethO(func, arg); +#if CYTHON_FAST_PYCCALL + } else if (__Pyx_PyFastCFunction_Check(func)) { + return __Pyx_PyCFunction_FastCall(func, &arg, 1); +#endif + } + } + return __Pyx__PyObject_CallOneArg(func, arg); +} +#else +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { + PyObject *result; + PyObject *args = PyTuple_Pack(1, arg); + if (unlikely(!args)) return NULL; + result = __Pyx_PyObject_Call(func, args, NULL); + Py_DECREF(args); + return result; +} +#endif + +/* DictGetItem */ +#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY +static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { + PyObject *value; + value = PyDict_GetItemWithError(d, key); + if (unlikely(!value)) { + if (!PyErr_Occurred()) { + if (unlikely(PyTuple_Check(key))) { + PyObject* args = PyTuple_Pack(1, key); + if (likely(args)) { + PyErr_SetObject(PyExc_KeyError, args); + Py_DECREF(args); + } + } else { + PyErr_SetObject(PyExc_KeyError, key); + } + } + return NULL; + } + Py_INCREF(value); + return value; +} +#endif + +/* PyIntCompare */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED long inplace) { + if (op1 == op2) { + Py_RETURN_TRUE; + } + #if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(op1))) { + const long b = intval; + long a = PyInt_AS_LONG(op1); + if (a == b) Py_RETURN_TRUE; else Py_RETURN_FALSE; + } + #endif + #if CYTHON_USE_PYLONG_INTERNALS + if (likely(PyLong_CheckExact(op1))) { + int unequal; + unsigned long uintval; + Py_ssize_t size = Py_SIZE(op1); + const digit* digits = ((PyLongObject*)op1)->ob_digit; + if (intval == 0) { + if (size == 0) Py_RETURN_TRUE; else Py_RETURN_FALSE; + } else if (intval < 0) { + if (size >= 0) + Py_RETURN_FALSE; + intval = -intval; + size = -size; + } else { + if (size <= 0) + Py_RETURN_FALSE; + } + uintval = (unsigned long) intval; +#if PyLong_SHIFT * 4 < SIZEOF_LONG*8 + if (uintval >> (PyLong_SHIFT * 4)) { + unequal = (size != 5) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) + | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[4] != ((uintval >> (4 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); + } else +#endif +#if PyLong_SHIFT * 3 < SIZEOF_LONG*8 + if (uintval >> (PyLong_SHIFT * 3)) { + unequal = (size != 4) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) + | 
(digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); + } else +#endif +#if PyLong_SHIFT * 2 < SIZEOF_LONG*8 + if (uintval >> (PyLong_SHIFT * 2)) { + unequal = (size != 3) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) + | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)) | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); + } else +#endif +#if PyLong_SHIFT * 1 < SIZEOF_LONG*8 + if (uintval >> (PyLong_SHIFT * 1)) { + unequal = (size != 2) || (digits[0] != (uintval & (unsigned long) PyLong_MASK)) + | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)); + } else +#endif + unequal = (size != 1) || (((unsigned long) digits[0]) != (uintval & (unsigned long) PyLong_MASK)); + if (unequal == 0) Py_RETURN_TRUE; else Py_RETURN_FALSE; + } + #endif + if (PyFloat_CheckExact(op1)) { + const long b = intval; + double a = PyFloat_AS_DOUBLE(op1); + if ((double)a == (double)b) Py_RETURN_TRUE; else Py_RETURN_FALSE; + } + return ( + PyObject_RichCompare(op1, op2, Py_EQ)); +} + +/* ObjectGetItem */ +#if CYTHON_USE_TYPE_SLOTS +static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { + PyObject *runerr; + Py_ssize_t key_value; + PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; + if (unlikely(!(m && m->sq_item))) { + PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); + return NULL; + } + key_value = __Pyx_PyIndex_AsSsize_t(index); + if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { + return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); + } + if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { + PyErr_Clear(); + PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); + } + return NULL; +} +static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { + PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; + if (likely(m && m->mp_subscript)) { + return m->mp_subscript(obj, key); + } + return __Pyx_PyObject_GetIndex(obj, key); +} +#endif + +/* PyDictVersioning */ +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; +} +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { + PyObject **dictptr = NULL; + Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; + if (offset) { +#if CYTHON_COMPILING_IN_CPYTHON + dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); +#else + dictptr = _PyObject_GetDictPtr(obj); +#endif + } + return (dictptr && *dictptr) ? 
__PYX_GET_DICT_VERSION(*dictptr) : 0; +} +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) + return 0; + return obj_dict_version == __Pyx_get_object_dict_version(obj); +} +#endif + +/* GetModuleGlobalName */ +#if CYTHON_USE_DICT_VERSIONS +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) +#else +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) +#endif +{ + PyObject *result; +#if !CYTHON_AVOID_BORROWED_REFS +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 + result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } else if (unlikely(PyErr_Occurred())) { + return NULL; + } +#else + result = PyDict_GetItem(__pyx_d, name); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } +#endif +#else + result = PyObject_GetItem(__pyx_d, name); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } + PyErr_Clear(); +#endif + return __Pyx_GetBuiltinName(name); +} + +/* ExtTypeTest */ +static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + if (likely(__Pyx_TypeCheck(obj, type))) + return 1; + PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", + Py_TYPE(obj)->tp_name, type->tp_name); + return 0; +} + +/* IsLittleEndian */ +static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) +{ + union { + uint32_t u32; + uint8_t u8[4]; + } S; + S.u32 = 0x01020304; + return S.u8[0] == 4; +} + +/* BufferFormatCheck */ +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type) { + stack[0].field = &ctx->root; + stack[0].parent_offset = 0; + ctx->root.type = type; + ctx->root.name = "buffer dtype"; + ctx->root.offset = 0; + ctx->head = stack; + ctx->head->field = &ctx->root; + ctx->fmt_offset = 0; + ctx->head->parent_offset = 0; + ctx->new_packmode = '@'; + ctx->enc_packmode = '@'; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->is_complex = 0; + ctx->is_valid_array = 0; + ctx->struct_alignment = 0; + while (type->typegroup == 'S') { + ++ctx->head; + ctx->head->field = type->fields; + ctx->head->parent_offset = 0; + type = type->fields->type; + } +} +static int __Pyx_BufFmt_ParseNumber(const char** ts) { + int count; + const char* t = *ts; + if (*t < '0' || *t > '9') { + return -1; + } else { + count = *t++ - '0'; + while (*t >= '0' && *t <= '9') { + count *= 10; + count += *t++ - '0'; + } + } + *ts = t; + return count; +} +static int __Pyx_BufFmt_ExpectNumber(const char **ts) { + int number = __Pyx_BufFmt_ParseNumber(ts); + if (number == -1) + PyErr_Format(PyExc_ValueError,\ + "Does not understand character buffer dtype format string ('%c')", **ts); + return number; +} +static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { + PyErr_Format(PyExc_ValueError, + "Unexpected format string character: '%c'", ch); +} +static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int 
is_complex) { + switch (ch) { + case '?': return "'bool'"; + case 'c': return "'char'"; + case 'b': return "'signed char'"; + case 'B': return "'unsigned char'"; + case 'h': return "'short'"; + case 'H': return "'unsigned short'"; + case 'i': return "'int'"; + case 'I': return "'unsigned int'"; + case 'l': return "'long'"; + case 'L': return "'unsigned long'"; + case 'q': return "'long long'"; + case 'Q': return "'unsigned long long'"; + case 'f': return (is_complex ? "'complex float'" : "'float'"); + case 'd': return (is_complex ? "'complex double'" : "'double'"); + case 'g': return (is_complex ? "'complex long double'" : "'long double'"); + case 'T': return "a struct"; + case 'O': return "Python object"; + case 'P': return "a pointer"; + case 's': case 'p': return "a string"; + case 0: return "end"; + default: return "unparseable format string"; + } +} +static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return 2; + case 'i': case 'I': case 'l': case 'L': return 4; + case 'q': case 'Q': return 8; + case 'f': return (is_complex ? 8 : 4); + case 'd': return (is_complex ? 16 : 8); + case 'g': { + PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); + return 0; + } + case 'O': case 'P': return sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(short); + case 'i': case 'I': return sizeof(int); + case 'l': case 'L': return sizeof(long); + #ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(PY_LONG_LONG); + #endif + case 'f': return sizeof(float) * (is_complex ? 2 : 1); + case 'd': return sizeof(double) * (is_complex ? 2 : 1); + case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); + case 'O': case 'P': return sizeof(void*); + default: { + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } + } +} +typedef struct { char c; short x; } __Pyx_st_short; +typedef struct { char c; int x; } __Pyx_st_int; +typedef struct { char c; long x; } __Pyx_st_long; +typedef struct { char c; float x; } __Pyx_st_float; +typedef struct { char c; double x; } __Pyx_st_double; +typedef struct { char c; long double x; } __Pyx_st_longdouble; +typedef struct { char c; void *x; } __Pyx_st_void_p; +#ifdef HAVE_LONG_LONG +typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; +#endif +static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); + case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); + case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); +#ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); +#endif + case 'f': return sizeof(__Pyx_st_float) - sizeof(float); + case 'd': return sizeof(__Pyx_st_double) - sizeof(double); + case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); + case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +/* These are for computing the padding at the end of the struct to align + on the first member of the struct. This will probably the same as above, + but we don't have any guarantees. + */ +typedef struct { short x; char c; } __Pyx_pad_short; +typedef struct { int x; char c; } __Pyx_pad_int; +typedef struct { long x; char c; } __Pyx_pad_long; +typedef struct { float x; char c; } __Pyx_pad_float; +typedef struct { double x; char c; } __Pyx_pad_double; +typedef struct { long double x; char c; } __Pyx_pad_longdouble; +typedef struct { void *x; char c; } __Pyx_pad_void_p; +#ifdef HAVE_LONG_LONG +typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; +#endif +static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); + case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); + case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); +#ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); +#endif + case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); + case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); + case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); + case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { + switch (ch) { + case 'c': + return 'H'; + case 'b': case 'h': case 'i': + case 'l': case 'q': case 's': case 'p': + return 'I'; + case '?': case 'B': case 'H': case 'I': case 'L': case 'Q': + return 'U'; + case 'f': case 'd': case 'g': + return (is_complex ? 
'C' : 'R'); + case 'O': + return 'O'; + case 'P': + return 'P'; + default: { + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } + } +} +static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { + if (ctx->head == NULL || ctx->head->field == &ctx->root) { + const char* expected; + const char* quote; + if (ctx->head == NULL) { + expected = "end"; + quote = ""; + } else { + expected = ctx->head->field->type->name; + quote = "'"; + } + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected %s%s%s but got %s", + quote, expected, quote, + __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); + } else { + __Pyx_StructField* field = ctx->head->field; + __Pyx_StructField* parent = (ctx->head - 1)->field; + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", + field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), + parent->type->name, field->name); + } +} +static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { + char group; + size_t size, offset, arraysize = 1; + if (ctx->enc_type == 0) return 0; + if (ctx->head->field->type->arraysize[0]) { + int i, ndim = 0; + if (ctx->enc_type == 's' || ctx->enc_type == 'p') { + ctx->is_valid_array = ctx->head->field->type->ndim == 1; + ndim = 1; + if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { + PyErr_Format(PyExc_ValueError, + "Expected a dimension of size %zu, got %zu", + ctx->head->field->type->arraysize[0], ctx->enc_count); + return -1; + } + } + if (!ctx->is_valid_array) { + PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", + ctx->head->field->type->ndim, ndim); + return -1; + } + for (i = 0; i < ctx->head->field->type->ndim; i++) { + arraysize *= ctx->head->field->type->arraysize[i]; + } + ctx->is_valid_array = 0; + ctx->enc_count = 1; + } + group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); + do { + __Pyx_StructField* field = ctx->head->field; + __Pyx_TypeInfo* type = field->type; + if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { + size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); + } else { + size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); + } + if (ctx->enc_packmode == '@') { + size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); + size_t align_mod_offset; + if (align_at == 0) return -1; + align_mod_offset = ctx->fmt_offset % align_at; + if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; + if (ctx->struct_alignment == 0) + ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, + ctx->is_complex); + } + if (type->size != size || type->typegroup != group) { + if (type->typegroup == 'C' && type->fields != NULL) { + size_t parent_offset = ctx->head->parent_offset + field->offset; + ++ctx->head; + ctx->head->field = type->fields; + ctx->head->parent_offset = parent_offset; + continue; + } + if ((type->typegroup == 'H' || group == 'H') && type->size == size) { + } else { + __Pyx_BufFmt_RaiseExpected(ctx); + return -1; + } + } + offset = ctx->head->parent_offset + field->offset; + if (ctx->fmt_offset != offset) { + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", + (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); + return -1; + } + ctx->fmt_offset += size; + if (arraysize) + ctx->fmt_offset += (arraysize - 1) * size; + --ctx->enc_count; + while (1) { + if 
(field == &ctx->root) { + ctx->head = NULL; + if (ctx->enc_count != 0) { + __Pyx_BufFmt_RaiseExpected(ctx); + return -1; + } + break; + } + ctx->head->field = ++field; + if (field->type == NULL) { + --ctx->head; + field = ctx->head->field; + continue; + } else if (field->type->typegroup == 'S') { + size_t parent_offset = ctx->head->parent_offset + field->offset; + if (field->type->fields->type == NULL) continue; + field = field->type->fields; + ++ctx->head; + ctx->head->field = field; + ctx->head->parent_offset = parent_offset; + break; + } else { + break; + } + } + } while (ctx->enc_count); + ctx->enc_type = 0; + ctx->is_complex = 0; + return 0; +} +static PyObject * +__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) +{ + const char *ts = *tsp; + int i = 0, number, ndim; + ++ts; + if (ctx->new_count != 1) { + PyErr_SetString(PyExc_ValueError, + "Cannot handle repeated arrays in format string"); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ndim = ctx->head->field->type->ndim; + while (*ts && *ts != ')') { + switch (*ts) { + case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; + default: break; + } + number = __Pyx_BufFmt_ExpectNumber(&ts); + if (number == -1) return NULL; + if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) + return PyErr_Format(PyExc_ValueError, + "Expected a dimension of size %zu, got %d", + ctx->head->field->type->arraysize[i], number); + if (*ts != ',' && *ts != ')') + return PyErr_Format(PyExc_ValueError, + "Expected a comma in format string, got '%c'", *ts); + if (*ts == ',') ts++; + i++; + } + if (i != ndim) + return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", + ctx->head->field->type->ndim, i); + if (!*ts) { + PyErr_SetString(PyExc_ValueError, + "Unexpected end of format string, expected ')'"); + return NULL; + } + ctx->is_valid_array = 1; + ctx->new_count = 1; + *tsp = ++ts; + return Py_None; +} +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { + int got_Z = 0; + while (1) { + switch(*ts) { + case 0: + if (ctx->enc_type != 0 && ctx->head == NULL) { + __Pyx_BufFmt_RaiseExpected(ctx); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + if (ctx->head != NULL) { + __Pyx_BufFmt_RaiseExpected(ctx); + return NULL; + } + return ts; + case ' ': + case '\r': + case '\n': + ++ts; + break; + case '<': + if (!__Pyx_Is_Little_Endian()) { + PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); + return NULL; + } + ctx->new_packmode = '='; + ++ts; + break; + case '>': + case '!': + if (__Pyx_Is_Little_Endian()) { + PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); + return NULL; + } + ctx->new_packmode = '='; + ++ts; + break; + case '=': + case '@': + case '^': + ctx->new_packmode = *ts++; + break; + case 'T': + { + const char* ts_after_sub; + size_t i, struct_count = ctx->new_count; + size_t struct_alignment = ctx->struct_alignment; + ctx->new_count = 1; + ++ts; + if (*ts != '{') { + PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; + ctx->enc_count = 0; + ctx->struct_alignment = 0; + ++ts; + ts_after_sub = ts; + for (i = 0; i != struct_count; ++i) { + ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); + if (!ts_after_sub) return NULL; + } + ts = ts_after_sub; + if 
(struct_alignment) ctx->struct_alignment = struct_alignment; + } + break; + case '}': + { + size_t alignment = ctx->struct_alignment; + ++ts; + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; + if (alignment && ctx->fmt_offset % alignment) { + ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); + } + } + return ts; + case 'x': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->fmt_offset += ctx->new_count; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->enc_packmode = ctx->new_packmode; + ++ts; + break; + case 'Z': + got_Z = 1; + ++ts; + if (*ts != 'f' && *ts != 'd' && *ts != 'g') { + __Pyx_BufFmt_RaiseUnexpectedChar('Z'); + return NULL; + } + CYTHON_FALLTHROUGH; + case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': + case 'l': case 'L': case 'q': case 'Q': + case 'f': case 'd': case 'g': + case 'O': case 'p': + if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) && + (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) { + ctx->enc_count += ctx->new_count; + ctx->new_count = 1; + got_Z = 0; + ++ts; + break; + } + CYTHON_FALLTHROUGH; + case 's': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_count = ctx->new_count; + ctx->enc_packmode = ctx->new_packmode; + ctx->enc_type = *ts; + ctx->is_complex = got_Z; + ++ts; + ctx->new_count = 1; + got_Z = 0; + break; + case ':': + ++ts; + while(*ts != ':') ++ts; + ++ts; + break; + case '(': + if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; + break; + default: + { + int number = __Pyx_BufFmt_ExpectNumber(&ts); + if (number == -1) return NULL; + ctx->new_count = (size_t)number; + } + } + } +} + +/* BufferGetAndValidate */ + static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { + if (unlikely(info->buf == NULL)) return; + if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; + __Pyx_ReleaseBuffer(info); +} +static void __Pyx_ZeroBuffer(Py_buffer* buf) { + buf->buf = NULL; + buf->obj = NULL; + buf->strides = __Pyx_zeros; + buf->shape = __Pyx_zeros; + buf->suboffsets = __Pyx_minusones; +} +static int __Pyx__GetBufferAndValidate( + Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, + int nd, int cast, __Pyx_BufFmt_StackElem* stack) +{ + buf->buf = NULL; + if (unlikely(__Pyx_GetBuffer(obj, buf, flags) == -1)) { + __Pyx_ZeroBuffer(buf); + return -1; + } + if (unlikely(buf->ndim != nd)) { + PyErr_Format(PyExc_ValueError, + "Buffer has wrong number of dimensions (expected %d, got %d)", + nd, buf->ndim); + goto fail; + } + if (!cast) { + __Pyx_BufFmt_Context ctx; + __Pyx_BufFmt_Init(&ctx, stack, dtype); + if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; + } + if (unlikely((size_t)buf->itemsize != dtype->size)) { + PyErr_Format(PyExc_ValueError, + "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", + buf->itemsize, (buf->itemsize > 1) ? "s" : "", + dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? 
"s" : ""); + goto fail; + } + if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; + return 0; +fail:; + __Pyx_SafeReleaseBuffer(buf); + return -1; +} + +/* BufferIndexError */ + static void __Pyx_RaiseBufferIndexError(int axis) { + PyErr_Format(PyExc_IndexError, + "Out of bounds on buffer access (axis %d)", axis); +} + +/* PyErrFetchRestore */ + #if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + tmp_type = tstate->curexc_type; + tmp_value = tstate->curexc_value; + tmp_tb = tstate->curexc_traceback; + tstate->curexc_type = type; + tstate->curexc_value = value; + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + *type = tstate->curexc_type; + *value = tstate->curexc_value; + *tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +} +#endif + +/* MemviewSliceInit */ + static int +__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, + int ndim, + __Pyx_memviewslice *memviewslice, + int memview_is_new_reference) +{ + __Pyx_RefNannyDeclarations + int i, retval=-1; + Py_buffer *buf = &memview->view; + __Pyx_RefNannySetupContext("init_memviewslice", 0); + if (unlikely(memviewslice->memview || memviewslice->data)) { + PyErr_SetString(PyExc_ValueError, + "memviewslice is already initialized!"); + goto fail; + } + if (buf->strides) { + for (i = 0; i < ndim; i++) { + memviewslice->strides[i] = buf->strides[i]; + } + } else { + Py_ssize_t stride = buf->itemsize; + for (i = ndim - 1; i >= 0; i--) { + memviewslice->strides[i] = stride; + stride *= buf->shape[i]; + } + } + for (i = 0; i < ndim; i++) { + memviewslice->shape[i] = buf->shape[i]; + if (buf->suboffsets) { + memviewslice->suboffsets[i] = buf->suboffsets[i]; + } else { + memviewslice->suboffsets[i] = -1; + } + } + memviewslice->memview = memview; + memviewslice->data = (char *)buf->buf; + if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { + Py_INCREF(memview); + } + retval = 0; + goto no_fail; +fail: + memviewslice->memview = 0; + memviewslice->data = 0; + retval = -1; +no_fail: + __Pyx_RefNannyFinishContext(); + return retval; +} +#ifndef Py_NO_RETURN +#define Py_NO_RETURN +#endif +static void __pyx_fatalerror(const char *fmt, ...) 
Py_NO_RETURN { + va_list vargs; + char msg[200]; +#ifdef HAVE_STDARG_PROTOTYPES + va_start(vargs, fmt); +#else + va_start(vargs); +#endif + vsnprintf(msg, 200, fmt, vargs); + va_end(vargs); + Py_FatalError(msg); +} +static CYTHON_INLINE int +__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, + PyThread_type_lock lock) +{ + int result; + PyThread_acquire_lock(lock, 1); + result = (*acquisition_count)++; + PyThread_release_lock(lock); + return result; +} +static CYTHON_INLINE int +__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, + PyThread_type_lock lock) +{ + int result; + PyThread_acquire_lock(lock, 1); + result = (*acquisition_count)--; + PyThread_release_lock(lock); + return result; +} +static CYTHON_INLINE void +__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) +{ + int first_time; + struct __pyx_memoryview_obj *memview = memslice->memview; + if (unlikely(!memview || (PyObject *) memview == Py_None)) + return; + if (unlikely(__pyx_get_slice_count(memview) < 0)) + __pyx_fatalerror("Acquisition count is %d (line %d)", + __pyx_get_slice_count(memview), lineno); + first_time = __pyx_add_acquisition_count(memview) == 0; + if (unlikely(first_time)) { + if (have_gil) { + Py_INCREF((PyObject *) memview); + } else { + PyGILState_STATE _gilstate = PyGILState_Ensure(); + Py_INCREF((PyObject *) memview); + PyGILState_Release(_gilstate); + } + } +} +static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, + int have_gil, int lineno) { + int last_time; + struct __pyx_memoryview_obj *memview = memslice->memview; + if (unlikely(!memview || (PyObject *) memview == Py_None)) { + memslice->memview = NULL; + return; + } + if (unlikely(__pyx_get_slice_count(memview) <= 0)) + __pyx_fatalerror("Acquisition count is %d (line %d)", + __pyx_get_slice_count(memview), lineno); + last_time = __pyx_sub_acquisition_count(memview) == 1; + memslice->data = NULL; + if (unlikely(last_time)) { + if (have_gil) { + Py_CLEAR(memslice->memview); + } else { + PyGILState_STATE _gilstate = PyGILState_Ensure(); + Py_CLEAR(memslice->memview); + PyGILState_Release(_gilstate); + } + } else { + memslice->memview = NULL; + } +} + +/* RaiseException */ + #if PY_MAJOR_VERSION < 3 +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, + CYTHON_UNUSED PyObject *cause) { + __Pyx_PyThreadState_declare + Py_XINCREF(type); + if (!value || value == Py_None) + value = NULL; + else + Py_INCREF(value); + if (!tb || tb == Py_None) + tb = NULL; + else { + Py_INCREF(tb); + if (!PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto raise_error; + } + } + if (PyType_Check(type)) { +#if CYTHON_COMPILING_IN_PYPY + if (!value) { + Py_INCREF(Py_None); + value = Py_None; + } +#endif + PyErr_NormalizeException(&type, &value, &tb); + } else { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto raise_error; + } + value = type; + type = (PyObject*) Py_TYPE(type); + Py_INCREF(type); + if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto raise_error; + } + } + __Pyx_PyThreadState_assign + __Pyx_ErrRestore(type, value, tb); + return; +raise_error: + Py_XDECREF(value); + Py_XDECREF(type); + Py_XDECREF(tb); + return; +} +#else +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject 
*cause) { + PyObject* owned_instance = NULL; + if (tb == Py_None) { + tb = 0; + } else if (tb && !PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto bad; + } + if (value == Py_None) + value = 0; + if (PyExceptionInstance_Check(type)) { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto bad; + } + value = type; + type = (PyObject*) Py_TYPE(value); + } else if (PyExceptionClass_Check(type)) { + PyObject *instance_class = NULL; + if (value && PyExceptionInstance_Check(value)) { + instance_class = (PyObject*) Py_TYPE(value); + if (instance_class != type) { + int is_subclass = PyObject_IsSubclass(instance_class, type); + if (!is_subclass) { + instance_class = NULL; + } else if (unlikely(is_subclass == -1)) { + goto bad; + } else { + type = instance_class; + } + } + } + if (!instance_class) { + PyObject *args; + if (!value) + args = PyTuple_New(0); + else if (PyTuple_Check(value)) { + Py_INCREF(value); + args = value; + } else + args = PyTuple_Pack(1, value); + if (!args) + goto bad; + owned_instance = PyObject_Call(type, args, NULL); + Py_DECREF(args); + if (!owned_instance) + goto bad; + value = owned_instance; + if (!PyExceptionInstance_Check(value)) { + PyErr_Format(PyExc_TypeError, + "calling %R should have returned an instance of " + "BaseException, not %R", + type, Py_TYPE(value)); + goto bad; + } + } + } else { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto bad; + } + if (cause) { + PyObject *fixed_cause; + if (cause == Py_None) { + fixed_cause = NULL; + } else if (PyExceptionClass_Check(cause)) { + fixed_cause = PyObject_CallObject(cause, NULL); + if (fixed_cause == NULL) + goto bad; + } else if (PyExceptionInstance_Check(cause)) { + fixed_cause = cause; + Py_INCREF(fixed_cause); + } else { + PyErr_SetString(PyExc_TypeError, + "exception causes must derive from " + "BaseException"); + goto bad; + } + PyException_SetCause(value, fixed_cause); + } + PyErr_SetObject(type, value); + if (tb) { +#if CYTHON_COMPILING_IN_PYPY + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); + Py_INCREF(tb); + PyErr_Restore(tmp_type, tmp_value, tb); + Py_XDECREF(tmp_tb); +#else + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject* tmp_tb = tstate->curexc_traceback; + if (tb != tmp_tb) { + Py_INCREF(tb); + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_tb); + } +#endif + } +bad: + Py_XDECREF(owned_instance); + return; +} +#endif + +/* ArgTypeTest */ + static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) +{ + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + else if (exact) { + #if PY_MAJOR_VERSION == 2 + if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; + #endif + } + else { + if (likely(__Pyx_TypeCheck(obj, type))) return 1; + } + PyErr_Format(PyExc_TypeError, + "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", + name, type->tp_name, Py_TYPE(obj)->tp_name); + return 0; +} + +/* PyObjectCallNoArg */ + #if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { +#if CYTHON_FAST_PYCALL + if (PyFunction_Check(func)) { + return __Pyx_PyFunction_FastCall(func, NULL, 0); + } +#endif +#ifdef __Pyx_CyFunction_USED + if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func))) +#else 
+ if (likely(PyCFunction_Check(func))) +#endif + { + if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { + return __Pyx_PyObject_CallMethO(func, NULL); + } + } + return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL); +} +#endif + +/* GetTopmostException */ + #if CYTHON_USE_EXC_INFO_STACK +static _PyErr_StackItem * +__Pyx_PyErr_GetTopmostException(PyThreadState *tstate) +{ + _PyErr_StackItem *exc_info = tstate->exc_info; + while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && + exc_info->previous_item != NULL) + { + exc_info = exc_info->previous_item; + } + return exc_info; +} +#endif + +/* SaveResetException */ + #if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); + *type = exc_info->exc_type; + *value = exc_info->exc_value; + *tb = exc_info->exc_traceback; + #else + *type = tstate->exc_type; + *value = tstate->exc_value; + *tb = tstate->exc_traceback; + #endif + Py_XINCREF(*type); + Py_XINCREF(*value); + Py_XINCREF(*tb); +} +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = type; + exc_info->exc_value = value; + exc_info->exc_traceback = tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = type; + tstate->exc_value = value; + tstate->exc_traceback = tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +#endif + +/* PyErrExceptionMatches */ + #if CYTHON_FAST_THREAD_STATE +static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(tuple); +#if PY_MAJOR_VERSION >= 3 + for (i=0; i<n; i++) { + if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; + } +#endif + for (i=0; i<n; i++) { + if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; + } + return 0; +} +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { + PyObject *exc_type = tstate->curexc_type; + if (exc_type == err) return 1; + if (unlikely(!exc_type)) return 0; + if (unlikely(PyTuple_Check(err))) + return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); + return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); +} +#endif + +/* GetException */ + #if CYTHON_FAST_THREAD_STATE +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) +#endif +{ + PyObject *local_type, *local_value, *local_tb; +#if CYTHON_FAST_THREAD_STATE + PyObject *tmp_type, *tmp_value, *tmp_tb; + local_type = tstate->curexc_type; + local_value = tstate->curexc_value; + local_tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +#else + PyErr_Fetch(&local_type, &local_value, &local_tb); +#endif + PyErr_NormalizeException(&local_type, &local_value, &local_tb); +#if CYTHON_FAST_THREAD_STATE + if (unlikely(tstate->curexc_type)) +#else + if (unlikely(PyErr_Occurred())) +#endif + goto bad; + #if PY_MAJOR_VERSION >= 3 + if (local_tb) { + if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) + goto bad; + } + #endif + Py_XINCREF(local_tb); + Py_XINCREF(local_type); + Py_XINCREF(local_value); + *type = local_type; + *value = local_value; + *tb = local_tb; +#if CYTHON_FAST_THREAD_STATE + #if
CYTHON_USE_EXC_INFO_STACK + { + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = local_type; + exc_info->exc_value = local_value; + exc_info->exc_traceback = local_tb; + } + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = local_type; + tstate->exc_value = local_value; + tstate->exc_traceback = local_tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +#else + PyErr_SetExcInfo(local_type, local_value, local_tb); +#endif + return 0; +bad: + *type = 0; + *value = 0; + *tb = 0; + Py_XDECREF(local_type); + Py_XDECREF(local_value); + Py_XDECREF(local_tb); + return -1; +} + +/* BytesEquals */ + static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { +#if CYTHON_COMPILING_IN_PYPY + return PyObject_RichCompareBool(s1, s2, equals); +#else + if (s1 == s2) { + return (equals == Py_EQ); + } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { + const char *ps1, *ps2; + Py_ssize_t length = PyBytes_GET_SIZE(s1); + if (length != PyBytes_GET_SIZE(s2)) + return (equals == Py_NE); + ps1 = PyBytes_AS_STRING(s1); + ps2 = PyBytes_AS_STRING(s2); + if (ps1[0] != ps2[0]) { + return (equals == Py_NE); + } else if (length == 1) { + return (equals == Py_EQ); + } else { + int result; +#if CYTHON_USE_UNICODE_INTERNALS + Py_hash_t hash1, hash2; + hash1 = ((PyBytesObject*)s1)->ob_shash; + hash2 = ((PyBytesObject*)s2)->ob_shash; + if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { + return (equals == Py_NE); + } +#endif + result = memcmp(ps1, ps2, (size_t)length); + return (equals == Py_EQ) ? (result == 0) : (result != 0); + } + } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { + return (equals == Py_NE); + } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { + return (equals == Py_NE); + } else { + int result; + PyObject* py_result = PyObject_RichCompare(s1, s2, equals); + if (!py_result) + return -1; + result = __Pyx_PyObject_IsTrue(py_result); + Py_DECREF(py_result); + return result; + } +#endif +} + +/* UnicodeEquals */ + static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { +#if CYTHON_COMPILING_IN_PYPY + return PyObject_RichCompareBool(s1, s2, equals); +#else +#if PY_MAJOR_VERSION < 3 + PyObject* owned_ref = NULL; +#endif + int s1_is_unicode, s2_is_unicode; + if (s1 == s2) { + goto return_eq; + } + s1_is_unicode = PyUnicode_CheckExact(s1); + s2_is_unicode = PyUnicode_CheckExact(s2); +#if PY_MAJOR_VERSION < 3 + if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { + owned_ref = PyUnicode_FromObject(s2); + if (unlikely(!owned_ref)) + return -1; + s2 = owned_ref; + s2_is_unicode = 1; + } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { + owned_ref = PyUnicode_FromObject(s1); + if (unlikely(!owned_ref)) + return -1; + s1 = owned_ref; + s1_is_unicode = 1; + } else if (((!s2_is_unicode) & (!s1_is_unicode))) { + return __Pyx_PyBytes_Equals(s1, s2, equals); + } +#endif + if (s1_is_unicode & s2_is_unicode) { + Py_ssize_t length; + int kind; + void *data1, *data2; + if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) + return -1; + length = __Pyx_PyUnicode_GET_LENGTH(s1); + if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { + goto return_ne; + } +#if CYTHON_USE_UNICODE_INTERNALS + { + Py_hash_t hash1, hash2; + #if CYTHON_PEP393_ENABLED + hash1 
= ((PyASCIIObject*)s1)->hash; + hash2 = ((PyASCIIObject*)s2)->hash; + #else + hash1 = ((PyUnicodeObject*)s1)->hash; + hash2 = ((PyUnicodeObject*)s2)->hash; + #endif + if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { + goto return_ne; + } + } +#endif + kind = __Pyx_PyUnicode_KIND(s1); + if (kind != __Pyx_PyUnicode_KIND(s2)) { + goto return_ne; + } + data1 = __Pyx_PyUnicode_DATA(s1); + data2 = __Pyx_PyUnicode_DATA(s2); + if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { + goto return_ne; + } else if (length == 1) { + goto return_eq; + } else { + int result = memcmp(data1, data2, (size_t)(length * kind)); + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + return (equals == Py_EQ) ? (result == 0) : (result != 0); + } + } else if ((s1 == Py_None) & s2_is_unicode) { + goto return_ne; + } else if ((s2 == Py_None) & s1_is_unicode) { + goto return_ne; + } else { + int result; + PyObject* py_result = PyObject_RichCompare(s1, s2, equals); + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + if (!py_result) + return -1; + result = __Pyx_PyObject_IsTrue(py_result); + Py_DECREF(py_result); + return result; + } +return_eq: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + return (equals == Py_EQ); +return_ne: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + return (equals == Py_NE); +#endif +} + +/* None */ + static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { + Py_ssize_t q = a / b; + Py_ssize_t r = a - q*b; + q -= ((r != 0) & ((r ^ b) < 0)); + return q; +} + +/* GetAttr */ + static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { +#if CYTHON_USE_TYPE_SLOTS +#if PY_MAJOR_VERSION >= 3 + if (likely(PyUnicode_Check(n))) +#else + if (likely(PyString_Check(n))) +#endif + return __Pyx_PyObject_GetAttrStr(o, n); +#endif + return PyObject_GetAttr(o, n); +} + +/* decode_c_string */ + static CYTHON_INLINE PyObject* __Pyx_decode_c_string( + const char* cstring, Py_ssize_t start, Py_ssize_t stop, + const char* encoding, const char* errors, + PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { + Py_ssize_t length; + if (unlikely((start < 0) | (stop < 0))) { + size_t slen = strlen(cstring); + if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { + PyErr_SetString(PyExc_OverflowError, + "c-string too long to convert to Python"); + return NULL; + } + length = (Py_ssize_t) slen; + if (start < 0) { + start += length; + if (start < 0) + start = 0; + } + if (stop < 0) + stop += length; + } + if (unlikely(stop <= start)) + return __Pyx_NewRef(__pyx_empty_unicode); + length = stop - start; + cstring += start; + if (decode_func) { + return decode_func(cstring, length, errors); + } else { + return PyUnicode_Decode(cstring, length, encoding, errors); + } +} + +/* GetAttr3 */ + static PyObject *__Pyx_GetAttr3Default(PyObject *d) { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) + return NULL; + __Pyx_PyErr_Clear(); + Py_INCREF(d); + return d; +} +static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { + PyObject *r = __Pyx_GetAttr(o, n); + return (likely(r)) ? 
r : __Pyx_GetAttr3Default(d); +} + +/* RaiseTooManyValuesToUnpack */ + static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { + PyErr_Format(PyExc_ValueError, + "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); +} + +/* RaiseNeedMoreValuesToUnpack */ + static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { + PyErr_Format(PyExc_ValueError, + "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", + index, (index == 1) ? "" : "s"); +} + +/* RaiseNoneIterError */ + static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); +} + +/* SwapException */ + #if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = *type; + exc_info->exc_value = *value; + exc_info->exc_traceback = *tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = *type; + tstate->exc_value = *value; + tstate->exc_traceback = *tb; + #endif + *type = tmp_type; + *value = tmp_value; + *tb = tmp_tb; +} +#else +static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); + PyErr_SetExcInfo(*type, *value, *tb); + *type = tmp_type; + *value = tmp_value; + *tb = tmp_tb; +} +#endif + +/* Import */ + static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { + PyObject *empty_list = 0; + PyObject *module = 0; + PyObject *global_dict = 0; + PyObject *empty_dict = 0; + PyObject *list; + #if PY_MAJOR_VERSION < 3 + PyObject *py_import; + py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); + if (!py_import) + goto bad; + #endif + if (from_list) + list = from_list; + else { + empty_list = PyList_New(0); + if (!empty_list) + goto bad; + list = empty_list; + } + global_dict = PyModule_GetDict(__pyx_m); + if (!global_dict) + goto bad; + empty_dict = PyDict_New(); + if (!empty_dict) + goto bad; + { + #if PY_MAJOR_VERSION >= 3 + if (level == -1) { + if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, 1); + if (!module) { + if (!PyErr_ExceptionMatches(PyExc_ImportError)) + goto bad; + PyErr_Clear(); + } + } + level = 0; + } + #endif + if (!module) { + #if PY_MAJOR_VERSION < 3 + PyObject *py_level = PyInt_FromLong(level); + if (!py_level) + goto bad; + module = PyObject_CallFunctionObjArgs(py_import, + name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); + Py_DECREF(py_level); + #else + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, level); + #endif + } + } +bad: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(py_import); + #endif + Py_XDECREF(empty_list); + Py_XDECREF(empty_dict); + return module; +} + +/* FastTypeChecks */ + #if CYTHON_COMPILING_IN_CPYTHON +static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { + while (a) { + a = a->tp_base; + if (a == b) + return 1; + } + return b == &PyBaseObject_Type; +} +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { + 
PyObject *mro; + if (a == b) return 1; + mro = a->tp_mro; + if (likely(mro)) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(mro); + for (i = 0; i < n; i++) { + if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) + return 1; + } + return 0; + } + return __Pyx_InBases(a, b); +} +#if PY_MAJOR_VERSION == 2 +static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { + PyObject *exception, *value, *tb; + int res; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&exception, &value, &tb); + res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + if (!res) { + res = PyObject_IsSubclass(err, exc_type2); + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + } + __Pyx_ErrRestore(exception, value, tb); + return res; +} +#else +static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { + int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; + if (!res) { + res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); + } + return res; +} +#endif +static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { + Py_ssize_t i, n; + assert(PyExceptionClass_Check(exc_type)); + n = PyTuple_GET_SIZE(tuple); +#if PY_MAJOR_VERSION >= 3 + for (i=0; i<n; i++) { + if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; + } +#endif + for (i=0; i<n; i++) { + PyObject *t = PyTuple_GET_ITEM(tuple, i); + #if PY_MAJOR_VERSION < 3 + if (likely(exc_type == t)) return 1; + #endif + if (likely(PyExceptionClass_Check(t))) { + if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; + } else { + } + } + return 0; +} +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { + if (likely(PyExceptionClass_Check(exc_type))) { + return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); + } else if (likely(PyTuple_Check(exc_type))) { + return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); + } else { + } + return PyErr_GivenExceptionMatches(err, exc_type); +} +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { + assert(PyExceptionClass_Check(exc_type1)); + assert(PyExceptionClass_Check(exc_type2)); + if (likely(err == exc_type1 || err == exc_type2)) return 1; + if (likely(PyExceptionClass_Check(err))) { + return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); + } + return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); +} +#endif + +/* PyIntBinop */ + #if !CYTHON_COMPILING_IN_PYPY +static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { + (void)inplace; + (void)zerodivision_check; + #if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(op1))) { + const long b = intval; + long x; + long a = PyInt_AS_LONG(op1); + x = (long)((unsigned long)a + b); + if (likely((x^a) >= 0 || (x^b) >= 0)) + return PyInt_FromLong(x); + return PyLong_Type.tp_as_number->nb_add(op1, op2); + } + #endif + #if CYTHON_USE_PYLONG_INTERNALS + if (likely(PyLong_CheckExact(op1))) { + const long b = intval; + long a, x; +#ifdef HAVE_LONG_LONG + const PY_LONG_LONG llb = intval; + PY_LONG_LONG lla, llx; +#endif + const digit* digits = ((PyLongObject*)op1)->ob_digit; + const Py_ssize_t size = Py_SIZE(op1); + if (likely(__Pyx_sst_abs(size) <= 1)) { + a = likely(size) ?
digits[0] : 0; + if (size == -1) a = -a; + } else { + switch (size) { + case -2: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { + lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + case 2: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { + lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + case -3: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { + lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + case 3: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { + lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + case -4: + if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { + lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + case 4: + if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { + lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + default: return PyLong_Type.tp_as_number->nb_add(op1, op2); + } + } + x = a + b; + return PyLong_FromLong(x); +#ifdef HAVE_LONG_LONG + long_long: + llx = lla + llb; + return PyLong_FromLongLong(llx); +#endif + + + } + #endif + if (PyFloat_CheckExact(op1)) { + const long b = intval; + double a = PyFloat_AS_DOUBLE(op1); + double result; + PyFPE_START_PROTECT("add", return NULL) + 
result = ((double)a) + (double)b; + PyFPE_END_PROTECT(result) + return PyFloat_FromDouble(result); + } + return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); +} +#endif + +/* None */ + static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { + PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); +} + +/* None */ + static CYTHON_INLINE long __Pyx_div_long(long a, long b) { + long q = a / b; + long r = a - q*b; + q -= ((r != 0) & ((r ^ b) < 0)); + return q; +} + +/* ImportFrom */ + static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { + PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); + if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Format(PyExc_ImportError, + #if PY_MAJOR_VERSION < 3 + "cannot import name %.230s", PyString_AS_STRING(name)); + #else + "cannot import name %S", name); + #endif + } + return value; +} + +/* HasAttr */ + static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { + PyObject *r; + if (unlikely(!__Pyx_PyBaseString_Check(n))) { + PyErr_SetString(PyExc_TypeError, + "hasattr(): attribute name must be string"); + return -1; + } + r = __Pyx_GetAttr(o, n); + if (unlikely(!r)) { + PyErr_Clear(); + return 0; + } else { + Py_DECREF(r); + return 1; + } +} + +/* PyObject_GenericGetAttrNoDict */ + #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { + PyErr_Format(PyExc_AttributeError, +#if PY_MAJOR_VERSION >= 3 + "'%.50s' object has no attribute '%U'", + tp->tp_name, attr_name); +#else + "'%.50s' object has no attribute '%.400s'", + tp->tp_name, PyString_AS_STRING(attr_name)); +#endif + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { + PyObject *descr; + PyTypeObject *tp = Py_TYPE(obj); + if (unlikely(!PyString_Check(attr_name))) { + return PyObject_GenericGetAttr(obj, attr_name); + } + assert(!tp->tp_dictoffset); + descr = _PyType_Lookup(tp, attr_name); + if (unlikely(!descr)) { + return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); + } + Py_INCREF(descr); + #if PY_MAJOR_VERSION < 3 + if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) + #endif + { + descrgetfunc f = Py_TYPE(descr)->tp_descr_get; + if (unlikely(f)) { + PyObject *res = f(descr, obj, (PyObject *)tp); + Py_DECREF(descr); + return res; + } + } + return descr; +} +#endif + +/* PyObject_GenericGetAttr */ + #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { + if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { + return PyObject_GenericGetAttr(obj, attr_name); + } + return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); +} +#endif + +/* PyObjectGetAttrStrNoError */ + static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) + __Pyx_PyErr_Clear(); +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { + PyObject *result; +#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1 + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { + return _PyObject_GenericGetAttrWithDict(obj, 
attr_name, NULL, 1); + } +#endif + result = __Pyx_PyObject_GetAttrStr(obj, attr_name); + if (unlikely(!result)) { + __Pyx_PyObject_GetAttrStr_ClearAttributeError(); + } + return result; +} + +/* SetupReduce */ + static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { + int ret; + PyObject *name_attr; + name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); + if (likely(name_attr)) { + ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); + } else { + ret = -1; + } + if (unlikely(ret < 0)) { + PyErr_Clear(); + ret = 0; + } + Py_XDECREF(name_attr); + return ret; +} +static int __Pyx_setup_reduce(PyObject* type_obj) { + int ret = 0; + PyObject *object_reduce = NULL; + PyObject *object_reduce_ex = NULL; + PyObject *reduce = NULL; + PyObject *reduce_ex = NULL; + PyObject *reduce_cython = NULL; + PyObject *setstate = NULL; + PyObject *setstate_cython = NULL; +#if CYTHON_USE_PYTYPE_LOOKUP + if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; +#else + if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; +#endif +#if CYTHON_USE_PYTYPE_LOOKUP + object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; +#else + object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; +#endif + reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD; + if (reduce_ex == object_reduce_ex) { +#if CYTHON_USE_PYTYPE_LOOKUP + object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; +#else + object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; +#endif + reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD; + if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { + reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython); + if (likely(reduce_cython)) { + ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + } else if (reduce == object_reduce || PyErr_Occurred()) { + goto __PYX_BAD; + } + setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); + if (!setstate) PyErr_Clear(); + if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { + setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython); + if (likely(setstate_cython)) { + ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + } else if (!setstate || PyErr_Occurred()) { + goto __PYX_BAD; + } + } + PyType_Modified((PyTypeObject*)type_obj); + } + } + goto __PYX_GOOD; +__PYX_BAD: + if (!PyErr_Occurred()) + PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); + ret = -1; +__PYX_GOOD: +#if !CYTHON_USE_PYTYPE_LOOKUP + Py_XDECREF(object_reduce); + Py_XDECREF(object_reduce_ex); +#endif + Py_XDECREF(reduce); + Py_XDECREF(reduce_ex); + Py_XDECREF(reduce_cython); 
+ Py_XDECREF(setstate); + Py_XDECREF(setstate_cython); + return ret; +} + +/* SetVTable */ + static int __Pyx_SetVtable(PyObject *dict, void *vtable) { +#if PY_VERSION_HEX >= 0x02070000 + PyObject *ob = PyCapsule_New(vtable, 0, 0); +#else + PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); +#endif + if (!ob) + goto bad; + if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) + goto bad; + Py_DECREF(ob); + return 0; +bad: + Py_XDECREF(ob); + return -1; +} + +/* TypeImport */ + #ifndef __PYX_HAVE_RT_ImportType +#define __PYX_HAVE_RT_ImportType +static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name, + size_t size, enum __Pyx_ImportType_CheckSize check_size) +{ + PyObject *result = 0; + char warning[200]; + Py_ssize_t basicsize; +#ifdef Py_LIMITED_API + PyObject *py_basicsize; +#endif + result = PyObject_GetAttrString(module, class_name); + if (!result) + goto bad; + if (!PyType_Check(result)) { + PyErr_Format(PyExc_TypeError, + "%.200s.%.200s is not a type object", + module_name, class_name); + goto bad; + } +#ifndef Py_LIMITED_API + basicsize = ((PyTypeObject *)result)->tp_basicsize; +#else + py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); + if (!py_basicsize) + goto bad; + basicsize = PyLong_AsSsize_t(py_basicsize); + Py_DECREF(py_basicsize); + py_basicsize = 0; + if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) + goto bad; +#endif + if ((size_t)basicsize < size) { + PyErr_Format(PyExc_ValueError, + "%.200s.%.200s size changed, may indicate binary incompatibility. " + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize); + goto bad; + } + if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) { + PyErr_Format(PyExc_ValueError, + "%.200s.%.200s size changed, may indicate binary incompatibility. " + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize); + goto bad; + } + else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) { + PyOS_snprintf(warning, sizeof(warning), + "%s.%s size changed, may indicate binary incompatibility. " + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize); + if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; + } + return (PyTypeObject *)result; +bad: + Py_XDECREF(result); + return NULL; +} +#endif + +/* CLineInTraceback */ + #ifndef CYTHON_CLINE_IN_TRACEBACK +static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { + PyObject *use_cline; + PyObject *ptype, *pvalue, *ptraceback; +#if CYTHON_COMPILING_IN_CPYTHON + PyObject **cython_runtime_dict; +#endif + if (unlikely(!__pyx_cython_runtime)) { + return c_line; + } + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); +#if CYTHON_COMPILING_IN_CPYTHON + cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); + if (likely(cython_runtime_dict)) { + __PYX_PY_DICT_LOOKUP_IF_MODIFIED( + use_cline, *cython_runtime_dict, + __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) + } else +#endif + { + PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); + if (use_cline_obj) { + use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; + Py_DECREF(use_cline_obj); + } else { + PyErr_Clear(); + use_cline = NULL; + } + } + if (!use_cline) { + c_line = 0; + PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); + } + else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { + c_line = 0; + } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); + return c_line; +} +#endif + +/* CodeObjectCache */ + static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { + int start = 0, mid = 0, end = count - 1; + if (end >= 0 && code_line > entries[end].code_line) { + return count; + } + while (start < end) { + mid = start + (end - start) / 2; + if (code_line < entries[mid].code_line) { + end = mid; + } else if (code_line > entries[mid].code_line) { + start = mid + 1; + } else { + return mid; + } + } + if (code_line <= entries[mid].code_line) { + return mid; + } else { + return mid + 1; + } +} +static PyCodeObject *__pyx_find_code_object(int code_line) { + PyCodeObject* code_object; + int pos; + if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { + return NULL; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { + return NULL; + } + code_object = __pyx_code_cache.entries[pos].code_object; + Py_INCREF(code_object); + return code_object; +} +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { + int pos, i; + __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; + if (unlikely(!code_line)) { + return; + } + if (unlikely(!entries)) { + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); + if (likely(entries)) { + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = 64; + __pyx_code_cache.count = 1; + entries[0].code_line = code_line; + entries[0].code_object = code_object; + Py_INCREF(code_object); + } + return; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { + PyCodeObject* tmp = entries[pos].code_object; + entries[pos].code_object = code_object; + Py_DECREF(tmp); + return; + } + if (__pyx_code_cache.count == __pyx_code_cache.max_count) { + int new_max = __pyx_code_cache.max_count + 64; + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( + __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); + if (unlikely(!entries)) { + return; + } + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = new_max; + } + for (i=__pyx_code_cache.count; i>pos; i--) { + entries[i] = entries[i-1]; + } + entries[pos].code_line = code_line; + entries[pos].code_object = code_object; + __pyx_code_cache.count++; + Py_INCREF(code_object); +} + +/* AddTraceback */ + #include "compile.h" +#include "frameobject.h" +#include "traceback.h" +static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( + const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyObject *py_srcfile = 0; + PyObject *py_funcname = 0; + #if PY_MAJOR_VERSION < 3 + py_srcfile = PyString_FromString(filename); + #else + py_srcfile = PyUnicode_FromString(filename); + #endif + if (!py_srcfile) goto bad; + if (c_line) { + #if PY_MAJOR_VERSION < 3 + py_funcname = 
PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #else + py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #endif + } + else { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromString(funcname); + #else + py_funcname = PyUnicode_FromString(funcname); + #endif + } + if (!py_funcname) goto bad; + py_code = __Pyx_PyCode_New( + 0, + 0, + 0, + 0, + 0, + __pyx_empty_bytes, /*PyObject *code,*/ + __pyx_empty_tuple, /*PyObject *consts,*/ + __pyx_empty_tuple, /*PyObject *names,*/ + __pyx_empty_tuple, /*PyObject *varnames,*/ + __pyx_empty_tuple, /*PyObject *freevars,*/ + __pyx_empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + py_line, + __pyx_empty_bytes /*PyObject *lnotab*/ + ); + Py_DECREF(py_srcfile); + Py_DECREF(py_funcname); + return py_code; +bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + return NULL; +} +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyFrameObject *py_frame = 0; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + if (c_line) { + c_line = __Pyx_CLineForTraceback(tstate, c_line); + } + py_code = __pyx_find_code_object(c_line ? -c_line : py_line); + if (!py_code) { + py_code = __Pyx_CreateCodeObjectForTraceback( + funcname, c_line, py_line, filename); + if (!py_code) goto bad; + __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); + } + py_frame = PyFrame_New( + tstate, /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + __pyx_d, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + __Pyx_PyFrame_SetLineNumber(py_frame, py_line); + PyTraceBack_Here(py_frame); +bad: + Py_XDECREF(py_code); + Py_XDECREF(py_frame); +} + +#if PY_MAJOR_VERSION < 3 +static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { + if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); + if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); + if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); + PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); + return -1; +} +static void __Pyx_ReleaseBuffer(Py_buffer *view) { + PyObject *obj = view->obj; + if (!obj) return; + if (PyObject_CheckBuffer(obj)) { + PyBuffer_Release(view); + return; + } + if ((0)) {} + view->obj = NULL; + Py_DECREF(obj); +} +#endif + + + /* MemviewSliceIsContig */ + static int +__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) +{ + int i, index, step, start; + Py_ssize_t itemsize = mvs.memview->view.itemsize; + if (order == 'F') { + step = 1; + start = 0; + } else { + step = -1; + start = ndim - 1; + } + for (i = 0; i < ndim; i++) { + index = start + step * i; + if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) + return 0; + itemsize *= mvs.shape[index]; + } + return 1; +} + +/* OverlappingSlices */ + static void +__pyx_get_array_memory_extents(__Pyx_memviewslice *slice, + void **out_start, void **out_end, + int ndim, size_t itemsize) +{ + char *start, *end; + int i; + start = end = slice->data; + for (i = 0; i < ndim; i++) { + Py_ssize_t stride = slice->strides[i]; + Py_ssize_t extent = slice->shape[i]; + if (extent == 0) { + *out_start = *out_end = start; + return; + } else { + if (stride > 0) + end += stride * (extent - 1); + else + start += stride * 
(extent - 1); + } + } + *out_start = start; + *out_end = end + itemsize; +} +static int +__pyx_slices_overlap(__Pyx_memviewslice *slice1, + __Pyx_memviewslice *slice2, + int ndim, size_t itemsize) +{ + void *start1, *end1, *start2, *end2; + __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); + __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); + return (start1 < end2) && (start2 < end1); +} + +/* Capsule */ + static CYTHON_INLINE PyObject * +__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) +{ + PyObject *cobj; +#if PY_VERSION_HEX >= 0x02070000 + cobj = PyCapsule_New(p, sig, NULL); +#else + cobj = PyCObject_FromVoidPtr(p, NULL); +#endif + return cobj; +} + +/* CIntFromPyVerify */ + #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) +#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) +#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ + {\ + func_type value = func_value;\ + if (sizeof(target_type) < sizeof(func_type)) {\ + if (unlikely(value != (func_type) (target_type) value)) {\ + func_type zero = 0;\ + if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ + return (target_type) -1;\ + if (is_unsigned && unlikely(value < zero))\ + goto raise_neg_overflow;\ + else\ + goto raise_overflow;\ + }\ + }\ + return (target_type) value;\ + } + +/* TypeInfoCompare */ + static int +__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) +{ + int i; + if (!a || !b) + return 0; + if (a == b) + return 1; + if (a->size != b->size || a->typegroup != b->typegroup || + a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { + if (a->typegroup == 'H' || b->typegroup == 'H') { + return a->size == b->size; + } else { + return 0; + } + } + if (a->ndim) { + for (i = 0; i < a->ndim; i++) + if (a->arraysize[i] != b->arraysize[i]) + return 0; + } + if (a->typegroup == 'S') { + if (a->flags != b->flags) + return 0; + if (a->fields || b->fields) { + if (!(a->fields && b->fields)) + return 0; + for (i = 0; a->fields[i].type && b->fields[i].type; i++) { + __Pyx_StructField *field_a = a->fields + i; + __Pyx_StructField *field_b = b->fields + i; + if (field_a->offset != field_b->offset || + !__pyx_typeinfo_cmp(field_a->type, field_b->type)) + return 0; + } + return !a->fields[i].type && !b->fields[i].type; + } + } + return 1; +} + +/* MemviewSliceValidateAndInit */ + static int +__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) +{ + if (buf->shape[dim] <= 1) + return 1; + if (buf->strides) { + if (spec & __Pyx_MEMVIEW_CONTIG) { + if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { + if (unlikely(buf->strides[dim] != sizeof(void *))) { + PyErr_Format(PyExc_ValueError, + "Buffer is not indirectly contiguous " + "in dimension %d.", dim); + goto fail; + } + } else if (unlikely(buf->strides[dim] != buf->itemsize)) { + PyErr_SetString(PyExc_ValueError, + "Buffer and memoryview are not contiguous " + "in the same dimension."); + goto fail; + } + } + if (spec & __Pyx_MEMVIEW_FOLLOW) { + Py_ssize_t stride = buf->strides[dim]; + if (stride < 0) + stride = -stride; + if (unlikely(stride < buf->itemsize)) { + PyErr_SetString(PyExc_ValueError, + "Buffer and memoryview are not contiguous " + "in the same dimension."); + goto fail; + } + } + } else { + if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) { + PyErr_Format(PyExc_ValueError, + 
"C-contiguous buffer is not contiguous in " + "dimension %d", dim); + goto fail; + } else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) { + PyErr_Format(PyExc_ValueError, + "C-contiguous buffer is not indirect in " + "dimension %d", dim); + goto fail; + } else if (unlikely(buf->suboffsets)) { + PyErr_SetString(PyExc_ValueError, + "Buffer exposes suboffsets but no strides"); + goto fail; + } + } + return 1; +fail: + return 0; +} +static int +__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) +{ + if (spec & __Pyx_MEMVIEW_DIRECT) { + if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) { + PyErr_Format(PyExc_ValueError, + "Buffer not compatible with direct access " + "in dimension %d.", dim); + goto fail; + } + } + if (spec & __Pyx_MEMVIEW_PTR) { + if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) { + PyErr_Format(PyExc_ValueError, + "Buffer is not indirectly accessible " + "in dimension %d.", dim); + goto fail; + } + } + return 1; +fail: + return 0; +} +static int +__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) +{ + int i; + if (c_or_f_flag & __Pyx_IS_F_CONTIG) { + Py_ssize_t stride = 1; + for (i = 0; i < ndim; i++) { + if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { + PyErr_SetString(PyExc_ValueError, + "Buffer not fortran contiguous."); + goto fail; + } + stride = stride * buf->shape[i]; + } + } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { + Py_ssize_t stride = 1; + for (i = ndim - 1; i >- 1; i--) { + if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { + PyErr_SetString(PyExc_ValueError, + "Buffer not C contiguous."); + goto fail; + } + stride = stride * buf->shape[i]; + } + } + return 1; +fail: + return 0; +} +static int __Pyx_ValidateAndInit_memviewslice( + int *axes_specs, + int c_or_f_flag, + int buf_flags, + int ndim, + __Pyx_TypeInfo *dtype, + __Pyx_BufFmt_StackElem stack[], + __Pyx_memviewslice *memviewslice, + PyObject *original_obj) +{ + struct __pyx_memoryview_obj *memview, *new_memview; + __Pyx_RefNannyDeclarations + Py_buffer *buf; + int i, spec = 0, retval = -1; + __Pyx_BufFmt_Context ctx; + int from_memoryview = __pyx_memoryview_check(original_obj); + __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); + if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) + original_obj)->typeinfo)) { + memview = (struct __pyx_memoryview_obj *) original_obj; + new_memview = NULL; + } else { + memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( + original_obj, buf_flags, 0, dtype); + new_memview = memview; + if (unlikely(!memview)) + goto fail; + } + buf = &memview->view; + if (unlikely(buf->ndim != ndim)) { + PyErr_Format(PyExc_ValueError, + "Buffer has wrong number of dimensions (expected %d, got %d)", + ndim, buf->ndim); + goto fail; + } + if (new_memview) { + __Pyx_BufFmt_Init(&ctx, stack, dtype); + if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail; + } + if (unlikely((unsigned) buf->itemsize != dtype->size)) { + PyErr_Format(PyExc_ValueError, + "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " + "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", + buf->itemsize, + (buf->itemsize > 1) ? "s" : "", + dtype->name, + dtype->size, + (dtype->size > 1) ? 
"s" : ""); + goto fail; + } + if (buf->len > 0) { + for (i = 0; i < ndim; i++) { + spec = axes_specs[i]; + if (unlikely(!__pyx_check_strides(buf, i, ndim, spec))) + goto fail; + if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec))) + goto fail; + } + if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))) + goto fail; + } + if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, + new_memview != NULL) == -1)) { + goto fail; + } + retval = 0; + goto no_fail; +fail: + Py_XDECREF(new_memview); + retval = -1; +no_fail: + __Pyx_RefNannyFinishContext(); + return retval; +} + +/* ObjectToMemviewSlice */ + static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_double(PyObject *obj, int writable_flag) { + __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_BufFmt_StackElem stack[1]; + int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; + int retcode; + if (obj == Py_None) { + result.memview = (struct __pyx_memoryview_obj *) Py_None; + return result; + } + retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, + (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, + &__Pyx_TypeInfo_double, stack, + &result, obj); + if (unlikely(retcode == -1)) + goto __pyx_fail; + return result; +__pyx_fail: + result.memview = NULL; + result.data = NULL; + return result; +} + +/* Print */ + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION < 3 +static PyObject *__Pyx_GetStdout(void) { + PyObject *f = PySys_GetObject((char *)"stdout"); + if (!f) { + PyErr_SetString(PyExc_RuntimeError, "lost sys.stdout"); + } + return f; +} +static int __Pyx_Print(PyObject* f, PyObject *arg_tuple, int newline) { + int i; + if (!f) { + if (!(f = __Pyx_GetStdout())) + return -1; + } + Py_INCREF(f); + for (i=0; i < PyTuple_GET_SIZE(arg_tuple); i++) { + PyObject* v; + if (PyFile_SoftSpace(f, 1)) { + if (PyFile_WriteString(" ", f) < 0) + goto error; + } + v = PyTuple_GET_ITEM(arg_tuple, i); + if (PyFile_WriteObject(v, f, Py_PRINT_RAW) < 0) + goto error; + if (PyString_Check(v)) { + char *s = PyString_AsString(v); + Py_ssize_t len = PyString_Size(v); + if (len > 0) { + switch (s[len-1]) { + case ' ': break; + case '\f': case '\r': case '\n': case '\t': case '\v': + PyFile_SoftSpace(f, 0); + break; + default: break; + } + } + } + } + if (newline) { + if (PyFile_WriteString("\n", f) < 0) + goto error; + PyFile_SoftSpace(f, 0); + } + Py_DECREF(f); + return 0; +error: + Py_DECREF(f); + return -1; +} +#else +static int __Pyx_Print(PyObject* stream, PyObject *arg_tuple, int newline) { + PyObject* kwargs = 0; + PyObject* result = 0; + PyObject* end_string; + if (unlikely(!__pyx_print)) { + __pyx_print = PyObject_GetAttr(__pyx_b, __pyx_n_s_print); + if (!__pyx_print) + return -1; + } + if (stream) { + kwargs = PyDict_New(); + if (unlikely(!kwargs)) + return -1; + if (unlikely(PyDict_SetItem(kwargs, __pyx_n_s_file, stream) < 0)) + goto bad; + if (!newline) { + end_string = PyUnicode_FromStringAndSize(" ", 1); + if (unlikely(!end_string)) + goto bad; + if (PyDict_SetItem(kwargs, __pyx_n_s_end, end_string) < 0) { + Py_DECREF(end_string); + goto bad; + } + Py_DECREF(end_string); + } + } else if (!newline) { + if (unlikely(!__pyx_print_kwargs)) { + __pyx_print_kwargs = PyDict_New(); + if (unlikely(!__pyx_print_kwargs)) + return -1; + end_string = PyUnicode_FromStringAndSize(" ", 1); + if (unlikely(!end_string)) + return -1; + if (PyDict_SetItem(__pyx_print_kwargs, __pyx_n_s_end, end_string) < 0) { + Py_DECREF(end_string); + return -1; + } + 
Py_DECREF(end_string); + } + kwargs = __pyx_print_kwargs; + } + result = PyObject_Call(__pyx_print, arg_tuple, kwargs); + if (unlikely(kwargs) && (kwargs != __pyx_print_kwargs)) + Py_DECREF(kwargs); + if (!result) + return -1; + Py_DECREF(result); + return 0; +bad: + if (kwargs != __pyx_print_kwargs) + Py_XDECREF(kwargs); + return -1; +} +#endif + +/* MemviewDtypeToObject */ + static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp) { + return (PyObject *) PyFloat_FromDouble(*(double *) itemp); +} +static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj) { + double value = __pyx_PyFloat_AsDouble(obj); + if ((value == (double)-1) && PyErr_Occurred()) + return 0; + *(double *) itemp = value; + return 1; +} + +/* Declarations */ + #if CYTHON_CCOMPLEX + #ifdef __cplusplus + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + return ::std::complex< float >(x, y); + } + #else + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + return x + y*(__pyx_t_float_complex)_Complex_I; + } + #endif +#else + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + __pyx_t_float_complex z; + z.real = x; + z.imag = y; + return z; + } +#endif + +/* Arithmetic */ + #if CYTHON_CCOMPLEX +#else + static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + return (a.real == b.real) && (a.imag == b.imag); + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real + b.real; + z.imag = a.imag + b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real - b.real; + z.imag = a.imag - b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real * b.real - a.imag * b.imag; + z.imag = a.real * b.imag + a.imag * b.real; + return z; + } + #if 1 + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + if (b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); + } else if (fabsf(b.real) >= fabsf(b.imag)) { + if (b.real == 0 && b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag); + } else { + float r = b.imag / b.real; + float s = (float)(1.0) / (b.real + b.imag * r); + return __pyx_t_float_complex_from_parts( + (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); + } + } else { + float r = b.real / b.imag; + float s = (float)(1.0) / (b.imag + b.real * r); + return __pyx_t_float_complex_from_parts( + (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); + } + } + #else + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + if (b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); + } else { + float denom = b.real * b.real + b.imag * b.imag; + return __pyx_t_float_complex_from_parts( + (a.real * b.real + a.imag * b.imag) / denom, + (a.imag * b.real - a.real * b.imag) / denom); + } + } + #endif + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) { + __pyx_t_float_complex z; + z.real = -a.real; + 
z.imag = -a.imag; + return z; + } + static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) { + return (a.real == 0) && (a.imag == 0); + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) { + __pyx_t_float_complex z; + z.real = a.real; + z.imag = -a.imag; + return z; + } + #if 1 + static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) { + #if !defined(HAVE_HYPOT) || defined(_MSC_VER) + return sqrtf(z.real*z.real + z.imag*z.imag); + #else + return hypotf(z.real, z.imag); + #endif + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + float r, lnr, theta, z_r, z_theta; + if (b.imag == 0 && b.real == (int)b.real) { + if (b.real < 0) { + float denom = a.real * a.real + a.imag * a.imag; + a.real = a.real / denom; + a.imag = -a.imag / denom; + b.real = -b.real; + } + switch ((int)b.real) { + case 0: + z.real = 1; + z.imag = 0; + return z; + case 1: + return a; + case 2: + return __Pyx_c_prod_float(a, a); + case 3: + z = __Pyx_c_prod_float(a, a); + return __Pyx_c_prod_float(z, a); + case 4: + z = __Pyx_c_prod_float(a, a); + return __Pyx_c_prod_float(z, z); + } + } + if (a.imag == 0) { + if (a.real == 0) { + return a; + } else if (b.imag == 0) { + z.real = powf(a.real, b.real); + z.imag = 0; + return z; + } else if (a.real > 0) { + r = a.real; + theta = 0; + } else { + r = -a.real; + theta = atan2f(0.0, -1.0); + } + } else { + r = __Pyx_c_abs_float(a); + theta = atan2f(a.imag, a.real); + } + lnr = logf(r); + z_r = expf(lnr * b.real - theta * b.imag); + z_theta = theta * b.real + lnr * b.imag; + z.real = z_r * cosf(z_theta); + z.imag = z_r * sinf(z_theta); + return z; + } + #endif +#endif + +/* Declarations */ + #if CYTHON_CCOMPLEX + #ifdef __cplusplus + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + return ::std::complex< double >(x, y); + } + #else + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + return x + y*(__pyx_t_double_complex)_Complex_I; + } + #endif +#else + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + __pyx_t_double_complex z; + z.real = x; + z.imag = y; + return z; + } +#endif + +/* Arithmetic */ + #if CYTHON_CCOMPLEX +#else + static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + return (a.real == b.real) && (a.imag == b.imag); + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real + b.real; + z.imag = a.imag + b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real - b.real; + z.imag = a.imag - b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real * b.real - a.imag * b.imag; + z.imag = a.real * b.imag + a.imag * b.real; + return z; + } + #if 1 + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + if (b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); + } else if (fabs(b.real) >= fabs(b.imag)) { + if (b.real == 0 && b.imag == 0) { + 
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag); + } else { + double r = b.imag / b.real; + double s = (double)(1.0) / (b.real + b.imag * r); + return __pyx_t_double_complex_from_parts( + (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); + } + } else { + double r = b.real / b.imag; + double s = (double)(1.0) / (b.imag + b.real * r); + return __pyx_t_double_complex_from_parts( + (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); + } + } + #else + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + if (b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); + } else { + double denom = b.real * b.real + b.imag * b.imag; + return __pyx_t_double_complex_from_parts( + (a.real * b.real + a.imag * b.imag) / denom, + (a.imag * b.real - a.real * b.imag) / denom); + } + } + #endif + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) { + __pyx_t_double_complex z; + z.real = -a.real; + z.imag = -a.imag; + return z; + } + static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) { + return (a.real == 0) && (a.imag == 0); + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) { + __pyx_t_double_complex z; + z.real = a.real; + z.imag = -a.imag; + return z; + } + #if 1 + static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) { + #if !defined(HAVE_HYPOT) || defined(_MSC_VER) + return sqrt(z.real*z.real + z.imag*z.imag); + #else + return hypot(z.real, z.imag); + #endif + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + double r, lnr, theta, z_r, z_theta; + if (b.imag == 0 && b.real == (int)b.real) { + if (b.real < 0) { + double denom = a.real * a.real + a.imag * a.imag; + a.real = a.real / denom; + a.imag = -a.imag / denom; + b.real = -b.real; + } + switch ((int)b.real) { + case 0: + z.real = 1; + z.imag = 0; + return z; + case 1: + return a; + case 2: + return __Pyx_c_prod_double(a, a); + case 3: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(z, a); + case 4: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(z, z); + } + } + if (a.imag == 0) { + if (a.real == 0) { + return a; + } else if (b.imag == 0) { + z.real = pow(a.real, b.real); + z.imag = 0; + return z; + } else if (a.real > 0) { + r = a.real; + theta = 0; + } else { + r = -a.real; + theta = atan2(0.0, -1.0); + } + } else { + r = __Pyx_c_abs_double(a); + theta = atan2(a.imag, a.real); + } + lnr = log(r); + z_r = exp(lnr * b.real - theta * b.imag); + z_theta = theta * b.real + lnr * b.imag; + z.real = z_r * cos(z_theta); + z.imag = z_r * sin(z_theta); + return z; + } + #endif +#endif + +/* MemviewSliceCopyTemplate */ + static __Pyx_memviewslice +__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, + const char *mode, int ndim, + size_t sizeof_dtype, int contig_flag, + int dtype_is_object) +{ + __Pyx_RefNannyDeclarations + int i; + __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; + struct __pyx_memoryview_obj *from_memview = from_mvs->memview; + Py_buffer *buf = &from_memview->view; + PyObject *shape_tuple = NULL; + PyObject *temp_int = NULL; + struct __pyx_array_obj *array_obj = NULL; + struct __pyx_memoryview_obj *memview_obj = NULL; + __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); + for (i = 0; i < ndim; i++) { + if 
(unlikely(from_mvs->suboffsets[i] >= 0)) { + PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " + "indirect dimensions (axis %d)", i); + goto fail; + } + } + shape_tuple = PyTuple_New(ndim); + if (unlikely(!shape_tuple)) { + goto fail; + } + __Pyx_GOTREF(shape_tuple); + for(i = 0; i < ndim; i++) { + temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); + if(unlikely(!temp_int)) { + goto fail; + } else { + PyTuple_SET_ITEM(shape_tuple, i, temp_int); + temp_int = NULL; + } + } + array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); + if (unlikely(!array_obj)) { + goto fail; + } + __Pyx_GOTREF(array_obj); + memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( + (PyObject *) array_obj, contig_flag, + dtype_is_object, + from_mvs->memview->typeinfo); + if (unlikely(!memview_obj)) + goto fail; + if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) + goto fail; + if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, + dtype_is_object) < 0)) + goto fail; + goto no_fail; +fail: + __Pyx_XDECREF(new_mvs.memview); + new_mvs.memview = NULL; + new_mvs.data = NULL; +no_fail: + __Pyx_XDECREF(shape_tuple); + __Pyx_XDECREF(temp_int); + __Pyx_XDECREF(array_obj); + __Pyx_RefNannyFinishContext(); + return new_mvs; +} + +/* CIntFromPy */ + static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const int neg_one = (int) -1, const_zero = (int) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(int) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { + return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { + return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { + return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | 
(int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(int) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) + case -2: + if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else 
if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + } +#endif + if (sizeof(int) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + int val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (int) -1; + } + } else { + int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (int) -1; + val = __Pyx_PyInt_As_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to int"); + return (int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to int"); + return (int) -1; +} + +/* CIntFromPy */ + static CYTHON_INLINE size_t __Pyx_PyInt_As_size_t(PyObject *x) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const size_t neg_one = (size_t) -1, const_zero = (size_t) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(size_t) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(size_t, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (size_t) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (size_t) 0; + case 1: __PYX_VERIFY_RETURN_INT(size_t, digit, digits[0]) + case 2: + if (8 * sizeof(size_t) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(size_t) >= 2 * PyLong_SHIFT) { + return (size_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(size_t) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(size_t) >= 3 * PyLong_SHIFT) { + return (size_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(size_t) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + 
__PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(size_t) >= 4 * PyLong_SHIFT) { + return (size_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (size_t) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(size_t) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(size_t, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(size_t) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(size_t, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (size_t) 0; + case -1: __PYX_VERIFY_RETURN_INT(size_t, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(size_t, digit, +digits[0]) + case -2: + if (8 * sizeof(size_t) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) { + return (size_t) (((size_t)-1)*(((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(size_t) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) { + return (size_t) ((((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) { + return (size_t) (((size_t)-1)*(((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(size_t) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) { + return (size_t) ((((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(size_t) - 1 > 4 * PyLong_SHIFT) { + return 
(size_t) (((size_t)-1)*(((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(size_t) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(size_t) - 1 > 4 * PyLong_SHIFT) { + return (size_t) ((((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); + } + } + break; + } +#endif + if (sizeof(size_t) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(size_t, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(size_t) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(size_t, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + size_t val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (size_t) -1; + } + } else { + size_t val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (size_t) -1; + val = __Pyx_PyInt_As_size_t(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to size_t"); + return (size_t) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to size_t"); + return (size_t) -1; +} + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const int neg_one = (int) -1, const_zero = (int) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(int) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(int) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(int) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(int), + little, !is_unsigned); + } +} + +/* PrintOne */ + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION < 3 +static int __Pyx_PrintOne(PyObject* f, PyObject *o) { + if (!f) { + if (!(f = __Pyx_GetStdout())) + return -1; + } + Py_INCREF(f); + if 
(PyFile_SoftSpace(f, 0)) { + if (PyFile_WriteString(" ", f) < 0) + goto error; + } + if (PyFile_WriteObject(o, f, Py_PRINT_RAW) < 0) + goto error; + if (PyFile_WriteString("\n", f) < 0) + goto error; + Py_DECREF(f); + return 0; +error: + Py_DECREF(f); + return -1; + /* the line below is just to avoid C compiler + * warnings about unused functions */ + return __Pyx_Print(f, NULL, 0); +} +#else +static int __Pyx_PrintOne(PyObject* stream, PyObject *o) { + int res; + PyObject* arg_tuple = PyTuple_Pack(1, o); + if (unlikely(!arg_tuple)) + return -1; + res = __Pyx_Print(stream, arg_tuple, 1); + Py_DECREF(arg_tuple); + return res; +} +#endif + +/* CIntFromPy */ + static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const long neg_one = (long) -1, const_zero = (long) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(long) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (long) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { + return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { + return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { + return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (long) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(long) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, 
PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) + case -2: + if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + } +#endif + if (sizeof(long) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + 
PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + long val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (long) -1; + } + } else { + long val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (long) -1; + val = __Pyx_PyInt_As_long(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to long"); + return (long) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to long"); + return (long) -1; +} + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const long neg_one = (long) -1, const_zero = (long) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(long) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(long) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(long) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(long), + little, !is_unsigned); + } +} + +/* CIntFromPy */ + static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const char neg_one = (char) -1, const_zero = (char) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(char) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (char) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (char) 0; + case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) + case 2: + if (8 * sizeof(char) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { + return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); + } + } + break; + case 3: + if 
(8 * sizeof(char) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { + return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(char) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { + return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (char) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(char) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (char) 0; + case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) + case -2: + if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { + return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(char) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { + return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { + return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(char) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { + return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | 
(char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { + return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(char) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { + return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + } +#endif + if (sizeof(char) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + char val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (char) -1; + } + } else { + char val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (char) -1; + val = __Pyx_PyInt_As_char(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to char"); + return (char) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to char"); + return (char) -1; +} + +/* CheckBinaryVersion */ + static int __Pyx_check_binary_version(void) { + char ctversion[4], rtversion[4]; + PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); + PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); + if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { + char message[200]; + PyOS_snprintf(message, sizeof(message), + "compiletime version %s of module '%.100s' " + "does not match runtime version %s", + ctversion, __Pyx_MODULE_NAME, rtversion); + return PyErr_WarnEx(NULL, message, 1); + } + return 0; +} + +/* InitStrings */ + static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { + while (t->p) { + #if PY_MAJOR_VERSION < 3 + if (t->is_unicode) { + *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); + } else if (t->intern) { + *t->p = PyString_InternFromString(t->s); + } else { + *t->p = PyString_FromStringAndSize(t->s, t->n - 1); + } + #else + if (t->is_unicode | t->is_str) { + if (t->intern) { + 
*t->p = PyUnicode_InternFromString(t->s); + } else if (t->encoding) { + *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); + } else { + *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); + } + } else { + *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); + } + #endif + if (!*t->p) + return -1; + if (PyObject_Hash(*t->p) == -1) + return -1; + ++t; + } + return 0; +} + +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { + return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); +} +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { + Py_ssize_t ignore; + return __Pyx_PyObject_AsStringAndSize(o, &ignore); +} +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +#if !CYTHON_PEP393_ENABLED +static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + char* defenc_c; + PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); + if (!defenc) return NULL; + defenc_c = PyBytes_AS_STRING(defenc); +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + { + char* end = defenc_c + PyBytes_GET_SIZE(defenc); + char* c; + for (c = defenc_c; c < end; c++) { + if ((unsigned char) (*c) >= 128) { + PyUnicode_AsASCIIString(o); + return NULL; + } + } + } +#endif + *length = PyBytes_GET_SIZE(defenc); + return defenc_c; +} +#else +static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + if (likely(PyUnicode_IS_ASCII(o))) { + *length = PyUnicode_GET_LENGTH(o); + return PyUnicode_AsUTF8(o); + } else { + PyUnicode_AsASCIIString(o); + return NULL; + } +#else + return PyUnicode_AsUTF8AndSize(o, length); +#endif +} +#endif +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT + if ( +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + __Pyx_sys_getdefaultencoding_not_ascii && +#endif + PyUnicode_Check(o)) { + return __Pyx_PyUnicode_AsStringAndSize(o, length); + } else +#endif +#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) + if (PyByteArray_Check(o)) { + *length = PyByteArray_GET_SIZE(o); + return PyByteArray_AS_STRING(o); + } else +#endif + { + char* result; + int r = PyBytes_AsStringAndSize(o, &result, length); + if (unlikely(r < 0)) { + return NULL; + } else { + return result; + } + } +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { + int is_true = x == Py_True; + if (is_true | (x == Py_False) | (x == Py_None)) return is_true; + else return PyObject_IsTrue(x); +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { + int retval; + if (unlikely(!x)) return -1; + retval = __Pyx_PyObject_IsTrue(x); + Py_DECREF(x); + return retval; +} +static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { +#if PY_MAJOR_VERSION >= 3 + if (PyLong_Check(result)) { + if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + "__int__ returned non-int (type %.200s). 
" + "The ability to return an instance of a strict subclass of int " + "is deprecated, and may be removed in a future version of Python.", + Py_TYPE(result)->tp_name)) { + Py_DECREF(result); + return NULL; + } + return result; + } +#endif + PyErr_Format(PyExc_TypeError, + "__%.4s__ returned non-%.4s (type %.200s)", + type_name, type_name, Py_TYPE(result)->tp_name); + Py_DECREF(result); + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { +#if CYTHON_USE_TYPE_SLOTS + PyNumberMethods *m; +#endif + const char *name = NULL; + PyObject *res = NULL; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x) || PyLong_Check(x))) +#else + if (likely(PyLong_Check(x))) +#endif + return __Pyx_NewRef(x); +#if CYTHON_USE_TYPE_SLOTS + m = Py_TYPE(x)->tp_as_number; + #if PY_MAJOR_VERSION < 3 + if (m && m->nb_int) { + name = "int"; + res = m->nb_int(x); + } + else if (m && m->nb_long) { + name = "long"; + res = m->nb_long(x); + } + #else + if (likely(m && m->nb_int)) { + name = "int"; + res = m->nb_int(x); + } + #endif +#else + if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { + res = PyNumber_Int(x); + } +#endif + if (likely(res)) { +#if PY_MAJOR_VERSION < 3 + if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { +#else + if (unlikely(!PyLong_CheckExact(res))) { +#endif + return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); + } + } + else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "an integer is required"); + } + return res; +} +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { + Py_ssize_t ival; + PyObject *x; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(b))) { + if (sizeof(Py_ssize_t) >= sizeof(long)) + return PyInt_AS_LONG(b); + else + return PyInt_AsSsize_t(b); + } +#endif + if (likely(PyLong_CheckExact(b))) { + #if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)b)->ob_digit; + const Py_ssize_t size = Py_SIZE(b); + if (likely(__Pyx_sst_abs(size) <= 1)) { + ival = likely(size) ? digits[0] : 0; + if (size == -1) ival = -ival; + return ival; + } else { + switch (size) { + case 2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + } + } + #endif + return PyLong_AsSsize_t(b); + } + x = PyNumber_Index(b); + if (!x) return -1; + ival = PyInt_AsSsize_t(x); + Py_DECREF(x); + return ival; +} +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { + return b ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); +} +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { + return PyInt_FromSize_t(ival); +} + + +#endif /* Py_PYTHON_H */ diff --git a/Downstream/Open-Set-Action-Recognition/experiments/libMR/libmr.pxd b/Downstream/Open-Set-Action-Recognition/experiments/libMR/libmr.pxd new file mode 100644 index 0000000..304132c --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/libMR/libmr.pxd @@ -0,0 +1,4 @@ +cdef extern from "MetaRecognition.h": + cdef struct svm_node_libsvm: + int index + double value \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/libMR/libmr.pyx b/Downstream/Open-Set-Action-Recognition/experiments/libMR/libmr.pyx new file mode 100644 index 0000000..e0d588b --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/libMR/libmr.pyx @@ -0,0 +1,261 @@ +# +# libmr.pyx: +# +# @Author Terry Boult tboult at securics com +# @Author Vijay Iyer viyer at securics com +# @Author Michael Wilber mwilber at securics.com +# +# Copyright 2013, Securics Inc. +# +# See accompanying LICENSE agrement for details on rights. +# +# Parts of this technology are subject to SBIR data rights and as +# described in DFARS 252.227-7018 (June 1995) SBIR Data Rights which +# apply to Contract Number: N00014-11-C-0243 and STTR N00014-07-M-0421 +# to Securics Inc, 1870 Austin Bluffs Parkway, Colorado Springs, CO +# 80918 +# +# The Government's rights to use, modify, reproduce, release, perform, +# display, or disclose technical data or computer software marked with +# this legend are restricted during the period shown as provided in +# paragraph (b)(4) of the Rights in Noncommercial Technical Data and +# Computer Software-Small Business Innovative Research (SBIR) Program +# clause contained in the above identified contract. Expiration of +# SBIR Data Rights: Expires four years after completion of the above +# cited project work for this or any other follow-on SBIR contract, +# whichever is later. +# +# No restrictions on government use apply after the expiration date +# shown above. Any reproduction of technical data, computer software, +# or portions thereof marked with this legend must also reproduce the +# markings. 
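For orientation, here is a minimal, illustrative sketch of how the `MR` wrapper class defined below is typically exercised (compare `test_libmr.py` later in this patch). It is not part of the patch itself: it assumes the extension has already been compiled (e.g. with `python setup.py build_ext --inplace`), that NumPy is available, and the `scores` array is synthetic stand-in data.

```python
import numpy as np
import libmr  # the extension module built from libmr.pyx below

scores = np.random.randn(200)            # stand-in classifier decision scores

mr = libmr.MR()
mr.fit_high(scores.tolist(), 20)         # "higher is better" data: fit the top-20 tail
assert mr.is_valid                       # a Weibull model has been fitted

print(mr.w_score(2.5))                   # probability that a score of 2.5 is an outlier
print(mr.cdf(2.5))                       # same quantity, exposed as the CDF
print(mr.inv(0.95))                      # score at which the CDF reaches 0.95

wscores = mr.w_score_vector(np.ascontiguousarray(scores, dtype=np.float64))

mr.mr_save(b"meta_rec.model")            # bytes for the char* coercion under Python 3;
mr.mr_load(b"meta_rec.model")            # a plain str works under Python 2, as in test_libmr.py
```

For lower-is-better data (e.g. distances), `fit_low` plays the same role, and `fit_svm` accepts a dict of labels and decision scores as described in its docstring below.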
+ +from libc.stdlib cimport malloc,free +from libcpp cimport bool +from libcpp.string cimport string +cimport numpy as np +import numpy as np + +cdef extern from "MetaRecognition.h": + cdef struct svm_node_libsvm: + int index + double value + +#cdef extern from "MetaRecognition.h": + +cdef extern from "MetaRecognition.h": + + ctypedef enum MR_fitting_type: + complement_reject + positive_reject + complement_model + positive_model + + cppclass MetaRecognition: + MetaRecognition(int scores_to_drop, + int fitting_size, + bool verbose, + double alpha, + int translate_amount) except + + bool is_valid() + void set_translate(double t) + void Reset() + bool Predict_Match(double x, double threshold) + double W_score(double x) + double CDF(double x) + double Inv(double p) + + int ReNormalize(double *invec, double *outvec, int length) + + int FitHigh(double* inputData, int inputDataSize, int fit_size) + + int FitLow(double* inputData, int inputDataSize, int fit_size) + + int FitSVM(svm_node_libsvm* svmdata, int inputDataSize, int label_of_interest, bool label_has_positive_score, + int fit_type, int fit_size ) + + # void Save(FILE *outputFile) const + # void Load(FILE *inputFile) + void Save(char* filename) + void Load(char* filename) + int get_fitting_size() + int set_fitting_size(int nsize) + int get_translate_amount() + int set_translate_amount(int ntrans) + int get_sign() + int set_sign(int nsign) + double get_small_score() + double set_small_score(double nscore) + bool verbose + string to_string() + void from_string(string input) + + +# This is the Python wrapper class. +cdef class MR: + cdef MetaRecognition *thisptr + def __cinit__(self, int scores_to_drop=0, + int fitting_size=9, + bool verbose=False, + double alpha=5.0, + int translate_amount=10000): + """ + Create a new MR object. + """ + self.thisptr = new MetaRecognition(scores_to_drop,fitting_size,verbose,alpha,translate_amount) + def __dealloc__(self): + del self.thisptr + def fit_low(self, inputData, int fit_size): + """Use fit_low if your data is such that is smaller is better. Fits a + MR object to the given data. We'll transform it for you + and keep the transform parameters in the class so later calls + to W_score or CDF do the right thing.""" + cdef double *data + data = malloc(sizeof(double)*len(inputData)) + for i in xrange(len(inputData)): + data[i] = inputData[i] + self.thisptr.FitLow(data, len(inputData), fit_size) + free(data) + def fit_high(self, inputData, int fit_size): + """Use fit_high if your data is such that is larger is better. Fits a + MR object to the given data. We'll transform it for you + and keep the transform parameters in the class so later calls + to W_score or CDF do the right thing. + """ + cdef double *data + data = malloc(sizeof(double)*len(inputData)) + for i in xrange(len(inputData)): + data[i] = inputData[i] + self.thisptr.FitHigh(data, len(inputData), fit_size) + free(data) + + def mr_save(self, filename): + """ + save mr object to file + """ + cdef char *filetosave + filetosave = filename + self.thisptr.Save(filetosave) + + def mr_load(self, filename): + """ + save mr object to file + """ + cdef char *filetosave + filetosave = filename + self.thisptr.Load(filetosave) + + def fit_svm(self, svm_data, inputDataSize, label_of_interest, + label_has_positive_score, fit_type, fit_size ): + """ + Input: + -------- + svm_data: dict containing labels and decision scores. + eg. 
svm_data['scores'] = [], svm_data['labels'] = [] + inputDataSize : total no of decision scores + label_of_interest : eg +1, -1 + label_has_positive_score : bool i.e 0 or 1 + fit_type : complement_reject=1, positive_reject=2, complement_model=3, positive_model=4 + fit_size : size of tail to be used + + Output: + -------- + None + You can access parameters from weibull fitting using other attributes. + Loading/Saving of weibull model parameters can be done using load/save methods + in MR class + + """ + + # initialize svm_data + cdef svm_node_libsvm *svm_data_to_c + + svm_data_to_c = < svm_node_libsvm* >malloc(inputDataSize * sizeof(svm_node_libsvm) ) + + assert svm_data.has_key("scores") + assert svm_data.has_key("scores") + assert len(svm_data["scores"]) == len(svm_data["labels"]) + assert fit_type in [1, 2, 3, 4] + for i in range(inputDataSize): + svm_data_to_c[i].index = svm_data["labels"][i] + svm_data_to_c[i].value = svm_data["scores"][i] + + print "Data initizalization complete. Now calling C++ code" + self.thisptr.FitSVM(svm_data_to_c, inputDataSize, label_of_interest, label_has_positive_score, fit_type, fit_size) + free(svm_data_to_c) + + property is_valid: + def __get__(self): + return self.thisptr.is_valid() + def reset(self): + self.thisptr.Reset() + def predict_match(self, double x, double threshold = .9999999): + """ + Is X from the "match" distribution (i.e. we reject null hypothesis + of non-match) + + """ + return self.thisptr.Predict_Match(x,threshold) + def w_score(self, double x): + """ + This is the commonly used function. After fitting, it returns the probability of the given score being "correct". It is the same as CDF + """ + return self.thisptr.W_score(x) + def cdf(self, double x): + """ + This is the cummumlative probablity of match being corrrect (or more precisely the probility the score (after transform) being an outlier for the distribution, which given the transforms applied, so bigger is better, this is the probablity the score is correct. + """ + return self.thisptr.CDF(x) + def inv(self, double p): + """ + This is score for which one would obtain CDF probability p (i.e. x such that p = CDF(x)) + """ + return self.thisptr.Inv(p) + def w_score_vector(self, double[::1] invec): + """ + Apply w_score to each element of invec, returning a new vector of W-scores + """ + cdef np.ndarray[np.double_t,ndim=1]new_vec = np.zeros(len(invec), dtype='d') + self.thisptr.ReNormalize(&invec[0], &new_vec[0], len(invec)) + return new_vec + def __str__(self): + """ + Serialize the MR object to a string. Use load_from_string to recover it. + """ + return self.thisptr.to_string() + def __repr__(self): + return "" % str(self) + property tailsize: + def __get__(self): + return self.thisptr.get_fitting_size() + def __set__(self, int nsize): + self.thisptr.set_fitting_size(nsize) + property translate_amount: + def __get__(self): + return self.thisptr.get_translate_amount() + def __set__(self, int ntrans): + self.thisptr.set_translate_amount(ntrans) + property sign: + def __get__(self): + return self.thisptr.get_sign() + def __set__(self, int nsign): + self.thisptr.set_sign(nsign) + property small_score: + def __get__(self): + return self.thisptr.get_small_score() + def __set__(self, double nscore): + self.thisptr.set_small_score(nscore) + property verbose: + def __get__(self): + return self.thisptr.verbose + def __set__(self, bool verbose): + self.thisptr.verbose = verbose + +def load_from_string(str input): + """ + Deserialize an MR object. 
This turns a string back into an MR object; it is the inverse of str(MR()) + """ + pymr = MR() + pymr.thisptr.from_string(input) + return pymr + diff --git a/Downstream/Open-Set-Action-Recognition/experiments/libMR/malloc.h b/Downstream/Open-Set-Action-Recognition/experiments/libMR/malloc.h new file mode 100644 index 0000000..e52c9e5 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/libMR/malloc.h @@ -0,0 +1,620 @@ +/* + Default header file for malloc-2.8.x, written by Doug Lea + and released to the public domain, as explained at + http://creativecommons.org/publicdomain/zero/1.0/ + + This header is for ANSI C/C++ only. You can set any of + the following #defines before including: + + * If USE_DL_PREFIX is defined, it is assumed that malloc.c + was also compiled with this option, so all routines + have names starting with "dl". + + * If HAVE_USR_INCLUDE_MALLOC_H is defined, it is assumed that this + file will be #included AFTER . This is needed only if + your system defines a struct mallinfo that is incompatible with the + standard one declared here. Otherwise, you can include this file + INSTEAD of your system system . At least on ANSI, all + declarations should be compatible with system versions + + * If MSPACES is defined, declarations for mspace versions are included. +*/ + +#ifndef MALLOC_280_H +#define MALLOC_280_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include /* for size_t */ + +#ifndef ONLY_MSPACES +#define ONLY_MSPACES 0 /* define to a value */ +#elif ONLY_MSPACES != 0 +#define ONLY_MSPACES 1 +#endif /* ONLY_MSPACES */ +#ifndef NO_MALLINFO +#define NO_MALLINFO 0 +#endif /* NO_MALLINFO */ + +#ifndef MSPACES +#if ONLY_MSPACES +#define MSPACES 1 +#else /* ONLY_MSPACES */ +#define MSPACES 0 +#endif /* ONLY_MSPACES */ +#endif /* MSPACES */ + +#if !ONLY_MSPACES + +#ifndef USE_DL_PREFIX +#define dlcalloc calloc +#define dlfree free +#define dlmalloc malloc +#define dlmemalign memalign +#define dlposix_memalign posix_memalign +#define dlrealloc realloc +#define dlvalloc valloc +#define dlpvalloc pvalloc +#define dlmallinfo mallinfo +#define dlmallopt mallopt +#define dlmalloc_trim malloc_trim +#define dlmalloc_stats malloc_stats +#define dlmalloc_usable_size malloc_usable_size +#define dlmalloc_footprint malloc_footprint +#define dlmalloc_max_footprint malloc_max_footprint +#define dlmalloc_footprint_limit malloc_footprint_limit +#define dlmalloc_set_footprint_limit malloc_set_footprint_limit +#define dlmalloc_inspect_all malloc_inspect_all +#define dlindependent_calloc independent_calloc +#define dlindependent_comalloc independent_comalloc +#define dlbulk_free bulk_free +#endif /* USE_DL_PREFIX */ + +#if !NO_MALLINFO +#ifndef HAVE_USR_INCLUDE_MALLOC_H +#ifndef _MALLOC_H +#ifndef MALLINFO_FIELD_TYPE +#define MALLINFO_FIELD_TYPE size_t +#endif /* MALLINFO_FIELD_TYPE */ +#ifndef STRUCT_MALLINFO_DECLARED +#define STRUCT_MALLINFO_DECLARED 1 +struct mallinfo { + MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ + MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ + MALLINFO_FIELD_TYPE smblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ + MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ + MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ + MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ + MALLINFO_FIELD_TYPE fordblks; /* total free space */ + MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ +}; +#endif /* 
STRUCT_MALLINFO_DECLARED */ +#endif /* _MALLOC_H */ +#endif /* HAVE_USR_INCLUDE_MALLOC_H */ +#endif /* !NO_MALLINFO */ + +/* + malloc(size_t n) + Returns a pointer to a newly allocated chunk of at least n bytes, or + null if no space is available, in which case errno is set to ENOMEM + on ANSI C systems. + + If n is zero, malloc returns a minimum-sized chunk. (The minimum + size is 16 bytes on most 32bit systems, and 32 bytes on 64bit + systems.) Note that size_t is an unsigned type, so calls with + arguments that would be negative if signed are interpreted as + requests for huge amounts of space, which will often fail. The + maximum supported value of n differs across systems, but is in all + cases less than the maximum representable value of a size_t. +*/ +void* dlmalloc(size_t); + +/* + free(void* p) + Releases the chunk of memory pointed to by p, that had been previously + allocated using malloc or a related routine such as realloc. + It has no effect if p is null. If p was not malloced or already + freed, free(p) will by default cuase the current program to abort. +*/ +void dlfree(void*); + +/* + calloc(size_t n_elements, size_t element_size); + Returns a pointer to n_elements * element_size bytes, with all locations + set to zero. +*/ +void* dlcalloc(size_t, size_t); + +/* + realloc(void* p, size_t n) + Returns a pointer to a chunk of size n that contains the same data + as does chunk p up to the minimum of (n, p's size) bytes, or null + if no space is available. + + The returned pointer may or may not be the same as p. The algorithm + prefers extending p in most cases when possible, otherwise it + employs the equivalent of a malloc-copy-free sequence. + + If p is null, realloc is equivalent to malloc. + + If space is not available, realloc returns null, errno is set (if on + ANSI) and p is NOT freed. + + if n is for fewer bytes than already held by p, the newly unused + space is lopped off and freed if possible. realloc with a size + argument of zero (re)allocates a minimum-sized chunk. + + The old unix realloc convention of allowing the last-free'd chunk + to be used as an argument to realloc is not supported. +*/ +void* dlrealloc(void*, size_t); + +/* + realloc_in_place(void* p, size_t n) + Resizes the space allocated for p to size n, only if this can be + done without moving p (i.e., only if there is adjacent space + available if n is greater than p's current allocated size, or n is + less than or equal to p's size). This may be used instead of plain + realloc if an alternative allocation strategy is needed upon failure + to expand space; for example, reallocation of a buffer that must be + memory-aligned or cleared. You can use realloc_in_place to trigger + these alternatives only when needed. + + Returns p if successful; otherwise null. +*/ +void* dlrealloc_in_place(void*, size_t); + +/* + memalign(size_t alignment, size_t n); + Returns a pointer to a newly allocated chunk of n bytes, aligned + in accord with the alignment argument. + + The alignment argument should be a power of two. If the argument is + not a power of two, the nearest greater power is used. + 8-byte alignment is guaranteed by normal malloc calls, so don't + bother calling memalign with an argument of 8 or less. + + Overreliance on memalign is a sure way to fragment space. +*/ +void* dlmemalign(size_t, size_t); + +/* + int posix_memalign(void** pp, size_t alignment, size_t n); + Allocates a chunk of n bytes, aligned in accord with the alignment + argument. 
Differs from memalign only in that it (1) assigns the + allocated memory to *pp rather than returning it, (2) fails and + returns EINVAL if the alignment is not a power of two (3) fails and + returns ENOMEM if memory cannot be allocated. +*/ +int dlposix_memalign(void**, size_t, size_t); + +/* + valloc(size_t n); + Equivalent to memalign(pagesize, n), where pagesize is the page + size of the system. If the pagesize is unknown, 4096 is used. +*/ +void* dlvalloc(size_t); + +/* + mallopt(int parameter_number, int parameter_value) + Sets tunable parameters The format is to provide a + (parameter-number, parameter-value) pair. mallopt then sets the + corresponding parameter to the argument value if it can (i.e., so + long as the value is meaningful), and returns 1 if successful else + 0. SVID/XPG/ANSI defines four standard param numbers for mallopt, + normally defined in malloc.h. None of these are use in this malloc, + so setting them has no effect. But this malloc also supports other + options in mallopt: + + Symbol param # default allowed param values + M_TRIM_THRESHOLD -1 2*1024*1024 any (-1U disables trimming) + M_GRANULARITY -2 page size any power of 2 >= page size + M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support) +*/ +int dlmallopt(int, int); + +#define M_TRIM_THRESHOLD (-1) +#define M_GRANULARITY (-2) +#define M_MMAP_THRESHOLD (-3) + + +/* + malloc_footprint(); + Returns the number of bytes obtained from the system. The total + number of bytes allocated by malloc, realloc etc., is less than this + value. Unlike mallinfo, this function returns only a precomputed + result, so can be called frequently to monitor memory consumption. + Even if locks are otherwise defined, this function does not use them, + so results might not be up to date. +*/ +size_t dlmalloc_footprint(void); + +/* + malloc_max_footprint(); + Returns the maximum number of bytes obtained from the system. This + value will be greater than current footprint if deallocated space + has been reclaimed by the system. The peak number of bytes allocated + by malloc, realloc etc., is less than this value. Unlike mallinfo, + this function returns only a precomputed result, so can be called + frequently to monitor memory consumption. Even if locks are + otherwise defined, this function does not use them, so results might + not be up to date. +*/ +size_t dlmalloc_max_footprint(void); + +/* + malloc_footprint_limit(); + Returns the number of bytes that the heap is allowed to obtain from + the system, returning the last value returned by + malloc_set_footprint_limit, or the maximum size_t value if + never set. The returned value reflects a permission. There is no + guarantee that this number of bytes can actually be obtained from + the system. +*/ +size_t dlmalloc_footprint_limit(void); + +/* + malloc_set_footprint_limit(); + Sets the maximum number of bytes to obtain from the system, causing + failure returns from malloc and related functions upon attempts to + exceed this value. The argument value may be subject to page + rounding to an enforceable limit; this actual value is returned. + Using an argument of the maximum possible size_t effectively + disables checks. If the argument is less than or equal to the + current malloc_footprint, then all future allocations that require + additional system memory will fail. However, invocation cannot + retroactively deallocate existing used memory. 
+*/ +size_t dlmalloc_set_footprint_limit(size_t bytes); + +/* + malloc_inspect_all(void(*handler)(void *start, + void *end, + size_t used_bytes, + void* callback_arg), + void* arg); + Traverses the heap and calls the given handler for each managed + region, skipping all bytes that are (or may be) used for bookkeeping + purposes. Traversal does not include include chunks that have been + directly memory mapped. Each reported region begins at the start + address, and continues up to but not including the end address. The + first used_bytes of the region contain allocated data. If + used_bytes is zero, the region is unallocated. The handler is + invoked with the given callback argument. If locks are defined, they + are held during the entire traversal. It is a bad idea to invoke + other malloc functions from within the handler. + + For example, to count the number of in-use chunks with size greater + than 1000, you could write: + static int count = 0; + void count_chunks(void* start, void* end, size_t used, void* arg) { + if (used >= 1000) ++count; + } + then: + malloc_inspect_all(count_chunks, NULL); + + malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined. +*/ +void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*), + void* arg); + +#if !NO_MALLINFO +/* + mallinfo() + Returns (by copy) a struct containing various summary statistics: + + arena: current total non-mmapped bytes allocated from system + ordblks: the number of free chunks + smblks: always zero. + hblks: current number of mmapped regions + hblkhd: total bytes held in mmapped regions + usmblks: the maximum total allocated space. This will be greater + than current total if trimming has occurred. + fsmblks: always zero + uordblks: current total allocated space (normal or mmapped) + fordblks: total free space + keepcost: the maximum number of bytes that could ideally be released + back to system via malloc_trim. ("ideally" means that + it ignores page restrictions etc.) + + Because these fields are ints, but internal bookkeeping may + be kept as longs, the reported values may wrap around zero and + thus be inaccurate. +*/ + +struct mallinfo dlmallinfo(void); +#endif /* NO_MALLINFO */ + +/* + independent_calloc(size_t n_elements, size_t element_size, void* chunks[]); + + independent_calloc is similar to calloc, but instead of returning a + single cleared space, it returns an array of pointers to n_elements + independent elements that can hold contents of size elem_size, each + of which starts out cleared, and can be independently freed, + realloc'ed etc. The elements are guaranteed to be adjacently + allocated (this is not guaranteed to occur with multiple callocs or + mallocs), which may also improve cache locality in some + applications. + + The "chunks" argument is optional (i.e., may be null, which is + probably the most typical usage). If it is null, the returned array + is itself dynamically allocated and should also be freed when it is + no longer needed. Otherwise, the chunks array must be of at least + n_elements in length. It is filled in with the pointers to the + chunks. + + In either case, independent_calloc returns this pointer array, or + null if the allocation failed. If n_elements is zero and "chunks" + is null, it returns a chunk representing an array with zero elements + (which should be freed if not wanted). + + Each element must be freed when it is no longer needed. This can be + done all at once using bulk_free. 
+ + independent_calloc simplifies and speeds up implementations of many + kinds of pools. It may also be useful when constructing large data + structures that initially have a fixed number of fixed-sized nodes, + but the number is not known at compile time, and some of the nodes + may later need to be freed. For example: + + struct Node { int item; struct Node* next; }; + + struct Node* build_list() { + struct Node** pool; + int n = read_number_of_nodes_needed(); + if (n <= 0) return 0; + pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0); + if (pool == 0) die(); + // organize into a linked list... + struct Node* first = pool[0]; + for (i = 0; i < n-1; ++i) + pool[i]->next = pool[i+1]; + free(pool); // Can now free the array (or not, if it is needed later) + return first; + } +*/ +void** dlindependent_calloc(size_t, size_t, void**); + +/* + independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]); + + independent_comalloc allocates, all at once, a set of n_elements + chunks with sizes indicated in the "sizes" array. It returns + an array of pointers to these elements, each of which can be + independently freed, realloc'ed etc. The elements are guaranteed to + be adjacently allocated (this is not guaranteed to occur with + multiple callocs or mallocs), which may also improve cache locality + in some applications. + + The "chunks" argument is optional (i.e., may be null). If it is null + the returned array is itself dynamically allocated and should also + be freed when it is no longer needed. Otherwise, the chunks array + must be of at least n_elements in length. It is filled in with the + pointers to the chunks. + + In either case, independent_comalloc returns this pointer array, or + null if the allocation failed. If n_elements is zero and chunks is + null, it returns a chunk representing an array with zero elements + (which should be freed if not wanted). + + Each element must be freed when it is no longer needed. This can be + done all at once using bulk_free. + + independent_comallac differs from independent_calloc in that each + element may have a different size, and also that it does not + automatically clear elements. + + independent_comalloc can be used to speed up allocation in cases + where several structs or objects must always be allocated at the + same time. For example: + + struct Head { ... } + struct Foot { ... } + + void send_message(char* msg) { + int msglen = strlen(msg); + size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) }; + void* chunks[3]; + if (independent_comalloc(3, sizes, chunks) == 0) + die(); + struct Head* head = (struct Head*)(chunks[0]); + char* body = (char*)(chunks[1]); + struct Foot* foot = (struct Foot*)(chunks[2]); + // ... + } + + In general though, independent_comalloc is worth using only for + larger values of n_elements. For small values, you probably won't + detect enough difference from series of malloc calls to bother. + + Overuse of independent_comalloc can increase overall memory usage, + since it cannot reuse existing noncontiguous small chunks that + might be available for some of the elements. +*/ +void** dlindependent_comalloc(size_t, size_t*, void**); + +/* + bulk_free(void* array[], size_t n_elements) + Frees and clears (sets to null) each non-null pointer in the given + array. This is likely to be faster than freeing them one-by-one. + If footers are used, pointers that have been allocated in different + mspaces are not freed or cleared, and the count of all such pointers + is returned. 
For large arrays of pointers with poor locality, it + may be worthwhile to sort this array before calling bulk_free. +*/ +size_t dlbulk_free(void**, size_t n_elements); + +/* + pvalloc(size_t n); + Equivalent to valloc(minimum-page-that-holds(n)), that is, + round up n to nearest pagesize. + */ +void* dlpvalloc(size_t); + +/* + malloc_trim(size_t pad); + + If possible, gives memory back to the system (via negative arguments + to sbrk) if there is unused memory at the `high' end of the malloc + pool or in unused MMAP segments. You can call this after freeing + large blocks of memory to potentially reduce the system-level memory + requirements of a program. However, it cannot guarantee to reduce + memory. Under some allocation patterns, some large free blocks of + memory will be locked between two used chunks, so they cannot be + given back to the system. + + The `pad' argument to malloc_trim represents the amount of free + trailing space to leave untrimmed. If this argument is zero, only + the minimum amount of memory to maintain internal data structures + will be left. Non-zero arguments can be supplied to maintain enough + trailing space to service future expected allocations without having + to re-obtain memory from the system. + + Malloc_trim returns 1 if it actually released any memory, else 0. +*/ +int dlmalloc_trim(size_t); + +/* + malloc_stats(); + Prints on stderr the amount of space obtained from the system (both + via sbrk and mmap), the maximum amount (which may be more than + current if malloc_trim and/or munmap got called), and the current + number of bytes allocated via malloc (or realloc, etc) but not yet + freed. Note that this is the number of bytes allocated, not the + number requested. It will be larger than the number requested + because of alignment and bookkeeping overhead. Because it includes + alignment wastage as being in use, this figure may be greater than + zero even when no user-level chunks are allocated. + + The reported current and maximum system memory can be inaccurate if + a program makes other calls to system memory allocation functions + (normally sbrk) outside of malloc. + + malloc_stats prints only the most commonly interesting statistics. + More information can be obtained by calling mallinfo. + + malloc_stats is not compiled if NO_MALLOC_STATS is defined. +*/ +void dlmalloc_stats(void); + +#endif /* !ONLY_MSPACES */ + +/* + malloc_usable_size(void* p); + + Returns the number of bytes you can actually use in + an allocated chunk, which may be more than you requested (although + often not) due to alignment and minimum size constraints. + You can use this many bytes without worrying about + overwriting other allocated objects. This is not a particularly great + programming practice. malloc_usable_size can be more useful in + debugging and assertions, for example: + + p = malloc(n); + assert(malloc_usable_size(p) >= 256); +*/ +size_t dlmalloc_usable_size(const void*); + +#if MSPACES + +/* + mspace is an opaque type representing an independent + region of space that supports mspace_malloc, etc. +*/ +typedef void* mspace; + +/* + create_mspace creates and returns a new independent space with the + given initial capacity, or, if 0, the default granularity size. It + returns null if there is no system memory available to create the + space. If argument locked is non-zero, the space uses a separate + lock to control access. The capacity of the space will grow + dynamically as needed to service mspace_malloc requests. 
You can + control the sizes of incremental increases of this space by + compiling with a different DEFAULT_GRANULARITY or dynamically + setting with mallopt(M_GRANULARITY, value). +*/ +mspace create_mspace(size_t capacity, int locked); + +/* + destroy_mspace destroys the given space, and attempts to return all + of its memory back to the system, returning the total number of + bytes freed. After destruction, the results of access to all memory + used by the space become undefined. +*/ +size_t destroy_mspace(mspace msp); + +/* + create_mspace_with_base uses the memory supplied as the initial base + of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this + space is used for bookkeeping, so the capacity must be at least this + large. (Otherwise 0 is returned.) When this initial space is + exhausted, additional memory will be obtained from the system. + Destroying this space will deallocate all additionally allocated + space (if possible) but not the initial base. +*/ +mspace create_mspace_with_base(void* base, size_t capacity, int locked); + +/* + mspace_track_large_chunks controls whether requests for large chunks + are allocated in their own untracked mmapped regions, separate from + others in this mspace. By default large chunks are not tracked, + which reduces fragmentation. However, such chunks are not + necessarily released to the system upon destroy_mspace. Enabling + tracking by setting to true may increase fragmentation, but avoids + leakage when relying on destroy_mspace to release all memory + allocated using this space. The function returns the previous + setting. +*/ +int mspace_track_large_chunks(mspace msp, int enable); + +#if !NO_MALLINFO +/* + mspace_mallinfo behaves as mallinfo, but reports properties of + the given space. +*/ +struct mallinfo mspace_mallinfo(mspace msp); +#endif /* NO_MALLINFO */ + +/* + An alias for mallopt. 
+*/ +int mspace_mallopt(int, int); + +/* + The following operate identically to their malloc counterparts + but operate only for the given mspace argument +*/ +void* mspace_malloc(mspace msp, size_t bytes); +void mspace_free(mspace msp, void* mem); +void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size); +void* mspace_realloc(mspace msp, void* mem, size_t newsize); +void* mspace_realloc_in_place(mspace msp, void* mem, size_t newsize); +void* mspace_memalign(mspace msp, size_t alignment, size_t bytes); +void** mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void* chunks[]); +void** mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void* chunks[]); +size_t mspace_bulk_free(mspace msp, void**, size_t n_elements); +size_t mspace_usable_size(const void* mem); +void mspace_malloc_stats(mspace msp); +int mspace_trim(mspace msp, size_t pad); +size_t mspace_footprint(mspace msp); +size_t mspace_max_footprint(mspace msp); +size_t mspace_footprint_limit(mspace msp); +size_t mspace_set_footprint_limit(mspace msp, size_t bytes); +void mspace_inspect_all(mspace msp, + void(*handler)(void *, void *, size_t, void*), + void* arg); +#endif /* MSPACES */ + +#ifdef __cplusplus +}; /* end of extern "C" */ +#endif + +#endif /* MALLOC_280_H */ diff --git a/Downstream/Open-Set-Action-Recognition/experiments/libMR/setup.py b/Downstream/Open-Set-Action-Recognition/experiments/libMR/setup.py new file mode 100644 index 0000000..08f61c0 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/libMR/setup.py @@ -0,0 +1,20 @@ +from distutils.core import setup +from distutils.extension import Extension +from Cython.Distutils import build_ext +from Cython.Build import cythonize +import sys +import numpy +#ext_modules = [Extension("libmr", ["libmr.pyx", "MetaRecognition.cpp"])] + +setup( + ext_modules = cythonize(Extension('libmr', + ["libmr.pyx", + "MetaRecognition.cpp", + "weibull.c" + ], + include_dirs = [".", numpy.get_include()], + language="c++", + )), + data_files = [('.', ['MetaRecognition.h', 'weibull.h'])], + +) diff --git a/Downstream/Open-Set-Action-Recognition/experiments/libMR/test_libmr.py b/Downstream/Open-Set-Action-Recognition/experiments/libMR/test_libmr.py new file mode 100644 index 0000000..9e866d1 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/libMR/test_libmr.py @@ -0,0 +1,37 @@ +import scipy as sp +import sys, os +try: + import libmr + print("Imported libmr succesfully") +except ImportError: + print("Cannot import libmr") + sys.exit() + +import pickle +svm_data = {} +svm_data["labels"] = [1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1 , -1, -1, -1, -1, -1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1 , -1, -1, -1, -1, -1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1 , -1, -1, -1, -1, -1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1 , -1, -1, -1, -1, -1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1 , -1, -1, -1, -1, -1] +svm_data["scores"] = sp.randn(100).tolist() +fit_data = sp.rand(3) +def main(): + + mr = libmr.MR() + datasize = len(svm_data["scores"]) + mr.fit_svm(svm_data, datasize, 1, 1, 1, 10) + print(fit_data) + print(mr.w_score_vector(fit_data)) + mr.mr_save("meta_rec.model") + datadump = {} + datadump = {"data": fit_data} + + f = open("data.dump", "w") + pickle.dump(datadump, f) + f.close() + print(dir(mr)) + + +if __name__ == "__main__": + main() diff --git a/Downstream/Open-Set-Action-Recognition/experiments/libMR/weibull.c 
b/Downstream/Open-Set-Action-Recognition/experiments/libMR/weibull.c new file mode 100644 index 0000000..c3b282a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/libMR/weibull.c @@ -0,0 +1,1062 @@ +/* \index + * weibull.cpp provides the core functionality for computing weibull fittings, as well as CDF and INF given parms + * + * + * @Author Brian Heflin + * @Author Walter Scheirer + * @Author Terry Boult tboult@securics.com + * + * Copyright 2010, 2011, Securics Inc. + * + * @section LICENSE + * See accompanying LICENSE agrement for full details on rights. + * + * Parts of this technology are subject to SBIR data rights and as described in DFARS 252.227-7018 (June 1995) SBIR Data Rights which apply to Contract Number: N00014-11-C-0243 and STTR N00014-07-M-0421 to Securics Inc, 1870 Austin Bluffs Parkway, Colorado Springs, CO 80918 + * + *The Government's rights to use, modify, reproduce, release, perform, display, or disclose technical data or computer software marked with this legend are restricted during the period shown as provided in paragraph (b)(4) of the Rights in Noncommercial Technical Data and Computer Software-Small Business Innovative Research (SBIR) Program clause contained in the above identified contract. Expiration of SBIR Data Rights: Expires four years after completion of the above cited project work for this or any other follow-on SBIR contract, whichever is later. + * + * No restrictions on government use apply after the expiration date shown above. Any reproduction of technical data, computer software, or portions thereof marked with this legend must also reproduce the markings. + * + * + * See overall comments in weibull.h + * + */ + + +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif + +#include +#include +#include +#include "malloc.h" +#include +#include + +#include "weibull.h" + +#ifdef __cplusplus +extern "C" { +#endif + + /* if WEIBULL_USE_ASSERTS is defined, the code will use asserts to ensure its requirements are true, otherwise it returns error codes. Default is not defined */ + /* if WEIBULL_IGNORE_ERRORS is defined, the code will just presume things will work out and not waste time on testing for error. Default is not defined */ + + /*#define WEIBULL_USE_ASSERTS */ + /* #define WEIBULL_IGNORE_ERRORS */ + +#ifdef WEIBULL_USE_ASSERTS +#include +#endif + + + + +#ifdef WEIBULL_IGNORE_ERRORS + int weibull_fit_verbose_debug=0; +#define WEIBULL_ERROR_HANDLER(x,msg) +#else + int weibull_fit_verbose_debug=1; + static int tthrow(int x, const char* msg){if(weibull_fit_verbose_debug) fprintf(stderr,"%s\n",msg); return x;} +#define WEIBULL_ERROR_HANDLER(x,msg) return tthrow(x,msg) +#endif + + + + + + + /* weibull_cdf computes the probability (given our assumptions) that the value x is an outlier ABOVE the fit distribution. if the distribution was non-match data, then it provides this probability that x is a match score. If data was match-data then it would be the probability of it being a larger non-match. + computes @f[ 1-e^{{x/scale}^{shape}} @f] + + @param x the location at which to compute the probability of being an outlier + @param scale the scale parameter of the Weibull. This is the first element in weibullparms (as computed by our weibull_fit) + @param shape the scale parameter of the Weibull. This is the first second in weibullparms (as computed by our weibull_fit) + @return if in the range [0-1] it is the probability of X being an outlier. Any value < 0 is an error code. 
returns -1 for invalid scale <=0 , -2 for invalid shape <=0 + * + */ +double weibull_cdf(double x, double scale, double shape) +{ + double cdf; + double tempVal, tempVal1; + + if(x<0) return 0; /* optimize for the simple case that can be common in playing with SVMs. (a valid value, and can ignore other possible errors) */ + +#ifndef WEIBULL_IGNORE_ERRORS +#ifdef WEIBULL_USE_ASSERTS + assert(scale>0); + assert(shape>0); +#else + if(scale<=0) WEIBULL_ERROR_HANDLER( -1, "Bad scale in weibull_cdf"); + if(shape <=0) WEIBULL_ERROR_HANDLER(-2, "Bad shape in weibull_cdf"); +#endif +#endif + + + + tempVal = x/scale; + tempVal1 = pow(tempVal,shape); + cdf = 1-exp(-1*tempVal1); + + return cdf; + +} + + + /* weibull_inv computes the inverse weibull, i.e. returns the score S (given our assumptions) such that x=weibull_cdf(s,scale,shape). Note it estimates from above, so if x=1.0 expect an answer of inf (infinity). + + @param x the location at which you compute the inverse (must be between [0,1] + @param scale the scale parameter of the weibull. This is the first element in weibullparms (as computed by our weibull_fit) + @param shape the scale parameter of the weibull. This is the first second in weibullparms (as computed by our weibull_fit) + @return if X in the range [0-1], return S such that x=weibull_cdf(s,scale,shape). The return value is in the range [0,inf]. Any return value < 0 is an error code. returns -1 for invalid scale <=0 , -2 for invalid shape <=0 -3 for X<0, -4 for x >1 + * + */ +double weibull_inv(double x, double scale, double shape) +{ + double inv; + double tempVal, tempVal1, tempVal2; +#ifndef WEIBULL_IGNORE_ERRORS +#ifdef WEIBULL_USE_ASSERTS + assert(scale>0); + assert(shape>0); + assert(x>=0); + assert(x<=1); +#else + if(scale<=0) WEIBULL_ERROR_HANDLER( -1, "Bad scale in weibull_cdf"); + if(shape <=0) WEIBULL_ERROR_HANDLER(-2, "Bad shape in weibull_cdf"); + if(x<0) WEIBULL_ERROR_HANDLER(-3,"Invalid X<0 in weibull_ing"); + if(x>1) WEIBULL_ERROR_HANDLER(-4,"Invalid X>1 in weibull_ing"); +#endif +#endif + + tempVal = log(1-x); + tempVal *= -1; + + tempVal1 = 1/shape; + tempVal2 = pow(tempVal, tempVal1); + + inv = scale * tempVal2; + + return inv; +} + + /* + printWeibullBuildInfo prints, to the file provied as an argument some information about the current package build information + */ + +void printWeibullBuildInfo(FILE *fh) +{ + if(fh == NULL) + fh = stdout; +#ifdef HAVE_CONFIG_H + fprintf(fh, "Project name: %s\n", PACKAGE_STRING); +#endif + fprintf(fh, "Git SHA Hash $Id: e6733c27c2f37c37aa58fe5f2b7d30aa084cd1e5 $\n"); +} + +/* abandon all hope ye who pass this point. We be mixing stuff in ugly ugly way including some conversion from fortran (erf approximations etc) and some non-obvious MLE estimations. */ + + +#ifdef __cplusplus +} +#endif + + +#ifdef _WIN32 +static __inline int fix(double n) +#else +static inline int fix(double n) +#endif +{ + return (int)n; +} + + + + +/* erf function, based on Fortran calcerf which is turn is basd on + "Rational Chebyshev approximations for the error function" C by W. J. Cody, Math. Comp., 1969, PP. 631-638. + This code uses rational functions that theoretically approximate erf(x) and erfc(x) to at least 18 significant decimal digits, and on IEEE hardware is generally to near machine precision. +Note there are accelerated versions for GPUs and in the Intel Math Kernel library, so if you do this a lot it may be worh using those libraries. 
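As a quick sanity check of the two closed-form expressions implemented by `weibull_cdf` and `weibull_inv` above, the following pure-Python restatement (illustrative only, not part of the patch) verifies that the two functions are inverses of one another for an arbitrary scale/shape pair:

```python
import math

def weibull_cdf(x, scale, shape):
    # 1 - exp(-(x / scale) ** shape), as computed by weibull_cdf() above
    return 1.0 - math.exp(-((x / scale) ** shape))

def weibull_inv(p, scale, shape):
    # scale * (-ln(1 - p)) ** (1 / shape), as computed by weibull_inv() above
    return scale * (-math.log(1.0 - p)) ** (1.0 / shape)

x = weibull_inv(0.9, scale=2.0, shape=1.5)
assert abs(weibull_cdf(x, 2.0, 1.5) - 0.9) < 1e-9
```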
+ */ + +static double wcalcerfc(double x) +{ + double PI = 3.141592653589793238462; + double thresh = 0.46875; + + double a [] = {3.16112374387056560e00, 1.13864154151050156e02, 3.77485237685302021e02, 3.20937758913846947e03, 1.85777706184603153e-1}; + double b [] = {2.36012909523441209e01, 2.44024637934444173e02, 1.28261652607737228e03, 2.84423683343917062e03}; + double c [] = {5.64188496988670089e-1, 8.88314979438837594e00, 6.61191906371416295e01, 2.98635138197400131e02, 8.81952221241769090e02, 1.71204761263407058e03, 2.05107837782607147e03, 1.23033935479799725e03, 2.15311535474403846e-8}; + double d [] = {1.57449261107098347e01, 1.17693950891312499e02, 5.37181101862009858e02, 1.62138957456669019e03, 3.29079923573345963e03, 4.36261909014324716e03, 3.43936767414372164e03, 1.23033935480374942e03}; + double p [] = {3.05326634961232344e-1, 3.60344899949804439e-1, 1.25781726111229246e-1, 1.60837851487422766e-2, 6.58749161529837803e-4, 1.63153871373020978e-2}; + double q [] = {2.56852019228982242e00, 1.87295284992346047e00, 5.27905102951428412e-1, 6.05183413124413191e-2, 2.33520497626869185e-3}; + + double result=0; + double xk; + double absxk; + double y,z; + double xnum,xden; + double tempVal, tempVal1; + double del; + int i; + + xk = x; + absxk = fabs(xk); + + if (absxk <= thresh) /* evaluate erf for |x| <= 0.46875 */ + { + y = absxk; + z = y * y; + xnum = a[4]*z; + xden = z; + + for (i=0; i<3; i++) + { + xnum = (xnum + a[i]) * z; + xden = (xden + b[i]) * z; + } + + tempVal=xk*(xnum + a[3]); + tempVal1=xden + b[3]; + + result = tempVal/tempVal1; + result = 1 - result; + } + else if (absxk <= 4.0)/* evaluate erfc for 0.46875 <= |x| <= 4.0 */ + { + y = absxk; + xnum = c[8]*y; + xden = y; + + for (i = 0; i< 7; i++) + { + xnum = (xnum + c[i]) * y; + xden = (xden + d[i]) * y; + } + + tempVal=xnum + c[7]; + tempVal1=xden + d[7]; + result = tempVal/tempVal1; + + tempVal=fix(y*16); + tempVal1=16; + z=tempVal/tempVal1; + + del = (y-z)*(y+z); + result = exp((-1*z)*z) * exp((-1*del)) * result; + + } + else /*% evaluate erfc for |x| > 4.0 */ + { + y = absxk; + z = 1/(y*y); + xnum = p[5]*z; + xden = z; + for (i = 0; i<4; i++) + { + xnum = (xnum + p[i]) * z; + xden = (xden + q[i]) * z; + } + + tempVal=z*(xnum + p[4]); + tempVal1=xden + q[4]; + result=tempVal/tempVal1; + + tempVal=1/sqrt(PI); + tempVal -= result; + tempVal1 = y; + result=tempVal/tempVal1; + + tempVal=fix(y*16); + z=tempVal/16; + del = (y-z) * (y+z); + result = exp((-1*z)*z) * exp((-1*del)) * result; + + /*check to see if result is finite */ + { + int test; +#ifdef _WIN32 + test = _finite(result); +#else + test = isfinite(result); +#endif + if (test == 0) result = 0; + } + } + + /*fix up for negative argument, erf, etc. */ + if (xk < -thresh) + { + result = 2 - result; + } + else if (xk < -thresh)/* jint must = 2 */ + { + if (xk < -26.628) /* if less than XNEG (the largest negative argument acceptable to ERFCX) by IEEE standard */ + { + result = 1000000; /*%%ERROR (INF) */ + WEIBULL_ERROR_HANDLER(-8,"wcalcerfc helper function failed to converge.." ); + } + else + { + tempVal=fix(xk*16); + tempVal1=16; + z=tempVal/tempVal1; + del = (xk-z)*(xk+z); + y = exp(z*z) * exp(del); + result = (y+y) - result; + } + } + + return result; +} + +/* + +Calculate the inverse complementary error function of the input argument y, for y in the interval [0, 2]. The inverse complementary error function find the value x that satisfies the equation y = erfc(x). based on fortran code based on "Rational Chebyshev approximations for the error function" C by W. J. 
Cody, Math. Comp., 1969, PP. 631-638. +Note there are accelerated versions for GPUs and in the Intel Math Kernel library, so if you do this a lot it may be worh using those libraries. + */ + + +static double derfcinv(double x) +{ + + double a [] = {1.370600482778535e-02, -3.051415712357203e-01, 1.524304069216834, -3.057303267970988, 2.710410832036097, -8.862269264526915e-01}; + double b [] = {-5.319931523264068e-02, 6.311946752267222e-01, -2.432796560310728, 4.175081992982483, -3.320170388221430}; + double c [] = {5.504751339936943e-03, 2.279687217114118e-01, 1.697592457770869, 1.802933168781950, -3.093354679843504, -2.077595676404383}; + double d [] = {7.784695709041462e-03, 3.224671290700398e-01, 2.445134137142996, 3.754408661907416}; + double q,r,u; + + double result = 0; + double xlow = 0.0485000000; + double xhigh = 1.9515000000; + double tosp = 1.1283791670955126645; + double xk; + double tempVal, tempVal1, tempVal2; + + xk = x; + + /*Rational approximation for central region */ + if ((xlow <= xk) && (xk <= xhigh)) + { + q = xk - 1; + r = q*q; + + tempVal1=(((((a[0]*r+a[1])*r+a[2])*r+a[3])*r+a[4])*r+a[5])*q; + tempVal2=((((b[0]*r+b[1])*r+b[2])*r+b[3])*r+b[4])*r+1; + + result = tempVal1/tempVal2; + } + + /*Rational approximation for lower region */ + else if ((0 < xk) && (xk < xlow)) + { + tempVal=xk/2; + q = sqrt(-2*log(tempVal)); + + tempVal1=((((c[0]*q+c[1])*q+c[2])*q+c[3])*q+c[4])*q+c[5]; + tempVal2=(((d[0]*q+d[1])*q+d[2])*q+d[3])*q+1; + + result=tempVal1/tempVal2; + } + + /*Rational approximation for upper region */ + else if ((xhigh < xk) && (xk < 2)) + { + tempVal=xk/2; + q = sqrt(-2*log(1-tempVal)); + tempVal1= -1*(((((c[0]*q+c[1])*q+c[2]))*q+c[3])*q+c[4])*q+c[5]; + tempVal2= (((d[0]*q+d[1])*q+d[2])*q+d[3])*q+1; + + result = tempVal1/tempVal2; + + } + + /* Root finding with Halley's method */ + /* For f = erfc(x) - y so then f' = -2/sqrt(pi)*exp(-x^2), and f" = -2*x*f' */ + + + { + double erfcx = wcalcerfc(result); + if(erfcx <0) WEIBULL_ERROR_HANDLER(-9999,"derfcinv fails because wcalcerfc failed"); + + tempVal= (-1*tosp); + tempVal1=(-1*result)*(result); + tempVal1=exp(tempVal1); + tempVal2=tempVal*tempVal1; + + u = (erfcx - xk)/tempVal2; + } + + tempVal=(1+result*u); + + tempVal1= u / tempVal; + + result = result - tempVal1; + + return result; + +} + + +static int weibull_neg_log_likelihood(double* nlogL, double* acov, double* weibulparms, double* data, + double* censoring, double* frequency, int size) +{ + int i; + double mu = weibulparms[0]; /* scale */ + double sigma = weibulparms[1]; /* shape */ + + double* z = (double*)malloc(sizeof(double)*size); + double* expz = (double*)malloc(sizeof(double)*size); + double* L = (double*)malloc(sizeof(double)*size); + double logSigma; + + if (sigma <= 0) WEIBULL_ERROR_HANDLER(-1,"Bad sigma (shape) in weibull_neg_log_likelihood.."); + + logSigma = log(sigma); + + for (i = 0; i < size; i++) + { + z[i] = (data[i]-mu)/sigma; + expz[i] = exp(z[i]); + L[i] = (z[i]-logSigma)*(1-censoring[i]-expz[i]); + } + + /* Sum up the individual contributions, and return the negative log-likelihood. */ + for (i=0; i 0) == (fb > 0)) + { + WEIBULL_ERROR_HANDLER(-4,"ERROR: wdfzero says function values at the interval endpoints must differ in sign\n"); + } + + fc = fb; + + /*Main loop, exit from middle of the loop */ + while (fb != 0) + { + /* Insure that b is the best result so far, a is the previous */ + /* value of b, and that c is on the opposite size of the zero from b. 
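The Halley refinement applied at the end of `derfcinv` above can be stated compactly: with f(x) = erfc(x) - y, f'(x) = -2/sqrt(pi) * exp(-x^2) and f''(x) = -2x * f'(x), one Halley step reduces to x - u/(1 + x*u) with u = f/f'. The sketch below (illustrative, not part of the patch; `halley_erfcinv_step` is a hypothetical helper using the standard-library `math.erfc` in place of `wcalcerfc`) mirrors that update:

```python
import math

def halley_erfcinv_step(x, y):
    """One Halley step toward erfc(x) == y, mirroring the update in derfcinv."""
    f = math.erfc(x) - y
    fprime = -2.0 / math.sqrt(math.pi) * math.exp(-x * x)
    u = f / fprime
    return x - u / (1.0 + x * u)

x = halley_erfcinv_step(0.4, 0.5)        # one step from a rough initial guess
assert abs(math.erfc(x) - 0.5) < 1e-3    # already close to erfcinv(0.5)
```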
*/ + if ((fb > 0) == (fc > 0)) + { + c = a; + fc = fa; + d = b - a; + e = d; + } + + { + double absFC; + double absFB; + + absFC=fabs(fc); + absFB=fabs(fb); + + if (absFC < absFB) + { + a = b; + b = c; + c = a; + fa = fb; + fb = fc; + fc = fa; + } + } + + /*set up for test of Convergence, is the interval small enough? */ + m = 0.5*(c - b); + + { + double absB, absM, absFA,absFB, absE; + absB=fabs(b); + absM=fabs(m); + absFA=fabs(fa); + absFB=fabs(fb); + absE=fabs(e); + + { + tolerance = 2.0*tol *((absB > 1.0) ? absB : 1.0); + + if ((absM <= tolerance) | (fb == 0.0)) + break; + + /*Choose bisection or interpolation */ + if ((absE < tolerance) | (absFA <= absFB)) + { + /*Bisection */ + d = m; + e = m; + } + else + { + /*Interpolation */ + s = fb/fa; + + if (a == c) + { + /*Linear interpolation */ + p = 2.0*m*s; + q = 1.0 - s; + } + else + { + /*Inverse quadratic interpolation */ + q = fa/fc; + r = fb/fc; + p = s*(2.0*m*q*(q - r) - (b - a)*(r - 1.0)); + q = (q - 1.0)*(r - 1.0)*(s - 1.0); + } + + if (p > 0) + q = -1.0*q; + else + p = -1.0*p; + } + } + + + { + double tempTolerance = tolerance*q; + double absToleranceQ; + double absEQ; + double tempEQ = (0.5 * e * q); + absToleranceQ=fabs(tempTolerance); + absEQ=fabs(tempEQ); + + /*Is interpolated point acceptable */ + if ((2.0*p < 3.0*m*q - absToleranceQ) & (p < absEQ)) + { + e = d; + d = p/q; + } + else + { + d = m; + e = m; + } + } + + } /*Interpolation */ + + /*Next point */ + a = b; + fa = fb; + + if (fabs(d) > tolerance) + b = b + d; + else if (b > c) + b = b - tolerance; + else + b = b + tolerance; + + fb = weibull_scale_likelihood(b,x0,frequency,meanUncensored,size); + + }/*Main loop (While) */ + + fval=weibull_scale_likelihood(b,x0,frequency,meanUncensored,size); + *likelihood_value = fval; + *sigmahat=b; + + return 1; +} + +static int wnorminv(double* x, double* p,double *mu, double* sigma, int size) +{ + double* tempP = (double *)malloc(sizeof(double)*4); + double* tempMU = (double *)malloc(sizeof(double)*4); + double* tempSigma = (double *)malloc(sizeof(double)*4); + + tempP[0]=p[0]; + tempP[1]=p[0]; + tempP[2]=p[1]; + tempP[3]=p[1]; + + tempMU[0]=mu[0]; + tempMU[1]=mu[0]; + tempMU[2]=mu[1]; + tempMU[3]=mu[1]; + + tempSigma[0]=sigma[0]; + tempSigma[1]=sigma[0]; + tempSigma[2]=sigma[1]; + tempSigma[3]=sigma[1]; + + { + double myTemp; + double tempVal1, tempVal2; + double* x0 = (double*)malloc(sizeof(double)*4); + + myTemp=tempP[0]; + { + double terfc=derfcinv(2*myTemp); + if(terfc==-9999) WEIBULL_ERROR_HANDLER(-7,"wnorminv fails since derfcinv"); + tempVal1=(-1*sqrt((double)2))* terfc; + + myTemp=tempP[2]; + terfc=derfcinv(2*myTemp); + if(terfc==-9999) WEIBULL_ERROR_HANDLER(-7,"wnorminv fails since derfcinv"); + tempVal2=(-1*sqrt((double)2))* terfc; + } + + x0[0]=tempVal1; + x0[2]=tempVal1; + x0[1]=tempVal2; + x0[3]=tempVal2; + { + int i; + for (i=0; i0); +#else + if(inputData[i]<=0) + WEIBULL_ERROR_HANDLER( -1,"cannot have data <=0 in call to weibull_fit\n"); +#endif +#endif + + inputData[i]=log(inputData[i]); + } + /* ********************************************** */ + { + double mySum; + + mySum=0; + for (i=0; i0); + assert(n>1); +#else + /* too much uncensored data means a plateau with no max */ + if(nuncensored<=0) WEIBULL_ERROR_HANDLER(-2,"Insufficient distinct data, hit a plateau in weibull_fit\n"); +#endif +#endif + + } + } + + /* declar local for max/range computation ********************************************** */ + { + double maxVal, minVal; + double range, maxx; + double tempVal; + + maxVal=-1000000000; + 
minVal=1000000000; + + for (i=0; i maxVal) + maxVal=tempVal; + } + + range = maxVal - minVal; + maxx = maxVal; +#ifndef WEIBULL_IGNORE_ERRORS +#ifdef WEIBULL_USE_ASSERTS + assert(range>0); +#else + if(range<=0) WEIBULL_ERROR_HANDLER(-2,"Insufficient distinct data range in weibull_fit\n"); +#endif +#endif + /*Shift x to max(x) == 0, min(x) = -1 to make likelihood eqn more stable. */ + /* ********************************************** */ + { + double mean, myStd; + double sigmahat; + double meanUncensored; + double upper, lower; + double search_band[2]; + + + + for (i=0; i 0) + { + upper=sigmahat; + lower=0.5*upper; + + while((tempVal=weibull_scale_likelihood(lower,x0,frequency,meanUncensored,size)) > 0) + { + upper = lower; + lower = 0.5 * upper; + + if (lower < FULL_PRECISION_MIN) + { + WEIBULL_ERROR_HANDLER(-3,"MLE in wbfit Failed to converge leading for underflow in root finding\n"); + } + } + } + else + { + lower = sigmahat; + upper = 2.0 * lower; + + while ((tempVal=weibull_scale_likelihood(upper,x0,frequency,meanUncensored,size)) < 0) + { + lower=upper; + upper = 2 * lower; + /* check for overflow, no finite root */ +#ifndef WEIBULL_IGNORE_ERRORS +#ifdef WEIBULL_USE_ASSERTS + assert(upper <= FULL_PRECISION_MAX); +#else + if(upper > FULL_PRECISION_MAX) WEIBULL_ERROR_HANDLER(-3,"MLE in wbfit Failed to converge leading for overflow in root finding\n"); +#endif +#endif + } + } + /* ****************************************** */ + search_band[0]=lower; + search_band[1]=upper; + + /* ... Next we go find the root (zero) of the likelihood eqn which wil be the MLE for sigma. */ + /* then the MLE for mu has an explicit formula from that. */ + + { + double err; + double likelihood_value; + + + code = wdfzero(&sigmahat,&likelihood_value,&err,search_band,tol,x0,frequency,meanUncensored,size); + +#ifndef WEIBULL_IGNORE_ERRORS +#ifdef WEIBULL_USE_ASSERTS + assert(code == 1); +#else + if(code != 1) WEIBULL_ERROR_HANDLER(-4, "weibull_fit failed, could not find solution in MLE. Probably has insufficnt data distribution (e.g. all same value)...\n"); +#endif +#endif + } + + /* ****************************************** */ + { + double muHat; + double sumfrequency; + + muHat=0; + sumfrequency=0; + + for (i=0; i1 + * + */ +double weibull_inv(double x, double scale, double shape); + + /** + weibull_fit does a maximum likelihood fitting to estimate the shape and scale parameters of a weibull probability distributon @f[ \frac{shape}{scale} \left(\frac{x}{scale} \cdot e^{-{\left(\frac{x}{scale}\right)}^{shape}}\right)@f] + + @param weibull_parms is an array of 2 doubles, which must be preallocated. On successful completeion it will have shape and scale respectively. + @param wparm_confidenceintervals is an array of 4 doubles, which must be preallocated. On successful completeion it will have confidence interval for shape in the first two item and the CI for scale in the second two items + @param inputData is a pointer the data to use for fitting the distribution. It must have at least size elements + @param size is the size of the data to be used for fitting. + @param alpha is parameter for Confidence interval size estimation. + @return return should be 1 if all went well. Values < 0 imply errors in fitting or data. -1 means some data was negative, -2 means bad data range (e.g. all the same) -3 or lower means MLE did not converge. 
+ + */ +int weibull_fit(double* weibullparms, double* wparm_confidenceintervals, double* inputData, double alpha, int size); + + + /** + Print information about this build to a file descriptor. Used for checking what is loaded for supporting people + */ +void printWeibullBuildInfo(FILE *fh); +#ifdef __cplusplus +} +#endif + +#endif +#endif diff --git a/Downstream/Open-Set-Action-Recognition/experiments/mae/finetune_mae_edlnokl_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/mae/finetune_mae_edlnokl_ucf101.sh new file mode 100644 index 0000000..0bd640a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/mae/finetune_mae_edlnokl_ucf101.sh @@ -0,0 +1,15 @@ +#!/bin/bash +pwd_dir=$pwd +cd ../../ +GPUS=$1 +PORT=${PORT:-29498} + +python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT tools/train.py configs/recognition/mae/finetune_ucf101_mae_edlnokl.py \ + --work-dir work_dirs/mae/ky \ + --validate \ + --seed 0 \ + --deterministic \ + --launcher pytorch \ + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/mae/run_get_mae_threshold.sh b/Downstream/Open-Set-Action-Recognition/experiments/mae/run_get_mae_threshold.sh new file mode 100644 index 0000000..a077b78 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/mae/run_get_mae_threshold.sh @@ -0,0 +1,67 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +# DEVICE=$1 +MODEL=$1 +BATCHSIZE=$2 +TRAIN_DATA='data/ucf101/ucf101_train_split_1_videos.txt' +RESULT_DIR='experiments/mae/results' +GPUS=$3 +PORT=${PORT:-29500} + +case ${MODEL} in + dnn) + # get the BALD threshold for i3d model trained on UCF-101 + python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT experiments/get_threshold_dist.py \ + --config configs/recognition/mae/inference_vae_dnn.py \ + --checkpoint work_dirs/vae/finetune_ucf101_vae_dnn/latest.pth \ + --train_data ${TRAIN_DATA} \ + --batch_size ${BATCHSIZE} \ + --uncertainty BALD \ + --launcher pytorch \ + --result_prefix ${RESULT_DIR}/MAE_DNN_BALD + ;; + bnn) + # get the BALD threshold for I3D_BNN model trained on UCF-101 + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/get_threshold.py \ + --config configs/recognition/i3d/inference_i3d_bnn.py \ + --checkpoint work_dirs/i3d/finetune_ucf101_i3d_bnn/latest.pth \ + --train_data ${TRAIN_DATA} \ + --batch_size ${BATCHSIZE} \ + --uncertainty BALD \ + --result_prefix ${RESULT_DIR}/I3D_BNN_BALD + ;; + edlnokl) + # get the EDL threshold for I3D_EDL model trained on UCF-101 + python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT experiments/get_threshold_dist.py \ + --config configs/recognition/mae/inference_mae_enn.py \ + --checkpoint work_dirs/mae/ky/latest.pth \ + --train_data ${TRAIN_DATA} \ + --batch_size ${BATCHSIZE} \ + --uncertainty EDL \ + --result_prefix ${RESULT_DIR}/MAE_EDLNoKL_EDL \ + --launcher pytorch + ;; + edlnokl_avuc_debias) + # get the EDL threshold for I3D_EDL_AvUC_Debias model trained on UCF-101 + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/get_threshold.py \ + --config configs/recognition/i3d/inference_i3d_enn.py \ + --checkpoint work_dirs/i3d/finetune_ucf101_i3d_edlnokl_avuc_debias/latest.pth \ + --train_data ${TRAIN_DATA} \ + --batch_size ${BATCHSIZE} \ + --uncertainty EDL \ + --result_prefix ${RESULT_DIR}/I3D_EDLNoKLAvUCDebias_EDL + ;; + *) + echo "Invalid model: "${MODEL} + exit + ;; +esac + + +cd $pwd_dir +echo "Experiments finished!" 
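
As an aside on the Weibull fitting code above: wcalcerfc and derfcinv reimplement erfc and its inverse from Cody's rational Chebyshev approximations, and weibull_fit performs the Weibull MLE on top of them. The snippet below is only an independent sanity check against SciPy, which this patch does not use; scipy.special.erfc/erfcinv and scipy.stats.weibull_min are the reference implementations assumed here.

```python
# Independent cross-check (not part of this patch): compare the quantities the C
# helpers compute -- erfc, its inverse, and a Weibull MLE -- against SciPy references.
import numpy as np
from scipy.special import erfc, erfcinv
from scipy.stats import weibull_min

# erfcinv should undo erfc, which is what derfcinv() approximates
# (rational approximation plus one Halley refinement step).
x = np.linspace(-2.5, 2.5, 11)
print("max round-trip error:", np.max(np.abs(erfcinv(erfc(x)) - x)))

# Reference Weibull MLE: with the location fixed at 0, weibull_min.fit returns
# (shape, loc, scale), comparable to weibull_fit's estimated shape and scale.
data = weibull_min.rvs(c=1.5, scale=2.0, size=5000, random_state=0)
shape_hat, _, scale_hat = weibull_min.fit(data, floc=0)
print(f"shape ~ {shape_hat:.3f} (true 1.5), scale ~ {scale_hat:.3f} (true 2.0)")
```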
diff --git a/Downstream/Open-Set-Action-Recognition/experiments/mae/run_ood_mae_dist_detection.sh b/Downstream/Open-Set-Action-Recognition/experiments/mae/run_ood_mae_dist_detection.sh new file mode 100644 index 0000000..47d4fe6 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/mae/run_ood_mae_dist_detection.sh @@ -0,0 +1,82 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +#DEVICE=$1 +OOD_DATASET=$1 +MODEL=$2 +GPUS=$3 +PORT=${PORT:-29498} +IND_DATA='data/ucf101/ucf101_val_split_1_videos.txt' + +case ${OOD_DATASET} in + HMDB) + # run ood detection on hmdb-51 validation set + OOD_DATA='data/hmdb51/hmdb51_val_split_1_videos.txt' + ;; + MiT) + # run ood detection on hmdb-51 validation set + OOD_DATA='data/mit/mit_val_list_videos.txt' + ;; + *) + echo "Dataset not supported: "${OOD_DATASET} + exit + ;; +esac +RESULT_DIR='experiments/mae/results' + +case ${MODEL} in + dnn) + # DNN with Dropout model + python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT experiments/ood_detection_dist.py \ + --config configs/recognition/mae/inference_vae_dnn.py \ + --checkpoint work_dirs/mae/flow/latest.pth \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --uncertainty BALD \ + --result_prefix ${RESULT_DIR}/MAE_DNN_BALD_${OOD_DATASET} \ + --launcher pytorch + ;; + bnn) + # Evidential Deep Learning (without KL divergence loss term) + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/ood_detection.py \ + --config configs/recognition/i3d/inference_i3d_bnn.py \ + --checkpoint work_dirs/i3d/finetune_ucf101_i3d_bnn/latest.pth \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --uncertainty BALD \ + --result_prefix ${RESULT_DIR}/I3D_BNN_BALD_${OOD_DATASET} + ;; + edlnokl) + # Evidential Deep Learning (without KL divergence loss term) + python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT experiments/ood_detection_dist.py \ + --config configs/recognition/mae/inference_mae_enn.py \ + --checkpoint work_dirs/mae/ky/latest.pth \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --uncertainty EDL \ + --launcher pytorch \ + --result_prefix ${RESULT_DIR}/MAE_EDLNoKL_EDL_${OOD_DATASET} + ;; + edlnokl_avuc_debias) + # Evidential Deep Learning (without KL divergence loss term) with AvU Calibration and Debiasing + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/ood_detection.py \ + --config configs/recognition/i3d/inference_i3d_enn.py \ + --checkpoint work_dirs/i3d/finetune_ucf101_i3d_edlnokl_avuc_debias/latest.pth \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --uncertainty EDL \ + --result_prefix ${RESULT_DIR}/I3D_EDLNoKLAvUCCED_EDL_${OOD_DATASET} + ;; + *) + echo "Invalid model: "${MODEL} + exit + ;; +esac + + +cd $pwd_dir +echo "Experiments finished!" 
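
ood_detection.py, added next, scores each clip's uncertainty either as the predictive entropy or as BALD (mutual information) over several stochastic forward passes; its compute_uncertainty() implements exactly the expression sketched below, with the same (B, C, T) layout (batch, classes, forward passes).

```python
# Sketch of compute_uncertainty() from the ood_detection*.py scripts:
# Entropy = H[E_t[p]],  BALD = H[E_t[p]] - E_t[H[p]]  over T stochastic passes.
import numpy as np
from scipy.special import xlogy

def bald_and_entropy(predictions):            # predictions: (B, C, T) softmax outputs
    expected_p = predictions.mean(axis=-1)                                    # (B, C)
    entropy_of_mean = -np.sum(xlogy(expected_p, expected_p), axis=1)          # H[E[p]]
    expected_entropy = -np.mean(np.sum(xlogy(predictions, predictions), axis=1), axis=-1)  # E[H[p]]
    return entropy_of_mean - expected_entropy, entropy_of_mean                # (BALD, Entropy)

probs = np.random.dirichlet(np.ones(101), size=(4, 10)).transpose(0, 2, 1)    # (B=4, C=101, T=10)
bald, entropy = bald_and_entropy(probs)
print(bald.shape, entropy.shape)              # (4,) (4,) -- one score per clip
```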
diff --git a/Downstream/Open-Set-Action-Recognition/experiments/ood_detection.py b/Downstream/Open-Set-Action-Recognition/experiments/ood_detection.py new file mode 100644 index 0000000..90bbb3f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/ood_detection.py @@ -0,0 +1,259 @@ +import argparse +import os +import os.path as osp +import torch +import mmcv +from mmaction.apis import init_recognizer +from mmcv.parallel import collate, scatter +from operator import itemgetter +from mmaction.datasets.pipelines import Compose +from mmaction.datasets import build_dataloader, build_dataset +from mmcv.parallel import MMDataParallel +import numpy as np +from scipy.special import xlogy +import matplotlib.pyplot as plt +import matplotlib.ticker as ticker +from tqdm import tqdm +import torch.multiprocessing +torch.multiprocessing.set_sharing_strategy('file_system') + + +def parse_args(): + parser = argparse.ArgumentParser(description='MMAction2 test') + # model config + parser.add_argument('--config', help='test config file path') + parser.add_argument('--checkpoint', help='checkpoint file/url') + parser.add_argument('--uncertainty', choices=['BALD', 'Entropy', 'EDL'], help='the uncertainty estimation method') + parser.add_argument('--forward_pass', type=int, default=10, help='the number of forward passes') + # data config + parser.add_argument('--ind_data', help='the split file of in-distribution testing data') + parser.add_argument('--ood_data', help='the split file of out-of-distribution testing data') + # env config + parser.add_argument('--device', type=str, default='cuda:0', help='CPU/CUDA device option') + parser.add_argument('--result_prefix', help='result file prefix') + args = parser.parse_args() + return args + + +def apply_dropout(m): + if type(m) == torch.nn.Dropout: + m.train() + + +def update_seed(seed): + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + + +def compute_uncertainty(predictions, method='BALD'): + """Compute the entropy + scores: (B x C x T) + """ + expected_p = np.mean(predictions, axis=-1) # mean of all forward passes (C,) + entropy_expected_p = - np.sum(xlogy(expected_p, expected_p), axis=1) # the entropy of expect_p (across classes) + if method == 'Entropy': + uncertain_score = entropy_expected_p + elif method == 'BALD': + expected_entropy = - np.mean(np.sum(xlogy(predictions, predictions), axis=1), axis=-1) # mean of entropies (across classes), (scalar) + uncertain_score = entropy_expected_p - expected_entropy + else: + raise NotImplementedError + if not np.all(np.isfinite(uncertain_score)): + uncertain_score[~np.isfinite] = 9999 + return uncertain_score + + +def run_stochastic_inference(model, data_loader, npass=10): + # run inference + model = MMDataParallel(model, device_ids=[0]) + all_confidences, all_uncertainties, all_results, all_gts = [], [], [], [] + prog_bar = mmcv.ProgressBar(len(data_loader.dataset)) + for i, data in enumerate(data_loader): + all_scores = [] + with torch.no_grad(): + for n in range(npass): + # set new random seed + update_seed(n * 1234) + scores = model(return_loss=False, **data) + # gather results + all_scores.append(np.expand_dims(scores, axis=-1)) + all_scores = np.concatenate(all_scores, axis=-1) # (B, C, T) + # compute the uncertainty + uncertainty = compute_uncertainty(all_scores, method=args.uncertainty) + all_uncertainties.append(uncertainty) + + # compute the predictions and save labels + mean_scores = np.mean(all_scores, axis=-1) + preds = np.argmax(mean_scores, axis=1) + all_results.append(preds) + 
conf = np.max(mean_scores, axis=1) + all_confidences.append(conf) + + labels = data['label'].numpy() + all_gts.append(labels) + + # use the first key as main key to calculate the batch size + batch_size = len(next(iter(data.values()))) + for _ in range(batch_size): + prog_bar.update() + all_confidences = np.concatenate(all_confidences, axis=0) + all_uncertainties = np.concatenate(all_uncertainties, axis=0) + all_results = np.concatenate(all_results, axis=0) + all_gts = np.concatenate(all_gts, axis=0) + + return all_confidences, all_uncertainties, all_results, all_gts + + +def run_evidence_inference(model, data_loader, evidence='exp'): + # set new random seed + update_seed(1234) + # get the evidence function + if evidence == 'relu': + from mmaction.models.losses.edl_loss import relu_evidence as get_evidence + elif evidence == 'exp': + from mmaction.models.losses.edl_loss import exp_evidence as get_evidence + elif evidence == 'softplus': + from mmaction.models.losses.edl_loss import softplus_evidence as get_evidence + else: + raise NotImplementedError + num_classes = model.cls_head.num_classes + + # run inference + model = MMDataParallel(model, device_ids=[0]) + all_confidences, all_uncertainties, all_results, all_gts = [], [], [], [] + prog_bar = mmcv.ProgressBar(len(data_loader.dataset)) + for i, data in enumerate(data_loader): + with torch.no_grad(): + output = model(return_loss=False, **data) + evidence = get_evidence(torch.from_numpy(output)) + alpha = evidence + 1 + uncertainty = num_classes / torch.sum(alpha, dim=1) + scores = alpha / torch.sum(alpha, dim=1, keepdim=True) + all_uncertainties.append(uncertainty.numpy()) + # compute the predictions and save labels + preds = np.argmax(scores.numpy(), axis=1) + all_results.append(preds) + conf = np.max(scores.numpy(), axis=1) + all_confidences.append(conf) + + labels = data['label'].numpy() + all_gts.append(labels) + + # use the first key as main key to calculate the batch size + batch_size = len(next(iter(data.values()))) + for _ in range(batch_size): + prog_bar.update() + all_confidences = np.concatenate(all_confidences, axis=0) + all_uncertainties = np.concatenate(all_uncertainties, axis=0) + all_results = np.concatenate(all_results, axis=0) + all_gts = np.concatenate(all_gts, axis=0) + + return all_confidences, all_uncertainties, all_results, all_gts + + +def run_inference(model, datalist_file, npass=10): + # switch config for different dataset + cfg = model.cfg + cfg.data.test.ann_file = datalist_file + cfg.data.test.data_prefix = os.path.join(os.path.dirname(datalist_file), 'videos') + evidence = cfg.get('evidence', 'exp') + + # build the dataloader + dataset = build_dataset(cfg.data.test, dict(test_mode=True)) + dataloader_setting = dict( + videos_per_gpu=cfg.data.get('videos_per_gpu', 1), + workers_per_gpu=cfg.data.get('workers_per_gpu', 1), + dist=False, + shuffle=False, + pin_memory=False) + dataloader_setting = dict(dataloader_setting, **cfg.data.get('test_dataloader', {})) + data_loader = build_dataloader(dataset, **dataloader_setting) + + if not args.uncertainty == 'EDL': + all_confidences, all_uncertainties, all_results, all_gts = run_stochastic_inference(model, data_loader, npass) + else: + all_confidences, all_uncertainties, all_results, all_gts = run_evidence_inference(model, data_loader, evidence) + return all_confidences, all_uncertainties, all_results, all_gts + + +def main(): + + # build the recognizer from a config file and checkpoint file/url + model = init_recognizer( + args.config, + args.checkpoint, + 
device=device, + use_frames=False) + cfg = model.cfg + if not args.uncertainty == 'EDL': + # use dropout in testing stage + if 'dnn' in args.config: + model.apply(apply_dropout) + if 'bnn' in args.config: + model.test_cfg.npass = 1 + # set cudnn benchmark + torch.backends.cudnn.benchmark = True + cfg.data.test.test_mode = True + + result_file = os.path.join(args.result_prefix + '_result.npz') + if not os.path.exists(result_file): + # prepare result path + result_dir = os.path.dirname(result_file) + if not os.path.exists(result_dir): + os.makedirs(result_dir) + # run inference (OOD) + ood_confidences, ood_uncertainties, ood_results, ood_labels = run_inference(model, args.ood_data, npass=args.forward_pass) + # run inference (IND) + ind_confidences, ind_uncertainties, ind_results, ind_labels = run_inference(model, args.ind_data, npass=args.forward_pass) + # save + np.savez(result_file[:-4], ind_conf=ind_confidences, ood_conf=ood_confidences, + ind_unctt=ind_uncertainties, ood_unctt=ood_uncertainties, + ind_pred=ind_results, ood_pred=ood_results, + ind_label=ind_labels, ood_label=ood_labels) + else: + results = np.load(result_file, allow_pickle=True) + ind_confidences = results['ind_conf'] + ood_confidences = results['ood_conf'] + ind_uncertainties = results['ind_unctt'] # (N1,) + ood_uncertainties = results['ood_unctt'] # (N2,) + ind_results = results['ind_pred'] # (N1,) + ood_results = results['ood_pred'] # (N2,) + ind_labels = results['ind_label'] + ood_labels = results['ood_label'] + # visualize + ind_uncertainties = np.array(ind_uncertainties) + ind_uncertainties = (ind_uncertainties-np.min(ind_uncertainties)) / (np.max(ind_uncertainties) - np.min(ind_uncertainties)) # normalize + ood_uncertainties = np.array(ood_uncertainties) + ood_uncertainties = (ood_uncertainties-np.min(ood_uncertainties)) / (np.max(ood_uncertainties) - np.min(ood_uncertainties)) # normalize + dataName_ind = args.ind_data.split('/')[-2].upper() + dataName_ood = args.ood_data.split('/')[-2].upper() + if dataName_ind == 'UCF101': + dataName_ind = 'UCF-101' + if dataName_ood == 'MIT': + dataName_ood = 'MiT-v2' + if dataName_ood == 'HMDB51': + dataName_ood = 'HMDB-51' + plt.figure(figsize=(5,4)) # (w, h) + plt.rcParams["font.family"] = "Arial" # Times New Roman + fontsize = 15 + plt.hist([ind_uncertainties, ood_uncertainties], 50, + density=True, histtype='bar', color=['blue', 'red'], + label=['in-distribution (%s)'%(dataName_ind), 'out-of-distribution (%s)'%(dataName_ood)]) + plt.legend(fontsize=fontsize) + plt.xlabel('%s uncertainty'%(args.uncertainty), fontsize=fontsize) + plt.ylabel('density', fontsize=fontsize) + plt.xticks(fontsize=fontsize) + plt.yticks(fontsize=fontsize) + plt.xlim(0, 1.01) + plt.ylim(0, 10.01) + plt.tight_layout() + plt.savefig(os.path.join(args.result_prefix + '_distribution.png')) + plt.savefig(os.path.join(args.result_prefix + '_distribution.pdf')) + +if __name__ == '__main__': + + args = parse_args() + # assign the desired device. 
+ device = torch.device(args.device) + + main() diff --git a/Downstream/Open-Set-Action-Recognition/experiments/ood_detection_dist.py b/Downstream/Open-Set-Action-Recognition/experiments/ood_detection_dist.py new file mode 100644 index 0000000..35fa85d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/ood_detection_dist.py @@ -0,0 +1,407 @@ +import argparse +import os +import os.path as osp +import torch +import mmcv +from mmcv import Config, DictAction +from mmaction.apis import init_recognizer +from mmcv.parallel import collate, scatter +from operator import itemgetter +from mmaction.datasets.pipelines import Compose +from mmaction.datasets import build_dataloader, build_dataset +from mmcv.parallel import MMDataParallel, MMDistributedDataParallel +from mmcv.runner import get_dist_info, init_dist, load_checkpoint +import numpy as np +from scipy.special import xlogy +import matplotlib.pyplot as plt +import matplotlib.ticker as ticker +from tqdm import tqdm +import torch.multiprocessing +torch.multiprocessing.set_sharing_strategy('file_system') +import pdb +from mmaction.models import build_model +from mmcv.cnn import fuse_conv_bn +from mmaction.apis import collect_results_cpu + +def parse_args(): + parser = argparse.ArgumentParser(description='MMAction2 test') + # model config + parser.add_argument('--config', help='test config file path') + parser.add_argument('--checkpoint', help='checkpoint file/url') + parser.add_argument('--uncertainty', choices=['BALD', 'Entropy', 'EDL'], help='the uncertainty estimation method') + parser.add_argument('--forward_pass', type=int, default=10, help='the number of forward passes') + # data config + parser.add_argument('--ind_data', help='the split file of in-distribution testing data') + parser.add_argument('--ood_data', help='the split file of out-of-distribution testing data') + # env config + parser.add_argument('--device', type=str, default='cuda:0', help='CPU/CUDA device option') + parser.add_argument('--result_prefix', help='result file prefix') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + default={}, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. 
For example, ' + "'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'") + parser.add_argument( + '--fuse-conv-bn', + action='store_true', + help='Whether to fuse conv and bn, this will slightly increase' + 'the inference speed') + + args = parser.parse_args() + return args + + +def apply_dropout(m): + if type(m) == torch.nn.Dropout: + m.train() + + +def update_seed(seed): + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + + +def compute_uncertainty(predictions, method='BALD'): + """Compute the entropy + scores: (B x C x T) + """ + expected_p = np.mean(predictions, axis=-1) # mean of all forward passes (C,) + entropy_expected_p = - np.sum(xlogy(expected_p, expected_p), axis=1) # the entropy of expect_p (across classes) + if method == 'Entropy': + uncertain_score = entropy_expected_p + elif method == 'BALD': + expected_entropy = - np.mean(np.sum(xlogy(predictions, predictions), axis=1), axis=-1) # mean of entropies (across classes), (scalar) + uncertain_score = entropy_expected_p - expected_entropy + else: + raise NotImplementedError + if not np.all(np.isfinite(uncertain_score)): + uncertain_score[~np.isfinite] = 9999 + return uncertain_score + + +def run_stochastic_inference(model, data_loader, npass=10): + # run inference + # model = MMDataParallel(model, device_ids=[0]) + # model = MMDistributedDataParallel( + # model.cuda(), + # device_ids=[torch.cuda.current_device()], + # broadcast_buffers=False) + + model.eval() + + all_confidences, all_uncertainties, all_results, all_gts = [], [], [], [] + # prog_bar = mmcv.ProgressBar(len(data_loader.dataset)) + rank, world_size = get_dist_info() + if rank == 0: + prog_bar = mmcv.ProgressBar(len(data_loader.dataset)) + for i, data in enumerate(data_loader): + all_scores = [] + with torch.no_grad(): + for n in range(npass): + # set new random seed + update_seed(n * 1234) + #-original-- + # scores = model(return_loss=False, **data) + # # gather results + # all_scores.append(np.expand_dims(scores, axis=-1)) + # all_scores = np.concatenate(all_scores, axis=-1) # (B, C, T) + # # compute the uncertainty + # uncertainty = compute_uncertainty(all_scores, method=args.uncertainty) + # all_uncertainties.append(uncertainty) + #---vae--------------------------------- + # # pdb.set_trace() + # scores, recon = model(return_loss=False, **data) + # uncertainty = recon + # all_scores.append(np.expand_dims(scores, axis=-1)) + # all_scores = np.concatenate(all_scores, axis=-1) # (B, C, T) + # all_uncertainties.append(uncertainty) + #----------------------------------------------- + #---FLOW--------------------------------- + # pdb.set_trace() + scores, logpx = model(return_loss=False, **data) + uncertainty = logpx + all_scores.append(np.expand_dims(scores, axis=-1)) + all_scores = np.concatenate(all_scores, axis=-1) # (B, C, T) + all_uncertainties.append(uncertainty) + #----------------------------------------------- + + # compute the predictions and save labels + mean_scores = np.mean(all_scores, axis=-1) + preds = np.argmax(mean_scores, axis=1) + all_results.append(preds) + conf = np.max(mean_scores, axis=1) + all_confidences.append(conf) + + labels = data['label'].numpy() + all_gts.append(labels) + + # # use the first key as main key to calculate the batch size + # batch_size = len(next(iter(data.values()))) + # for _ in range(batch_size): + # prog_bar.update() + + if rank == 0: + # use the first key as main key to calculate the batch size + batch_size = len(next(iter(data.values()))) + for _ in range(batch_size * world_size): + 
prog_bar.update() + # pdb.set_trace() + all_confidences = collect_results_cpu(all_confidences, len(data_loader.dataset), tmpdir=None) + all_uncertainties = collect_results_cpu(all_uncertainties, len(data_loader.dataset), tmpdir=None) + all_results = collect_results_cpu(all_results, len(data_loader.dataset), tmpdir=None) + all_gts = collect_results_cpu(all_gts, len(data_loader.dataset), tmpdir=None) + rank, _ = get_dist_info() + if rank == 0: + all_confidences = np.concatenate(all_confidences, axis=0) + all_uncertainties = np.concatenate(all_uncertainties, axis=0) + all_results = np.concatenate(all_results, axis=0) + all_gts = np.concatenate(all_gts, axis=0) + # pdb.set_trace() + return all_confidences, all_uncertainties, all_results, all_gts + + +def run_evidence_inference(model, data_loader, evidence='exp'): + # set new random seed + update_seed(1234) + # get the evidence function + if evidence == 'relu': + from mmaction.models.losses.edl_loss import relu_evidence as get_evidence + elif evidence == 'exp': + from mmaction.models.losses.edl_loss import exp_evidence as get_evidence + elif evidence == 'softplus': + from mmaction.models.losses.edl_loss import softplus_evidence as get_evidence + else: + raise NotImplementedError + # pdb.set_trace() + num_classes = 101 + model.eval() + + # run inference + # model = MMDataParallel(model, device_ids=[0]) + all_confidences, all_uncertainties, all_results, all_gts = [], [], [], [] + # prog_bar = mmcv.ProgressBar(len(data_loader.dataset)) + rank, world_size = get_dist_info() + if rank == 0: + prog_bar = mmcv.ProgressBar(len(data_loader.dataset)) + for i, data in enumerate(data_loader): + with torch.no_grad(): + output = model(return_loss=False, **data) + evidence = get_evidence(torch.from_numpy(output)) + alpha = evidence + 1 + uncertainty = num_classes / torch.sum(alpha, dim=1) + scores = alpha / torch.sum(alpha, dim=1, keepdim=True) + all_uncertainties.append(uncertainty.numpy()) + + #---vae--------------------------------- + # output, recon = model(return_loss=False, **data) + # evidence = get_evidence(torch.from_numpy(output)) + # alpha = evidence + 1 + # scores = alpha / torch.sum(alpha, dim=1, keepdim=True) + # uncertainty = recon + # all_uncertainties.append(uncertainty) + #------------------------------------------- + # pdb.set_trace() + # compute the predictions and save labels + preds = np.argmax(scores.numpy(), axis=1) + all_results.append(preds) + conf = np.max(scores.numpy(), axis=1) + all_confidences.append(conf) + + labels = data['label'].numpy() + all_gts.append(labels) + + # use the first key as main key to calculate the batch size + # batch_size = len(next(iter(data.values()))) + # for _ in range(batch_size): + # prog_bar.update() + if rank == 0: + # use the first key as main key to calculate the batch size + batch_size = len(next(iter(data.values()))) + for _ in range(batch_size * world_size): + prog_bar.update() + # pdb.set_trace() + all_confidences = collect_results_cpu(all_confidences, len(data_loader.dataset), tmpdir=None) + all_uncertainties = collect_results_cpu(all_uncertainties, len(data_loader.dataset), tmpdir=None) + all_results = collect_results_cpu(all_results, len(data_loader.dataset), tmpdir=None) + all_gts = collect_results_cpu(all_gts, len(data_loader.dataset), tmpdir=None) + rank, _ = get_dist_info() + if rank == 0: + # pdb.set_trace() + all_confidences = np.concatenate(all_confidences, axis=0) + all_uncertainties = np.concatenate(all_uncertainties, axis=0) + all_results = np.concatenate(all_results, axis=0) + 
all_gts = np.concatenate(all_gts, axis=0) + # pdb.set_trace() + return all_confidences, all_uncertainties, all_results, all_gts + + +def run_inference(model, distributed, cfg, datalist_file, npass=10): + # switch config for different dataset + # cfg = model.cfg + + cfg.data.test.ann_file = datalist_file + cfg.data.test.data_prefix = os.path.join(os.path.dirname(datalist_file), 'videos') + evidence = cfg.get('evidence', 'exp') + + # build the dataloader + dataset = build_dataset(cfg.data.test, dict(test_mode=True)) + dataloader_setting = dict( + videos_per_gpu=cfg.data.get('videos_per_gpu', 1), + workers_per_gpu=cfg.data.get('workers_per_gpu', 1), + dist=distributed, + shuffle=False, + pin_memory=False) + dataloader_setting = dict(dataloader_setting, **cfg.data.get('test_dataloader', {})) + data_loader = build_dataloader(dataset, **dataloader_setting) + + # pdb.set_trace() + if not args.uncertainty == 'EDL': + all_confidences, all_uncertainties, all_results, all_gts = run_stochastic_inference(model, data_loader, npass) + else: + all_confidences, all_uncertainties, all_results, all_gts = run_evidence_inference(model, data_loader, evidence) + return all_confidences, all_uncertainties, all_results, all_gts + + +def main(): + # pdb.set_trace() + # build the recognizer from a config file and checkpoint file/url + args = parse_args() + cfg = Config.fromfile(args.config) + cfg.merge_from_dict(args.cfg_options) + + if args.launcher == 'none': + distributed = False + else: + distributed = True + init_dist(args.launcher, **cfg.dist_params) + + model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg) + fp16_cfg = cfg.get('fp16', None) + if fp16_cfg is not None: + wrap_fp16_model(model) + load_checkpoint(model, args.checkpoint, map_location='cpu') + print('cpk is loaded:', args.checkpoint) + + # model = init_recognizer( + # args.config, + # args.checkpoint, + # device=device, + # use_frames=False) + # cfg = model.cfg + + if args.fuse_conv_bn: + model = fuse_conv_bn(model) + + + model = MMDistributedDataParallel( + model.cuda(), + device_ids=[torch.cuda.current_device()], + broadcast_buffers=False) + + # if not args.uncertainty == 'EDL': + # # use dropout in testing stage + # if 'dnn' in args.config: + # model.apply(apply_dropout) + # if 'bnn' in args.config: + # model.test_cfg.npass = 1 + + # set cudnn benchmark + torch.backends.cudnn.benchmark = True + cfg.data.test.test_mode = True + + result_file = os.path.join(args.result_prefix + '_result.npz') + if not os.path.exists(result_file): + # prepare result path + result_dir = os.path.dirname(result_file) + if not os.path.exists(result_dir): + # os.makedirs(result_dir) + os.makedirs(result_dir, exist_ok=True) + + # run inference (OOD) + # pdb.set_trace() + ood_confidences, ood_uncertainties, ood_results, ood_labels = run_inference(model, distributed, cfg, args.ood_data, npass=args.forward_pass) + # run inference (IND) + # pdb.set_trace() + ind_confidences, ind_uncertainties, ind_results, ind_labels = run_inference(model, distributed, cfg, args.ind_data, npass=args.forward_pass) + + # save + np.savez(result_file[:-4], ind_conf=ind_confidences, ood_conf=ood_confidences, + ind_unctt=ind_uncertainties, ood_unctt=ood_uncertainties, + ind_pred=ind_results, ood_pred=ood_results, + ind_label=ind_labels, ood_label=ood_labels) + else: + results = np.load(result_file, allow_pickle=True) + ind_confidences = results['ind_conf'] + ood_confidences = results['ood_conf'] + ind_uncertainties = results['ind_unctt'] # (N1,) + ood_uncertainties = 
results['ood_unctt'] # (N2,) + ind_results = results['ind_pred'] # (N1,) + ood_results = results['ood_pred'] # (N2,) + ind_labels = results['ind_label'] + ood_labels = results['ood_label'] + # visualize + # pdb.set_trace() + # torch.distributed.barrier() + # ood_confidences = np.concatenate(ood_confidences, axis=0) + # ood_uncertainties = np.concatenate(ood_uncertainties, axis=0) + # ood_results = np.concatenate(ood_results, axis=0) + # ood_labels = np.concatenate(ood_labels, axis=0) + # ind_confidences = np.concatenate(ind_confidences, axis=0) + # ind_uncertainties = np.concatenate(ind_uncertainties, axis=0) + # ind_results = np.concatenate(ind_results, axis=0) + # ind_labels = np.concatenate(ind_labels, axis=0) + # pdb.set_trace() + rank, _ = get_dist_info() + if rank == 0: + ind_uncertainties = np.array(ind_uncertainties.squeeze()) + # ind_uncertainties = (ind_uncertainties-np.min(ind_uncertainties)) / (np.max(ind_uncertainties) - np.min(ind_uncertainties)) # normalize + ood_uncertainties = np.array(ood_uncertainties.squeeze()) + # ood_uncertainties = (ood_uncertainties-np.min(ood_uncertainties)) / (np.max(ood_uncertainties) - np.min(ood_uncertainties)) # normalize + + # ind和ood合在一起nomalize + all_uncertainties = np.concatenate((ind_uncertainties, ood_uncertainties)) + ind_uncertainties = (ind_uncertainties-np.min(all_uncertainties)) / (np.max(all_uncertainties) - np.min(all_uncertainties)) # normalize + ood_uncertainties = (ood_uncertainties-np.min(all_uncertainties)) / (np.max(all_uncertainties) - np.min(all_uncertainties)) # normalize + + dataName_ind = args.ind_data.split('/')[-2].upper() + dataName_ood = args.ood_data.split('/')[-2].upper() + if dataName_ind == 'UCF101': + dataName_ind = 'UCF-101' + if dataName_ood == 'MIT': + dataName_ood = 'MiT-v2' + if dataName_ood == 'HMDB51': + dataName_ood = 'HMDB-51' + plt.figure(figsize=(5,4)) # (w, h) + plt.rcParams["font.family"] = "Arial" # Times New Roman + fontsize = 15 + # pdb.set_trace() + plt.hist([ind_uncertainties, ood_uncertainties], 50, + density=True, histtype='bar', color=['blue', 'red'], + label=['in-distribution (%s)'%(dataName_ind), 'out-of-distribution (%s)'%(dataName_ood)]) + plt.legend(fontsize=fontsize) + plt.xlabel('%s uncertainty'%(args.uncertainty), fontsize=fontsize) + plt.ylabel('density', fontsize=fontsize) + plt.xticks(fontsize=fontsize) + plt.yticks(fontsize=fontsize) + plt.xlim(0, 1.01) + plt.ylim(0, 10.01) + plt.tight_layout() + plt.savefig(os.path.join(args.result_prefix + '_distribution.png')) + plt.savefig(os.path.join(args.result_prefix + '_distribution.pdf')) + +if __name__ == '__main__': + + args = parse_args() + # assign the desired device. 
+ device = torch.device(args.device) + + main() diff --git a/Downstream/Open-Set-Action-Recognition/experiments/slowfast/baseline_slowfast_openmax.sh b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/baseline_slowfast_openmax.sh new file mode 100644 index 0000000..6f3d6f8 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/baseline_slowfast_openmax.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 +OOD_DATASET=$2 +IND_DATA='data/ucf101/ucf101_val_split_1_videos.txt' + +case ${OOD_DATASET} in + HMDB) + # run ood detection on hmdb-51 validation set + OOD_DATA='data/hmdb51/hmdb51_val_split_1_videos.txt' + NUM_CLASSES=51 + ;; + MiT) + # run ood detection on mit-v2 validation set + OOD_DATA='data/mit/mit_val_list_videos.txt' + NUM_CLASSES=305 + ;; + *) + echo "Dataset not supported: "${OOD_DATASET} + exit + ;; +esac +RESULT_DIR='experiments/slowfast/results' + +CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/baseline_openmax.py \ + --config configs/recognition/slowfast/inference_slowfast_dnn.py \ + --checkpoint work_dirs/slowfast/finetune_ucf101_slowfast_dnn/latest.pth \ + --cache_mav_dist experiments/slowfast/results_baselines/openmax/ucf101_mav_dist \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --ood_ncls ${NUM_CLASSES} \ + --result_prefix experiments/slowfast/results_baselines/openmax/SlowFast_OpenMax_${OOD_DATASET} + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/slowfast/baseline_slowfast_rpl.sh b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/baseline_slowfast_rpl.sh new file mode 100644 index 0000000..0d4cae4 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/baseline_slowfast_rpl.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 +OOD_DATASET=$2 +IND_DATA='data/ucf101/ucf101_val_split_1_videos.txt' + +case ${OOD_DATASET} in + HMDB) + # run ood detection on hmdb-51 validation set + OOD_DATA='data/hmdb51/hmdb51_val_split_1_videos.txt' + NUM_CLASSES=51 + ;; + MiT) + # run ood detection on mit-v2 validation set + OOD_DATA='data/mit/mit_val_list_videos.txt' + NUM_CLASSES=305 + ;; + *) + echo "Dataset not supported: "${OOD_DATASET} + exit + ;; +esac + +CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/baseline_rpl.py \ + --config configs/recognition/slowfast/inference_slowfast_rpl.py \ + --checkpoint work_dirs/slowfast/finetune_ucf101_slowfast_rpl/latest.pth \ + --train_data data/ucf101/ucf101_train_split_1_videos.txt \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --ood_ncls ${NUM_CLASSES} \ + --ood_dataname ${OOD_DATASET} \ + --result_prefix experiments/slowfast/results_baselines/rpl/RPL + +cd $pwd_dir +echo "Experiments finished!" 
\ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/slowfast/baseline_slowfast_softmax.sh b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/baseline_slowfast_softmax.sh new file mode 100644 index 0000000..4c01d6e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/baseline_slowfast_softmax.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 +OOD_DATASET=$2 + +case ${OOD_DATASET} in + HMDB) + NUM_CLASSES=51 + ;; + MiT) + NUM_CLASSES=305 + ;; + *) + echo "Dataset not supported: "${OOD_DATASET} + exit + ;; +esac +RESULT_DIR='experiments/slowfast/results' + +CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/baseline_softmax.py \ + --config configs/recognition/slowfast/inference_slowfast_dnn.py \ + --checkpoint work_dirs/slowfast/finetune_ucf101_slowfast_dnn/latest.pth \ + --batch_size 1 \ + --ood_ncls ${NUM_CLASSES} \ + --result_prefix experiments/slowfast/results_baselines/softmax/SlowFast_SoftMax_${OOD_DATASET} + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/slowfast/evaluate_slowfast_bnn_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/evaluate_slowfast_bnn_ucf101.sh new file mode 100644 index 0000000..bf6b79d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/evaluate_slowfast_bnn_ucf101.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/test.py configs/recognition/slowfast/finetune_ucf101_slowfast_bnn.py \ + work_dirs/slowfast/finetune_ucf101_slowfast_bnn/latest.pth \ + --out work_dirs/slowfast/test_ucf101_slowfast_bnn.pkl \ + --eval top_k_accuracy mean_class_accuracy + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/slowfast/evaluate_slowfast_dnn_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/evaluate_slowfast_dnn_ucf101.sh new file mode 100644 index 0000000..6e0478e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/evaluate_slowfast_dnn_ucf101.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/test.py configs/recognition/slowfast/finetune_ucf101_slowfast_dnn.py \ + work_dirs/slowfast/finetune_ucf101_slowfast_dnn/latest.pth \ + --out work_dirs/slowfast/test_ucf101_slowfast_dnn.pkl \ + --eval top_k_accuracy mean_class_accuracy + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/slowfast/evaluate_slowfast_edlnokl_avuc_debias_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/evaluate_slowfast_edlnokl_avuc_debias_ucf101.sh new file mode 100644 index 0000000..834646a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/evaluate_slowfast_edlnokl_avuc_debias_ucf101.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/test.py configs/recognition/slowfast/finetune_ucf101_slowfast_edlnokl_avuc_debias.py \ + work_dirs/slowfast/finetune_ucf101_slowfast_edlnokl_avuc_debias/latest.pth \ + --out work_dirs/slowfast/test_ucf101_slowfast_edlnokl_avuc_debias.pkl \ + --eval top_k_accuracy mean_class_accuracy + +cd $pwd_dir +echo "Experiments finished!" 
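
The edlnokl* checkpoints evaluated above use evidential (EDL) heads: instead of softmax probabilities, the network output is turned into Dirichlet evidence, from which run_evidence_inference() in the OOD scripts derives both class probabilities and a scalar uncertainty. A minimal PyTorch sketch of that post-processing, assuming the 'exp' evidence function, which is the scripts' default unless the config overrides it:

```python
# Sketch of the evidential post-processing in run_evidence_inference():
# evidence = exp(output), alpha = evidence + 1, u = K / sum(alpha),
# expected class probabilities = alpha / sum(alpha).
import torch

def edl_scores(output, num_classes):
    evidence = torch.exp(output)                    # 'exp' evidence function (default)
    alpha = evidence + 1.0                          # Dirichlet concentration parameters
    strength = alpha.sum(dim=1, keepdim=True)       # Dirichlet strength S
    uncertainty = num_classes / strength.squeeze(1) # u = K / S, in (0, 1]
    probs = alpha / strength                        # expected probabilities under Dir(alpha)
    return probs, uncertainty

output = torch.randn(2, 101)                        # batch of 2 clips, 101 UCF-101 classes
probs, u = edl_scores(output, num_classes=101)
print(probs.sum(dim=1), u)                          # rows sum to 1; smaller u = more confident
```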
diff --git a/Downstream/Open-Set-Action-Recognition/experiments/slowfast/evaluate_slowfast_rpl_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/evaluate_slowfast_rpl_ucf101.sh new file mode 100644 index 0000000..d356aa1 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/evaluate_slowfast_rpl_ucf101.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/test.py configs/recognition/slowfast/finetune_ucf101_slowfast_rpl.py \ + work_dirs/slowfast/finetune_ucf101_slowfast_rpl/latest.pth \ + --out work_dirs/slowfast/test_ucf101_slowfast_rpl.pkl \ + --eval top_k_accuracy mean_class_accuracy + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/slowfast/finetune_slowfast_bnn_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/finetune_slowfast_bnn_ucf101.sh new file mode 100644 index 0000000..d267396 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/finetune_slowfast_bnn_ucf101.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/slowfast/finetune_ucf101_slowfast_bnn.py \ + --work-dir work_dirs/slowfast/finetune_ucf101_slowfast_bnn \ + --validate \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/slowfast/finetune_slowfast_dnn_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/finetune_slowfast_dnn_ucf101.sh new file mode 100644 index 0000000..ff81031 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/finetune_slowfast_dnn_ucf101.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +# --validate +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/slowfast/finetune_ucf101_slowfast_dnn.py \ + --work-dir work_dirs/slowfast/finetune_ucf101_slowfast_dnn \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 \ + --validate + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/slowfast/finetune_slowfast_edlnokl_avuc_debias_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/finetune_slowfast_edlnokl_avuc_debias_ucf101.sh new file mode 100644 index 0000000..b7e7550 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/finetune_slowfast_edlnokl_avuc_debias_ucf101.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +# --validate +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/slowfast/finetune_ucf101_slowfast_edlnokl_avuc_debias.py \ + --work-dir work_dirs/slowfast/finetune_ucf101_slowfast_edlnokl_avuc_debias \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 \ + --validate + +cd $pwd_dir +echo "Experiments finished!" 
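
The scripts below (run_draw_confmat.sh, run_get_threshold.sh, run_openness*.sh) pass per-model uncertainty thresholds such as 0.004552 for the EDL SlowFast model. The sketch below is only an illustration, assuming, as the flag names suggest, that a clip whose uncertainty exceeds the threshold is treated as unknown; the actual open-set evaluation lives in experiments/compare_openness*.py and experiments/draw_confusion_matrix.py, which this patch excerpt does not show. The npz keys match the np.savez() call in ood_detection*.py; reject_unknown is a hypothetical helper name.

```python
# Hedged sketch: applying a fixed uncertainty threshold to a *_result.npz file
# written by ood_detection*.py (keys as in its np.savez call). Not the repo's
# evaluation code -- just an illustration of how such a threshold splits clips.
import numpy as np

def reject_unknown(result_file, threshold):                    # hypothetical helper
    r = np.load(result_file, allow_pickle=True)
    ind_unc = np.asarray(r['ind_unctt']).squeeze()
    ood_unc = np.asarray(r['ood_unctt']).squeeze()
    ind_correct = np.asarray(r['ind_pred']).squeeze() == np.asarray(r['ind_label']).squeeze()
    kept_and_correct = np.mean(ind_correct & (ind_unc <= threshold))   # known kept & right
    ood_rejected = np.mean(ood_unc > threshold)                        # unknown flagged
    print(f"IND kept & correct: {kept_and_correct:.3f}, OOD rejected: {ood_rejected:.3f}")

# e.g. reject_unknown('experiments/slowfast/results/SlowFast_EDLNoKLAvUCDebias_EDL_HMDB_result.npz', 0.004552)
```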
diff --git a/Downstream/Open-Set-Action-Recognition/experiments/slowfast/finetune_slowfast_rpl_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/finetune_slowfast_rpl_ucf101.sh new file mode 100644 index 0000000..31eb427 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/finetune_slowfast_rpl_ucf101.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +# --validate +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/slowfast/finetune_ucf101_slowfast_rpl.py \ + --work-dir work_dirs/slowfast/finetune_ucf101_slowfast_rpl \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 \ + --validate + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/slowfast/run_draw_confmat.sh b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/run_draw_confmat.sh new file mode 100644 index 0000000..10424fd --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/run_draw_confmat.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +OOD_DATA=$1 # HMDB or MiT +RESULT_PATH="experiments/slowfast/results" + +# Confusion Matrix comparison +python experiments/draw_confusion_matrix.py \ + --ood_result ${RESULT_PATH}/SlowFast_EDLNoKLAvUCDebias_EDL_${OOD_DATA}_result.npz \ + --uncertain_thresh 0.004552 \ + --save_file ${RESULT_PATH}/../results_confmat/SlowFast_DEAR_${OOD_DATA}_ConfMat.png + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/slowfast/run_get_threshold.sh b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/run_get_threshold.sh new file mode 100644 index 0000000..52c58e9 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/run_get_threshold.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 +MODEL=$2 +BATCHSIZE=$3 +TRAIN_DATA='data/ucf101/ucf101_train_split_1_videos.txt' +RESULT_DIR='experiments/slowfast/results' + +case ${MODEL} in + dnn) + # get the BALD threshold for slowfast model trained on UCF-101 + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/get_threshold.py \ + --config configs/recognition/slowfast/inference_slowfast_dnn.py \ + --checkpoint work_dirs/slowfast/finetune_ucf101_slowfast_dnn/latest.pth \ + --train_data ${TRAIN_DATA} \ + --batch_size ${BATCHSIZE} \ + --uncertainty BALD \ + --result_prefix ${RESULT_DIR}/SlowFast_DNN_BALD + ;; + bnn) + # get the BALD threshold for slowfast model trained on UCF-101 + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/get_threshold.py \ + --config configs/recognition/slowfast/inference_slowfast_bnn.py \ + --checkpoint work_dirs/slowfast/finetune_ucf101_slowfast_bnn/latest.pth \ + --train_data ${TRAIN_DATA} \ + --batch_size ${BATCHSIZE} \ + --uncertainty BALD \ + --result_prefix ${RESULT_DIR}/SlowFast_BNN_BALD + ;; + edlnokl_avuc_debias) + # get the EDL threshold for SlowFast_EDL_AvUC_Debias model trained on UCF-101 + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/get_threshold.py \ + --config configs/recognition/slowfast/inference_slowfast_enn.py \ + --checkpoint work_dirs/slowfast/finetune_ucf101_slowfast_edlnokl_avuc_debias/latest.pth \ + --train_data ${TRAIN_DATA} \ + --batch_size ${BATCHSIZE} \ + --uncertainty EDL \ + --result_prefix ${RESULT_DIR}/SlowFast_EDLNoKLAvUCDebias_EDL + ;; + *) + 
echo "Invalid model: "${MODEL} + exit + ;; +esac + + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/slowfast/run_ood_detection.sh b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/run_ood_detection.sh new file mode 100644 index 0000000..be99386 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/run_ood_detection.sh @@ -0,0 +1,71 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ +# DEVICE=$1 +OOD_DATASET=$1 +MODEL=$2 +GPUS=$3 +PORT=${PORT:-29500} +IND_DATA='data/ucf101/ucf101_val_split_1_videos.txt' + +case ${OOD_DATASET} in + HMDB) + # run ood detection on hmdb-51 validation set + OOD_DATA='data/hmdb51/hmdb51_val_split_1_videos.txt' + ;; + MiT) + # run ood detection on hmdb-51 validation set + OOD_DATA='data/mit/mit_val_list_videos.txt' + ;; + *) + echo "Dataset not supported: "${OOD_DATASET} + exit + ;; +esac +RESULT_DIR='experiments/slowfast/results' + +case ${MODEL} in + dnn) + # DNN with Dropout model + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/ood_detection.py \ + --config configs/recognition/slowfast/inference_slowfast_dnn.py \ + --checkpoint work_dirs/slowfast/finetune_ucf101_slowfast_dnn/latest.pth \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --uncertainty BALD \ + --result_prefix ${RESULT_DIR}/SlowFast_DNN_BALD_${OOD_DATASET} + ;; + bnn) + # DNN with Dropout model + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/ood_detection.py \ + --config configs/recognition/slowfast/inference_slowfast_bnn.py \ + --checkpoint work_dirs/slowfast/finetune_ucf101_slowfast_bnn/latest.pth \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --uncertainty BALD \ + --result_prefix ${RESULT_DIR}/SlowFast_BNN_BALD_${OOD_DATASET} + ;; + edlnokl_avuc_debias) + # Evidential Deep Learning (without KL divergence loss term) with AvU Calibration and Debiasing + # CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/ood_detection.py \ + python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT experiments/ood_detection_dist.py \ + --config configs/recognition/slowfast/inference_slowfast_enn.py \ + --checkpoint work_dirs/slowfast/finetune_ucf101_slowfast_edlnokl_avuc_debias/latest.pth \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --uncertainty EDL \ + --result_prefix ${RESULT_DIR}/SlowFast_EDLNoKLAvUCDebias_EDL_${OOD_DATASET} \ + --launcher pytorch + ;; + *) + echo "Invalid model: "${MODEL} + exit + ;; +esac + + +cd $pwd_dir +echo "Experiments finished!" 
\ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/slowfast/run_openness.sh b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/run_openness.sh new file mode 100644 index 0000000..922aaeb --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/run_openness.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +OOD_DATA=$1 # HMDB or MiT +case ${OOD_DATA} in + HMDB) + NUM_CLASSES=51 + ;; + MiT) + NUM_CLASSES=305 + ;; + *) + echo "Invalid OOD Dataset: "${OOD_DATA} + exit + ;; +esac + +# OOD Detection comparison +python experiments/compare_openness.py \ + --base_model slowfast \ + --baselines SlowFast_DNN_BALD SlowFast_BNN_BALD SlowFast_EDLNoKLAvUCDebias_EDL \ + --thresholds 0.000065 0.000010 0.004552 \ + --styles b r \ + --ood_data ${OOD_DATA} \ + --ood_ncls ${NUM_CLASSES} \ + --ind_ncls 101 \ + --result_png F1_openness_compare_${OOD_DATA}.png + + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/slowfast/run_openness_new.sh b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/run_openness_new.sh new file mode 100644 index 0000000..118eb6f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/run_openness_new.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +OOD_DATA=$1 # HMDB or MiT +RESULT_FILES=("results_baselines/openmax/SlowFast_OpenMax_${OOD_DATA}_result.npz" \ # openmax + "results/SlowFast_DNN_BALD_${OOD_DATA}_result.npz" \ # mc dropout + "results/SlowFast_BNN_BALD_${OOD_DATA}_result.npz" \ # bnn svi + "results_baselines/openmax/SlowFast_OpenMax_${OOD_DATA}_result.npz" \ # softmax + "results_baselines/rpl/SlowFast_RPL_${OOD_DATA}_result.npz" \ # rpl + "results/SlowFast_EDLNoKLAvUCDebias_EDL_${OOD_DATA}_result.npz") # dear (ours) +THRESHOLDS=(-1 \ + 0.000065 \ + 0.000010 \ + 0.997915 \ + 0.997780 \ + 0.004552) + +# OOD Detection comparison +# The folders `results/` and `results_baselines` are in the `experiments/slowfast/` folder. +python experiments/compare_openness_new.py \ + --base_model slowfast \ + --ood_data ${OOD_DATA} \ + --baseline_results ${RESULT_FILES[@]} + +# OOD Detection comparison by using thresholds +echo 'Results by using thresholds:' +python experiments/compare_openness_new.py \ + --base_model slowfast \ + --ood_data ${OOD_DATA} \ + --thresholds ${THRESHOLDS[@]} \ + --baseline_results ${RESULT_FILES[@]} + +cd $pwd_dir +echo "Experiments finished!" 
\ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/slowfast/run_reliability_evaluation.sh b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/run_reliability_evaluation.sh new file mode 100644 index 0000000..3c43104 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/slowfast/run_reliability_evaluation.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +OOD_DATASET=$1 +MODEL=$2 +RESULT_DIR='experiments/slowfast/results' + +case ${MODEL} in + dnn) + # DNN with Dropout model + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/evaluate_calibration.py \ + --ood_result ${RESULT_DIR}/SlowFast_DNN_BALD_${OOD_DATASET}_result.npz \ + --save_prefix ${RESULT_DIR}/../results_reliability/SlowFast_DNN_BALD_${OOD_DATASET}_reliability + ;; + edlnokl_avuc_debias) + # Evidential Deep Learning (without KL divergence loss term) with AvU Calibration and Debiasing + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/evaluate_calibration.py \ + --ood_result ${RESULT_DIR}/SlowFast_EDLNoKLAvUCDebias_EDL_${OOD_DATASET}_result.npz \ + --save_prefix ${RESULT_DIR}/../results_reliability/SlowFast_EDLNoKLAvUCDebias_EDL_${OOD_DATASET}_reliability + ;; + *) + echo "Invalid model: "${MODEL} + exit + ;; +esac + + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/analyze_features.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/analyze_features.sh new file mode 100644 index 0000000..dd4e707 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/analyze_features.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 +IND_DATA="data/ucf101/ucf101_val_split_1_videos.txt" +OOD_DATA="data/hmdb51/hmdb51_val_split_1_videos.txt" +RESULT_PATH="experiments/tpn_slowonly/results_tSNE" + + +CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/analyze_features.py \ + --config configs/recognition/tpn/inference_tpn_slowonly_bnn.py \ + --checkpoint work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_bnn/latest.pth \ + --known_split ${IND_DATA} \ + --unknown_split ${OOD_DATA} \ + --result_file ${RESULT_PATH}/tSNE_bnn_HMDB.png + +CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/analyze_features.py \ + --config configs/recognition/tpn/inference_tpn_slowonly_dnn.py \ + --checkpoint work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_celoss/latest.pth \ + --known_split ${IND_DATA} \ + --unknown_split ${OOD_DATA} \ + --result_file ${RESULT_PATH}/tSNE_dnn_HMDB.png + +CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/analyze_features.py \ + --config configs/recognition/tpn/inference_tpn_slowonly_enn.py \ + --checkpoint work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_nokl/latest.pth \ + --known_split ${IND_DATA} \ + --unknown_split ${OOD_DATA} \ + --result_file ${RESULT_PATH}/tSNE_enn_HMDB.png + +CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/analyze_features.py \ + --config configs/recognition/tpn/inference_tpn_slowonly_enn.py \ + --checkpoint work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_nokl_avuc/latest.pth \ + --known_split ${IND_DATA} \ + --unknown_split ${OOD_DATA} \ + --result_file ${RESULT_PATH}/tSNE_enn_avuc_HMDB.png + +CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/analyze_features.py \ + --config 
configs/recognition/tpn/inference_tpn_slowonly_enn.py \ + --checkpoint work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_nokl_avuc_debias/latest.pth \ + --known_split ${IND_DATA} \ + --unknown_split ${OOD_DATA} \ + --result_file ${RESULT_PATH}/tSNE_enn_avuc_debias_HMDB.png + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/baseline_tpn_openmax.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/baseline_tpn_openmax.sh new file mode 100644 index 0000000..d4fa6d0 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/baseline_tpn_openmax.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 +OOD_DATASET=$2 +IND_DATA='data/ucf101/ucf101_val_split_1_videos.txt' + +case ${OOD_DATASET} in + HMDB) + # run ood detection on hmdb-51 validation set + OOD_DATA='data/hmdb51/hmdb51_val_split_1_videos.txt' + NUM_CLASSES=51 + ;; + MiT) + # run ood detection on mit-v2 validation set + OOD_DATA='data/mit/mit_val_list_videos.txt' + NUM_CLASSES=305 + ;; + *) + echo "Dataset not supported: "${OOD_DATASET} + exit + ;; +esac +RESULT_DIR='experiments/tpn_slowonly/results' + +CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/baseline_openmax.py \ + --config configs/recognition/tpn/inference_tpn_slowonly_dnn.py \ + --checkpoint work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_celoss/latest.pth \ + --cache_mav_dist experiments/tpn_slowonly/results_baselines/openmax/ucf101_mav_dist \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --ood_ncls ${NUM_CLASSES} \ + --result_prefix experiments/tpn_slowonly/results_baselines/openmax/TPN_OpenMax_${OOD_DATASET} + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/baseline_tpn_rpl.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/baseline_tpn_rpl.sh new file mode 100644 index 0000000..f9d5c3a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/baseline_tpn_rpl.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 +OOD_DATASET=$2 +IND_DATA='data/ucf101/ucf101_val_split_1_videos.txt' + +case ${OOD_DATASET} in + HMDB) + # run ood detection on hmdb-51 validation set + OOD_DATA='data/hmdb51/hmdb51_val_split_1_videos.txt' + NUM_CLASSES=51 + ;; + MiT) + # run ood detection on mit-v2 validation set + OOD_DATA='data/mit/mit_val_list_videos.txt' + NUM_CLASSES=305 + ;; + *) + echo "Dataset not supported: "${OOD_DATASET} + exit + ;; +esac + +CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/baseline_rpl.py \ + --config configs/recognition/tpn/inference_tpn_slowonly_rpl.py \ + --checkpoint work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_rpl/latest.pth \ + --train_data data/ucf101/ucf101_train_split_1_videos.txt \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --ood_ncls ${NUM_CLASSES} \ + --ood_dataname ${OOD_DATASET} \ + --result_prefix experiments/tpn_slowonly/results_baselines/rpl/RPL + +cd $pwd_dir +echo "Experiments finished!" 
\ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/baseline_tpn_softmax.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/baseline_tpn_softmax.sh new file mode 100644 index 0000000..201c32b --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/baseline_tpn_softmax.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 +OOD_DATASET=$2 + +case ${OOD_DATASET} in + HMDB) + NUM_CLASSES=51 + ;; + MiT) + NUM_CLASSES=305 + ;; + *) + echo "Dataset not supported: "${OOD_DATASET} + exit + ;; +esac +RESULT_DIR='experiments/tpn_slowonly/results' + +CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/baseline_softmax.py \ + --config configs/recognition/tpn/inference_tpn_slowonly_dnn.py \ + --checkpoint work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_celoss/latest.pth \ + --batch_size 1 \ + --ood_ncls ${NUM_CLASSES} \ + --result_prefix experiments/tpn_slowonly/results_baselines/softmax/TPN_SoftMax_${OOD_DATASET} + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/evaluate_tpn_bnn_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/evaluate_tpn_bnn_ucf101.sh new file mode 100644 index 0000000..b158ce4 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/evaluate_tpn_bnn_ucf101.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/test.py configs/recognition/tpn/tpn_slowonly_bnn_r50_8x8x1_150e_kinetics_rgb.py \ + work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_bnn/latest.pth \ + --out work_dirs/tpn_slowonly/test_ucf101_tpn_slowonly_bnn.pkl \ + --eval top_k_accuracy mean_class_accuracy + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/evaluate_tpn_celoss_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/evaluate_tpn_celoss_ucf101.sh new file mode 100644 index 0000000..8c47faa --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/evaluate_tpn_celoss_ucf101.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/test.py configs/recognition/tpn/tpn_slowonly_celoss_r50_8x8x1_150e_kinetics_rgb.py \ + work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_celoss/latest.pth \ + --out work_dirs/tpn_slowonly/test_ucf101_tpn_slowonly_celoss.pkl \ + --eval top_k_accuracy mean_class_accuracy + +cd $pwd_dir +echo "Experiments finished!" 
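The SoftMax and OpenMax baseline scripts above build open-set detectors from a standard closed-set classifier. For orientation only, the snippet below shows the common maximum-softmax-probability baseline, where low confidence is rejected as out-of-distribution; it is a generic sketch, not the repository's `baseline_softmax.py`.

```python
import numpy as np

def max_softmax_confidence(logits):
    # logits: (N, C) closed-set classifier outputs
    z = logits - logits.max(axis=1, keepdims=True)            # shift for numerical stability
    probs = np.exp(z) / np.exp(z).sum(axis=1, keepdims=True)
    return probs.max(axis=1)                                   # one confidence per sample

scores = max_softmax_confidence(np.array([[2.0, 0.1, -1.0], [0.2, 0.1, 0.0]]))
is_unknown = scores < 0.9   # the threshold itself would be tuned on in-distribution data
print(is_unknown)
```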
diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/evaluate_tpn_edlloss_avuc_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/evaluate_tpn_edlloss_avuc_ucf101.sh new file mode 100644 index 0000000..266852e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/evaluate_tpn_edlloss_avuc_ucf101.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/test.py configs/recognition/tpn/tpn_slowonly_edlloss_avuc_r50_8x8x1_150e_kinetics_rgb.py \ + work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_avuc/latest.pth \ + --out work_dirs/tpn_slowonly/test_ucf101_tpn_slowonly_edlloss_avuc.pkl \ + --eval top_k_accuracy mean_class_accuracy + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/evaluate_tpn_edlloss_nokl_avuc_debias_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/evaluate_tpn_edlloss_nokl_avuc_debias_ucf101.sh new file mode 100644 index 0000000..12d6342 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/evaluate_tpn_edlloss_nokl_avuc_debias_ucf101.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/test.py configs/recognition/tpn/tpn_slowonly_edlloss_nokl_avuc_debias_r50_8x8x1_150e_kinetics_rgb.py \ + work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_nokl_avuc_debias/latest.pth \ + --out work_dirs/tpn_slowonly/test_ucf101_tpn_slowonly_edlloss_nokl_avuc_debias.pkl \ + --eval top_k_accuracy mean_class_accuracy + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/evaluate_tpn_edlloss_nokl_avuc_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/evaluate_tpn_edlloss_nokl_avuc_ucf101.sh new file mode 100644 index 0000000..3757606 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/evaluate_tpn_edlloss_nokl_avuc_ucf101.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/test.py configs/recognition/tpn/tpn_slowonly_edlloss_nokl_avuc_r50_8x8x1_150e_kinetics_rgb.py \ + work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_nokl_avuc/latest.pth \ + --out work_dirs/tpn_slowonly/test_ucf101_tpn_slowonly_edlloss_nokl_avuc.pkl \ + --eval top_k_accuracy mean_class_accuracy + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/evaluate_tpn_edlloss_nokl_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/evaluate_tpn_edlloss_nokl_ucf101.sh new file mode 100644 index 0000000..1f7b6c2 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/evaluate_tpn_edlloss_nokl_ucf101.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/test.py configs/recognition/tpn/tpn_slowonly_edlloss_nokl_r50_8x8x1_150e_kinetics_rgb.py \ + work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_nokl/latest.pth \ + --out work_dirs/tpn_slowonly/test_ucf101_tpn_slowonly_edlloss_nokl.pkl \ + --eval top_k_accuracy mean_class_accuracy + +cd $pwd_dir +echo "Experiments finished!" 
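The `tools/test.py` calls above all request `top_k_accuracy` and `mean_class_accuracy`. Both metrics are simple functions of the score matrix; the stand-alone sketch below is provided for reference and is not mmaction's implementation.

```python
import numpy as np

def top_k_accuracy(scores, labels, k=1):
    # scores: (N, C) class scores, labels: (N,) ground-truth class indices
    topk = np.argsort(scores, axis=1)[:, -k:]            # indices of the k largest scores
    return (topk == labels[:, None]).any(axis=1).mean()

def mean_class_accuracy(scores, labels):
    # unweighted average of per-class recalls, so small classes count equally
    pred = scores.argmax(axis=1)
    return float(np.mean([(pred[labels == c] == c).mean() for c in np.unique(labels)]))
```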
diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/evaluate_tpn_edlloss_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/evaluate_tpn_edlloss_ucf101.sh new file mode 100644 index 0000000..378c26b --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/evaluate_tpn_edlloss_ucf101.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/test.py configs/recognition/tpn/tpn_slowonly_edlloss_r50_8x8x1_150e_kinetics_rgb.py \ + work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss/latest.pth \ + --out work_dirs/tpn_slowonly/test_ucf101_tpn_slowonly_edlloss.pkl \ + --eval top_k_accuracy mean_class_accuracy + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/evaluate_tpn_rpl_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/evaluate_tpn_rpl_ucf101.sh new file mode 100644 index 0000000..102fe3a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/evaluate_tpn_rpl_ucf101.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/test.py configs/recognition/tpn/finetune_ucf101_tpn_slowonly_rpl.py \ + work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_rpl/latest.pth \ + --out work_dirs/tpn_slowonly/test_ucf101_tpn_slowonly_rpl.pkl \ + --eval top_k_accuracy mean_class_accuracy + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/finetune_tpn_bnn_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/finetune_tpn_bnn_ucf101.sh new file mode 100644 index 0000000..79e2ce5 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/finetune_tpn_bnn_ucf101.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +# --validate +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/tpn/tpn_slowonly_bnn_r50_8x8x1_150e_kinetics_rgb.py \ + --work-dir work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_bnn \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 \ + --validate + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/finetune_tpn_celoss_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/finetune_tpn_celoss_ucf101.sh new file mode 100644 index 0000000..b9e03bb --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/finetune_tpn_celoss_ucf101.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +# --validate +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/tpn/tpn_slowonly_celoss_r50_8x8x1_150e_kinetics_rgb.py \ + --work-dir work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_celoss \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 \ + --validate + +cd $pwd_dir +echo "Experiments finished!" 
diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/finetune_tpn_edlloss_avuc_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/finetune_tpn_edlloss_avuc_ucf101.sh new file mode 100644 index 0000000..e4ff5c2 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/finetune_tpn_edlloss_avuc_ucf101.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +# --validate +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/tpn/tpn_slowonly_edlloss_avuc_r50_8x8x1_150e_kinetics_rgb.py \ + --work-dir work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_avuc \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 \ + --validate + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/finetune_tpn_edlloss_nokl_avuc_debias_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/finetune_tpn_edlloss_nokl_avuc_debias_ucf101.sh new file mode 100644 index 0000000..c669729 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/finetune_tpn_edlloss_nokl_avuc_debias_ucf101.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +# --validate +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/tpn/tpn_slowonly_edlloss_nokl_avuc_debias_r50_8x8x1_150e_kinetics_rgb.py \ + --work-dir work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_nokl_avuc_debias \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 \ + --validate + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/finetune_tpn_edlloss_nokl_avuc_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/finetune_tpn_edlloss_nokl_avuc_ucf101.sh new file mode 100644 index 0000000..b03b569 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/finetune_tpn_edlloss_nokl_avuc_ucf101.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +# --validate +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/tpn/tpn_slowonly_edlloss_nokl_avuc_r50_8x8x1_150e_kinetics_rgb.py \ + --work-dir work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_nokl_avuc \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 \ + --validate + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/finetune_tpn_edlloss_nokl_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/finetune_tpn_edlloss_nokl_ucf101.sh new file mode 100644 index 0000000..41d3507 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/finetune_tpn_edlloss_nokl_ucf101.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +# --validate +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/tpn/tpn_slowonly_edlloss_nokl_r50_8x8x1_150e_kinetics_rgb.py \ + --work-dir work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_nokl \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 \ + --validate + +cd $pwd_dir +echo "Experiments finished!" 
diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/finetune_tpn_edlloss_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/finetune_tpn_edlloss_ucf101.sh new file mode 100644 index 0000000..7b26985 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/finetune_tpn_edlloss_ucf101.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +# --validate +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/tpn/tpn_slowonly_edlloss_r50_8x8x1_150e_kinetics_rgb.py \ + --work-dir work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 \ + --validate + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/finetune_tpn_rpl_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/finetune_tpn_rpl_ucf101.sh new file mode 100644 index 0000000..7053453 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/finetune_tpn_rpl_ucf101.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +# --validate +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/tpn/finetune_ucf101_tpn_slowonly_rpl.py \ + --work-dir work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_rpl \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 \ + --validate + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/gradcam_demo.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/gradcam_demo.sh new file mode 100644 index 0000000..34248f8 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/gradcam_demo.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 + +CUDA_VISIBLE_DEVICES=${DEVICE} python demo/demo_gradcam.py configs/recognition/tpn/inference_tpn_slowonly_dnn.py \ + work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_nokl_avuc/latest.pth demo/demo.mp4 \ + --target-layer-name backbone/layer4/1/relu --fps 10 \ + --out-filename experiments/tpn_slowonly/results/demo_gradcam.gif + + + +cd $pwd_dir +echo "Experiments finished!" 
\ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/run_draw_confmat.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/run_draw_confmat.sh new file mode 100644 index 0000000..7b1c9da --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/run_draw_confmat.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +OOD_DATA=$1 # HMDB or MiT +RESULT_PATH="experiments/tpn_slowonly/results" + +# Confusion Matrix comparison +python experiments/draw_confusion_matrix.py \ + --ood_result ${RESULT_PATH}/TPN_SlowOnly_EDLlogNoKLAvUCDebias_EDL_${OOD_DATA}_result.npz \ + --uncertain_thresh 0.004555 \ + --top_part \ + --save_file ${RESULT_PATH}/../results_confmat/TPN_DEAR_${OOD_DATA}_ConfMat.png + +# python experiments/draw_confusion_matrix.py \ +# --ood_result ${RESULT_PATH}/TPN_SlowOnly_EDLlogNoKLAvUC_EDL_${OOD_DATA}_result.npz \ +# --uncertain_thresh 0.495800 \ +# --save_file ${RESULT_PATH}/../results_confmat/confmat_EDLlogNoKLAvUC_EDL_${OOD_DATA}.png + +# python experiments/draw_confusion_matrix.py \ +# --ood_result ${RESULT_PATH}/TPN_SlowOnly_EDLlogNoKL_EDL_${OOD_DATA}_result.npz \ +# --uncertain_thresh 0.495806 \ +# --save_file ${RESULT_PATH}/../results_confmat/confmat_EDLlogNoKL_EDL_${OOD_DATA}.png + +# python experiments/draw_confusion_matrix.py \ +# --ood_result ${RESULT_PATH}/TPN_SlowOnly_Dropout_BALD_${OOD_DATA}_result.npz \ +# --uncertain_thresh 0.000096 \ +# --save_file ${RESULT_PATH}/../results_confmat/confmat_Dropout_BALD_${OOD_DATA}.png + +# python experiments/draw_confusion_matrix.py \ +# --ood_result ${RESULT_PATH}/TPN_SlowOnly_BNN_BALD_${OOD_DATA}_result.npz \ +# --uncertain_thresh 0.000007 \ +# --save_file ${RESULT_PATH}/../results_confmat/confmat_BNN_BALD_${OOD_DATA}.png + +cd $pwd_dir +echo "Experiments finished!" 
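`draw_confusion_matrix.py`, used above, takes an OOD result file plus an uncertainty threshold. A simplified version of the idea is sketched below: predictions whose uncertainty exceeds the threshold are routed into one extra "unknown" column before plotting. The `.npz` keys are hypothetical, and the repository's plotting options will differ.

```python
import numpy as np
import matplotlib.pyplot as plt

def draw_open_set_confmat(result_file, threshold, num_classes, save_file):
    data = np.load(result_file)   # assumed keys: 'ind_label', 'ind_pred', 'ind_unctt'
    pred = np.where(data['ind_unctt'] > threshold, num_classes, data['ind_pred'])
    conf = np.zeros((num_classes, num_classes + 1))
    for t, p in zip(data['ind_label'], pred):
        conf[int(t), int(p)] += 1
    conf /= conf.sum(axis=1, keepdims=True).clip(min=1)   # row-normalise
    plt.imshow(conf, cmap='viridis')
    plt.xlabel('prediction (last column = rejected as unknown)')
    plt.ylabel('ground truth')
    plt.savefig(save_file, bbox_inches='tight')
```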
\ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/run_get_threshold.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/run_get_threshold.sh new file mode 100644 index 0000000..a86cf6d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/run_get_threshold.sh @@ -0,0 +1,95 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 +MODEL=$2 +BATCHSIZE=$3 +TRAIN_DATA='data/ucf101/ucf101_train_split_1_videos.txt' +RESULT_DIR='experiments/tpn_slowonly/results' + +case ${MODEL} in + dropout) + # get the BALD threshold for TPN_SlowOnly_Dropout model trained on UCF-101 + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/get_threshold.py \ + --config configs/recognition/tpn/inference_tpn_slowonly_dnn.py \ + --checkpoint work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_celoss/latest.pth \ + --train_data ${TRAIN_DATA} \ + --batch_size ${BATCHSIZE} \ + --uncertainty BALD \ + --result_prefix ${RESULT_DIR}/TPN_SlowOnly_Dropout_BALD + ;; + bnn) + # get the BALD threshold for TPN_SlowOnly_BNN model trained on UCF-101 + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/get_threshold.py \ + --config configs/recognition/tpn/inference_tpn_slowonly_bnn.py \ + --checkpoint work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_bnn/latest.pth \ + --train_data ${TRAIN_DATA} \ + --batch_size ${BATCHSIZE} \ + --uncertainty BALD \ + --result_prefix ${RESULT_DIR}/TPN_SlowOnly_BNN_BALD + ;; + edl) + # get the EDL threshold for TPN_SlowOnly_EDL model trained on UCF-101 + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/get_threshold.py \ + --config configs/recognition/tpn/inference_tpn_slowonly_dnn.py \ + --checkpoint work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss/latest.pth \ + --train_data ${TRAIN_DATA} \ + --batch_size ${BATCHSIZE} \ + --uncertainty EDL \ + --result_prefix ${RESULT_DIR}/TPN_SlowOnly_EDLlog_EDL + ;; + edl_nokl) + # get the EDL threshold for TPN_SlowOnly_EDL model trained on UCF-101 + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/get_threshold.py \ + --config configs/recognition/tpn/inference_tpn_slowonly_dnn.py \ + --checkpoint work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_nokl/latest.pth \ + --train_data ${TRAIN_DATA} \ + --batch_size ${BATCHSIZE} \ + --uncertainty EDL \ + --result_prefix ${RESULT_DIR}/TPN_SlowOnly_EDLlogNoKL_EDL + ;; + edl_avuc) + # get the EDL threshold for TPN_SlowOnly_EDL_AvUC model trained on UCF-101 + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/get_threshold.py \ + --config configs/recognition/tpn/inference_tpn_slowonly_dnn.py \ + --checkpoint work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_avuc/latest.pth \ + --train_data ${TRAIN_DATA} \ + --batch_size ${BATCHSIZE} \ + --uncertainty EDL \ + --result_prefix ${RESULT_DIR}/TPN_SlowOnly_EDLlogAvUC_EDL + ;; + edl_nokl_avuc) + # get the EDL threshold for TPN_SlowOnly_EDL_AvUC model trained on UCF-101 + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/get_threshold.py \ + --config configs/recognition/tpn/inference_tpn_slowonly_dnn.py \ + --checkpoint work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_nokl_avuc/latest.pth \ + --train_data ${TRAIN_DATA} \ + --batch_size ${BATCHSIZE} \ + --uncertainty EDL \ + --result_prefix ${RESULT_DIR}/TPN_SlowOnly_EDLlogNoKLAvUC_EDL + ;; + edl_nokl_avuc_debias) + # get the EDL threshold for TPN_SlowOnly_EDL_AvUC model trained on UCF-101 + CUDA_VISIBLE_DEVICES=${DEVICE} python 
experiments/get_threshold.py \ + --config configs/recognition/tpn/inference_tpn_slowonly_enn.py \ + --checkpoint work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_nokl_avuc_debias/latest.pth \ + --train_data ${TRAIN_DATA} \ + --batch_size ${BATCHSIZE} \ + --uncertainty EDL \ + --result_prefix ${RESULT_DIR}/TPN_SlowOnly_EDLlogNoKLAvUCDebias_EDL + ;; + *) + echo "Invalid model: "${MODEL} + exit + ;; +esac + + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/run_ood_detection.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/run_ood_detection.sh new file mode 100644 index 0000000..31b230d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/run_ood_detection.sh @@ -0,0 +1,110 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 +OOD_DATASET=$2 +MODEL=$3 +IND_DATA='data/ucf101/ucf101_val_split_1_videos.txt' + +case ${OOD_DATASET} in + HMDB) + # run ood detection on hmdb-51 validation set + OOD_DATA='data/hmdb51/hmdb51_val_split_1_videos.txt' + ;; + MiT) + # run ood detection on mit-v2 validation set + OOD_DATA='data/mit/mit_val_list_videos.txt' + ;; + *) + echo "Dataset not supported: "${OOD_DATASET} + exit + ;; +esac +RESULT_DIR='experiments/tpn_slowonly/results' + +case ${MODEL} in + dropout) + # DNN with Dropout model + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/ood_detection.py \ + --config configs/recognition/tpn/inference_tpn_slowonly_dnn.py \ + --checkpoint work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_celoss/latest.pth \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --uncertainty BALD \ + --result_prefix ${RESULT_DIR}/TPN_SlowOnly_Dropout_BALD_${OOD_DATASET} + ;; + bnn) + # Bayesian Neural Network + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/ood_detection.py \ + --config configs/recognition/tpn/inference_tpn_slowonly_bnn.py \ + --checkpoint work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_bnn/latest.pth \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --uncertainty BALD \ + --result_prefix ${RESULT_DIR}/TPN_SlowOnly_BNN_BALD_${OOD_DATASET} + ;; + edl) + # Evidential Deep Learning + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/ood_detection.py \ + --config configs/recognition/tpn/inference_tpn_slowonly_enn.py \ + --checkpoint work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss/latest.pth \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --uncertainty EDL \ + --result_prefix ${RESULT_DIR}/TPN_SlowOnly_EDLlog_EDL_${OOD_DATASET} + ;; + edl_nokl) + # Evidential Deep Learning (without KL divergence loss term) + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/ood_detection.py \ + --config configs/recognition/tpn/inference_tpn_slowonly_enn.py \ + --checkpoint work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_nokl/latest.pth \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --uncertainty EDL \ + --result_prefix ${RESULT_DIR}/TPN_SlowOnly_EDLlogNoKL_EDL_${OOD_DATASET} + ;; + edl_avuc) + # Evidential Deep Learning with AvU Calibration + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/ood_detection.py \ + --config configs/recognition/tpn/inference_tpn_slowonly_enn.py \ + --checkpoint work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_avuc/latest.pth \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --uncertainty EDL \ + --result_prefix ${RESULT_DIR}/TPN_SlowOnly_EDLlogAvUC_EDL_${OOD_DATASET} + ;; +
edl_nokl_avuc) + # Evidential Deep Learning (without KL divergence loss term) with AvU Calibration + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/ood_detection.py \ + --config configs/recognition/tpn/inference_tpn_slowonly_enn.py \ + --checkpoint work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_nokl_avuc/latest.pth \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --uncertainty EDL \ + --result_prefix ${RESULT_DIR}/TPN_SlowOnly_EDLlogNoKLAvUC_EDL_${OOD_DATASET} + ;; + edl_nokl_avuc_debias) + # Evidential Deep Learning (without KL divergence loss term) with AvU Calibration + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/ood_detection.py \ + --config configs/recognition/tpn/inference_tpn_slowonly_enn.py \ + --checkpoint work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_nokl_avuc_debias/latest.pth \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --uncertainty EDL \ + --result_prefix ${RESULT_DIR}/TPN_SlowOnly_EDLlogNoKLAvUCDebias_EDL_${OOD_DATASET} + ;; + *) + echo "Invalid model: "${MODEL} + exit + ;; +esac + + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/run_openness.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/run_openness.sh new file mode 100644 index 0000000..bc15da3 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/run_openness.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +OOD_DATA=$1 # HMDB or MiT +case ${OOD_DATA} in + HMDB) + NUM_CLASSES=51 + ;; + MiT) + NUM_CLASSES=305 + ;; + *) + echo "Invalid OOD Dataset: "${OOD_DATA} + exit + ;; +esac + +# OOD Detection comparison +python experiments/compare_openness.py \ + --base_model tpn_slowonly \ + --baselines TPN_SlowOnly_Dropout_BALD TPN_SlowOnly_BNN_BALD TPN_SlowOnly_EDLlogNoKLAvUC_EDL TPN_SlowOnly_EDLlogNoKL_EDL TPN_SlowOnly_EDLlogNoKLAvUCDebiasNet_EDL TPN_SlowOnly_EDLlogNoKLAvUCDebias_EDL \ + --thresholds 0.000096 0.000007 0.495800 0.495806 0.004554 0.004555 \ + --styles b k m c r g \ + --ood_data ${OOD_DATA} \ + --ood_ncls ${NUM_CLASSES} \ + --ind_ncls 101 \ + --result_png F1_openness_compare_${OOD_DATA}.png + + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/run_openness_new.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/run_openness_new.sh new file mode 100644 index 0000000..5bf89d4 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/run_openness_new.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +OOD_DATA=$1 # HMDB or MiT +RESULT_FILES=("results_baselines/openmax/TPN_OpenMax_${OOD_DATA}_result.npz" \ # openmax + "results/TPN_SlowOnly_Dropout_BALD_${OOD_DATA}_result.npz" \ # mc dropout + "results/TPN_SlowOnly_BNN_BALD_${OOD_DATA}_result.npz" \ # bnn svi + "results_baselines/openmax/TPN_OpenMax_${OOD_DATA}_result.npz" \ # softmax + "results_baselines/rpl/TPN_RPL_${OOD_DATA}_result.npz" \ # rpl + "results/TPN_SlowOnly_EDLlogNoKLAvUCDebias_EDL_${OOD_DATA}_result.npz") # dear (ours) +THRESHOLDS=(-1 \ + 0.000096 \ + 0.000007 \ + 0.997623 \ + 0.996931 \ + 0.004555) + +# OOD Detection comparison +# The folders `results/` and `results_baselines` are in the `experiments/tpn_slowonly/` folder. 
+python experiments/compare_openness_new.py \ + --base_model tpn_slowonly \ + --ood_data ${OOD_DATA} \ + --baseline_results ${RESULT_FILES[@]} + +# OOD Detection comparison by using thresholds +echo 'Results by using thresholds:' +python experiments/compare_openness_new.py \ + --base_model tpn_slowonly \ + --ood_data ${OOD_DATA} \ + --thresholds ${THRESHOLDS[@]} \ + --baseline_results ${RESULT_FILES[@]} + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/run_reliability_evaluation.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/run_reliability_evaluation.sh new file mode 100644 index 0000000..09eb600 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/run_reliability_evaluation.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +OOD_DATASET=$1 +MODEL=$2 +RESULT_DIR='experiments/tpn_slowonly/results' + +case ${MODEL} in + dnn) + # DNN with Dropout model + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/evaluate_calibration.py \ + --ood_result ${RESULT_DIR}/TPN_SlowOnly_Dropout_BALD_${OOD_DATASET}_result.npz \ + --threshold 0.000096 \ + --save_prefix ${RESULT_DIR}/../results_reliability/TPN_SlowOnly_Dropout_BALD_${OOD_DATASET}_reliability + ;; + bnn) + # BNN model + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/evaluate_calibration.py \ + --ood_result ${RESULT_DIR}/TPN_SlowOnly_BNN_BALD_${OOD_DATASET}_result.npz \ + --threshold 0.000007 \ + --save_prefix ${RESULT_DIR}/../results_reliability/TPN_SlowOnly_BNN_BALD_${OOD_DATASET}_reliability + ;; + edlnokl) + # Evidential Deep Learning (without KL divergence loss term) + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/evaluate_calibration.py \ + --ood_result ${RESULT_DIR}/TPN_SlowOnly_EDLlogNoKL_EDL_${OOD_DATASET}_result.npz \ + --threshold 0.495806 \ + --save_prefix ${RESULT_DIR}/../results_reliability/TPN_SlowOnly_EDLlogNoKL_EDL_${OOD_DATASET}_reliability + ;; + edlnokl_avuc) + # Evidential Deep Learning (without KL divergence loss term) with AvU Calibration and Debiasing + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/evaluate_calibration.py \ + --ood_result ${RESULT_DIR}/TPN_SlowOnly_EDLlogNoKLAvUC_EDL_${OOD_DATASET}_result.npz \ + --threshold 0.495800 \ + --save_prefix ${RESULT_DIR}/../results_reliability/TPN_SlowOnly_EDLlogNoKLAvUC_EDL_${OOD_DATASET}_reliability + ;; + *) + echo "Invalid model: "${MODEL} + exit + ;; +esac + + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/searchw_evaluate.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/searchw_evaluate.sh new file mode 100644 index 0000000..7545b45 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/searchw_evaluate.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/test.py configs/recognition/tpn/tpn_slowonly_edlloss_nokl_avuc_debias_r50_8x8x1_150e_kinetics_rgb.py \ + work_dirs/tpn_slowonly/search_ucf101_tpn_slowonly_edlloss_nokl_avuc_debias/search_$2/latest.pth \ + --out work_dirs/tpn_slowonly/search_ucf101_tpn_slowonly_edlloss_nokl_avuc_debias/test_$2.pkl \ + --eval top_k_accuracy mean_class_accuracy + +cd $pwd_dir +echo "Experiments finished!" 
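`evaluate_calibration.py`, invoked in the reliability scripts above, produces the calibration results behind the reliability diagrams. For orientation, a compact expected calibration error (ECE) sketch is given below; the binning scheme and any additional statistics used by the repository may differ.

```python
import numpy as np

def expected_calibration_error(confidences, correct, n_bins=15):
    # confidences: (N,) max predicted probabilities, correct: (N,) 0/1 indicators
    edges = np.linspace(0.0, 1.0, n_bins + 1)
    ece = 0.0
    for lo, hi in zip(edges[:-1], edges[1:]):
        in_bin = (confidences > lo) & (confidences <= hi)
        if in_bin.any():
            gap = abs(correct[in_bin].mean() - confidences[in_bin].mean())
            ece += in_bin.mean() * gap        # bins weighted by their sample share
    return ece
```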
diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/searchw_finetune.sh b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/searchw_finetune.sh new file mode 100644 index 0000000..4eb72bf --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tpn_slowonly/searchw_finetune.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +# --validate +CUDA_VISIBLE_DEVICES=$1 python tools/hypertune.py configs/recognition/tpn/tpn_slowonly_edlloss_nokl_avuc_debias_r50_8x8x1_150e_kinetics_rgb.py \ + --work-dir work_dirs/tpn_slowonly/search_ucf101_tpn_slowonly_edlloss_nokl_avuc_debias \ + --weight_factor $2 $3 \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 \ + --validate + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tsm/baseline_tsm_openmax.sh b/Downstream/Open-Set-Action-Recognition/experiments/tsm/baseline_tsm_openmax.sh new file mode 100644 index 0000000..c9f267f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tsm/baseline_tsm_openmax.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 +OOD_DATASET=$2 +IND_DATA='data/ucf101/ucf101_val_split_1_videos.txt' + +case ${OOD_DATASET} in + HMDB) + # run ood detection on hmdb-51 validation set + OOD_DATA='data/hmdb51/hmdb51_val_split_1_videos.txt' + NUM_CLASSES=51 + ;; + MiT) + # run ood detection on mit-v2 validation set + OOD_DATA='data/mit/mit_val_list_videos.txt' + NUM_CLASSES=305 + ;; + *) + echo "Dataset not supported: "${OOD_DATASET} + exit + ;; +esac +RESULT_DIR='experiments/tsm/results' + +CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/baseline_openmax.py \ + --config configs/recognition/tsm/inference_tsm_dnn.py \ + --checkpoint work_dirs/tsm/finetune_ucf101_tsm_dnn/latest.pth \ + --cache_mav_dist experiments/tsm/results_baselines/openmax/ucf101_mav_dist \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --ood_ncls ${NUM_CLASSES} \ + --result_prefix experiments/tsm/results_baselines/openmax/TSM_OpenMax_${OOD_DATASET} + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tsm/baseline_tsm_rpl.sh b/Downstream/Open-Set-Action-Recognition/experiments/tsm/baseline_tsm_rpl.sh new file mode 100644 index 0000000..e491e56 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tsm/baseline_tsm_rpl.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 +OOD_DATASET=$2 +IND_DATA='data/ucf101/ucf101_val_split_1_videos.txt' + +case ${OOD_DATASET} in + HMDB) + # run ood detection on hmdb-51 validation set + OOD_DATA='data/hmdb51/hmdb51_val_split_1_videos.txt' + NUM_CLASSES=51 + ;; + MiT) + # run ood detection on mit-v2 validation set + OOD_DATA='data/mit/mit_val_list_videos.txt' + NUM_CLASSES=305 + ;; + *) + echo "Dataset not supported: "${OOD_DATASET} + exit + ;; +esac + +CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/baseline_rpl.py \ + --config configs/recognition/tsm/inference_tsm_rpl.py \ + --checkpoint work_dirs/tsm/finetune_ucf101_tsm_rpl/latest.pth \ + --train_data data/ucf101/ucf101_train_split_1_videos.txt \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --ood_ncls ${NUM_CLASSES} \ + --ood_dataname ${OOD_DATASET} \ + --result_prefix experiments/tsm/results_baselines/rpl/RPL + +cd $pwd_dir +echo "Experiments finished!" 
\ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tsm/baseline_tsm_softmax.sh b/Downstream/Open-Set-Action-Recognition/experiments/tsm/baseline_tsm_softmax.sh new file mode 100644 index 0000000..16740c7 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tsm/baseline_tsm_softmax.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 +OOD_DATASET=$2 + +case ${OOD_DATASET} in + HMDB) + NUM_CLASSES=51 + ;; + MiT) + NUM_CLASSES=305 + ;; + *) + echo "Dataset not supported: "${OOD_DATASET} + exit + ;; +esac +RESULT_DIR='experiments/tsm/results' + +CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/baseline_softmax.py \ + --config configs/recognition/tsm/inference_tsm_dnn.py \ + --checkpoint work_dirs/tsm/finetune_ucf101_tsm_dnn/latest.pth \ + --batch_size 4 \ + --ood_ncls ${NUM_CLASSES} \ + --result_prefix experiments/tsm/results_baselines/softmax/TSM_SoftMax_${OOD_DATASET} + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tsm/demo_tsm_debias.sh b/Downstream/Open-Set-Action-Recognition/experiments/tsm/demo_tsm_debias.sh new file mode 100644 index 0000000..fafc940 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tsm/demo_tsm_debias.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 +DATASET=$2 # kinetics10, or mimetics10 +case ${DATASET} in + kinetics10) + DATA_SPLIT='data/kinetics10/kinetics10_val_list_videos.txt' + VIDEO_DIR='data/kinetics10/videos_val' + ;; + mimetics10) + DATA_SPLIT='data/mimetics10/mimetics10_test_list_videos.txt' + VIDEO_DIR='data/mimetics10/videos' + ;; + *) + echo "Dataset is not supported: "${DATASET} + exit + ;; +esac + +CUDA_VISIBLE_DEVICES=$1 python experiments/demo.py configs/recognition/tsm/inference_tsm_enn.py \ + --ckpt_dear work_dirs/tsm/train_kinetics10_tsm_DEAR/latest.pth \ + --ckpt_nodebias work_dirs/tsm/train_kinetics10_tsm_DEAR_noDebias/latest.pth \ + --split_file ${DATA_SPLIT} \ + --video_path ${VIDEO_DIR} \ + --result_list experiments/tsm/results_debias/demo_list_TSM_${DATASET}.txt + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tsm/evaluate_tsm_DEAR.sh b/Downstream/Open-Set-Action-Recognition/experiments/tsm/evaluate_tsm_DEAR.sh new file mode 100644 index 0000000..36ee22e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tsm/evaluate_tsm_DEAR.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 +DATASET=$2 # kinetics10, or mimetics10 +case ${DATASET} in + kinetics10) + DATA_SPLIT='data/kinetics10/kinetics10_val_list_videos.txt' + VIDEO_DIR='data/kinetics10/videos_val' + ;; + mimetics10) + DATA_SPLIT='data/mimetics10/mimetics10_test_list_videos.txt' + VIDEO_DIR='data/mimetics10/videos' + ;; + *) + echo "Dataset is not supported: "${DATASET} + exit + ;; +esac + +CUDA_VISIBLE_DEVICES=$1 python experiments/eval_debias.py configs/recognition/tsm/inference_tsm_enn.py \ + work_dirs/tsm/train_kinetics10_tsm_DEAR/latest.pth \ + --split_file ${DATA_SPLIT} \ + --video_path ${VIDEO_DIR} \ + --result_prefix experiments/tsm/results_debias/Eval_TSM_DEAR_${DATASET} + +cd $pwd_dir +echo "Experiments finished!" 
\ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tsm/evaluate_tsm_DEAR_noDebias.sh b/Downstream/Open-Set-Action-Recognition/experiments/tsm/evaluate_tsm_DEAR_noDebias.sh new file mode 100644 index 0000000..c63b972 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tsm/evaluate_tsm_DEAR_noDebias.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 +DATASET=$2 # kinetics10, or mimetics10 +case ${DATASET} in + kinetics10) + DATA_SPLIT='data/kinetics10/kinetics10_val_list_videos.txt' + VIDEO_DIR='data/kinetics10/videos_val' + ;; + mimetics10) + DATA_SPLIT='data/mimetics10/mimetics10_test_list_videos.txt' + VIDEO_DIR='data/mimetics10/videos' + ;; + *) + echo "Dataset is not supported: "${DATASET} + exit + ;; +esac + +CUDA_VISIBLE_DEVICES=$1 python experiments/eval_debias.py configs/recognition/tsm/inference_tsm_enn.py \ + work_dirs/tsm/train_kinetics10_tsm_DEAR_noDebias/latest.pth \ + --split_file ${DATA_SPLIT} \ + --video_path ${VIDEO_DIR} \ + --result_prefix experiments/tsm/results_debias/Eval_TSM_DEAR_noDebias_${DATASET} + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tsm/evaluate_tsm_bnn_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/tsm/evaluate_tsm_bnn_ucf101.sh new file mode 100644 index 0000000..90f6a62 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tsm/evaluate_tsm_bnn_ucf101.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/test.py configs/recognition/tsm/finetune_ucf101_tsm_bnn.py \ + work_dirs/tsm/finetune_ucf101_tsm_bnn/latest.pth \ + --out work_dirs/tsm/test_ucf101_tsm_bnn.pkl \ + --eval top_k_accuracy mean_class_accuracy + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tsm/evaluate_tsm_dnn_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/tsm/evaluate_tsm_dnn_ucf101.sh new file mode 100644 index 0000000..541177f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tsm/evaluate_tsm_dnn_ucf101.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/test.py configs/recognition/tsm/finetune_ucf101_tsm_dnn.py \ + work_dirs/tsm/finetune_ucf101_tsm_dnn/latest.pth \ + --out work_dirs/tsm/test_ucf101_tsm_dnn.pkl \ + --eval top_k_accuracy mean_class_accuracy + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tsm/evaluate_tsm_edlnokl_avuc_debias_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/tsm/evaluate_tsm_edlnokl_avuc_debias_ucf101.sh new file mode 100644 index 0000000..138ef9b --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tsm/evaluate_tsm_edlnokl_avuc_debias_ucf101.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/test.py configs/recognition/tsm/finetune_ucf101_tsm_edlnokl_avuc_debias.py \ + work_dirs/tsm/finetune_ucf101_tsm_edlnokl_avuc_debias/latest.pth \ + --out work_dirs/tsm/test_ucf101_tsm_edlnokl_avuc_debias.pkl \ + --eval top_k_accuracy mean_class_accuracy + +cd $pwd_dir +echo "Experiments finished!" 
diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tsm/evaluate_tsm_rpl_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/tsm/evaluate_tsm_rpl_ucf101.sh new file mode 100644 index 0000000..9a192d4 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tsm/evaluate_tsm_rpl_ucf101.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/test.py configs/recognition/tsm/finetune_ucf101_tsm_rpl.py \ + work_dirs/tsm/finetune_ucf101_tsm_rpl/latest.pth \ + --out work_dirs/tsm/test_ucf101_tsm_rpl.pkl \ + --eval top_k_accuracy mean_class_accuracy + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tsm/finetune_tsm_bnn_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/tsm/finetune_tsm_bnn_ucf101.sh new file mode 100644 index 0000000..2432b2f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tsm/finetune_tsm_bnn_ucf101.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/tsm/finetune_ucf101_tsm_bnn.py \ + --work-dir work_dirs/tsm/finetune_ucf101_tsm_bnn \ + --validate \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tsm/finetune_tsm_dnn_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/tsm/finetune_tsm_dnn_ucf101.sh new file mode 100644 index 0000000..0c4878e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tsm/finetune_tsm_dnn_ucf101.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +# --validate +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/tsm/finetune_ucf101_tsm_dnn.py \ + --work-dir work_dirs/tsm/finetune_ucf101_tsm_dnn \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 \ + --validate + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tsm/finetune_tsm_edlnokl_avuc_debias_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/tsm/finetune_tsm_edlnokl_avuc_debias_ucf101.sh new file mode 100644 index 0000000..fb94538 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tsm/finetune_tsm_edlnokl_avuc_debias_ucf101.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +# --validate +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/tsm/finetune_ucf101_tsm_edlnokl_avuc_debias.py \ + --work-dir work_dirs/tsm/finetune_ucf101_tsm_edlnokl_avuc_debias \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 \ + --validate + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tsm/finetune_tsm_rpl_ucf101.sh b/Downstream/Open-Set-Action-Recognition/experiments/tsm/finetune_tsm_rpl_ucf101.sh new file mode 100644 index 0000000..cf65b6f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tsm/finetune_tsm_rpl_ucf101.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +# --validate +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/tsm/finetune_ucf101_tsm_rpl.py \ + --work-dir work_dirs/tsm/finetune_ucf101_tsm_rpl \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 \ + --validate + +cd $pwd_dir +echo "Experiments finished!" 
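The `edlnokl_avuc_debias` configurations fine-tuned above train an evidential (EDL) classification head. Under the usual Dirichlet formulation, the network emits non-negative per-class evidence from which both the expected class probabilities and a closed-form uncertainty follow; the sketch below shows that computation as a generic illustration, not the repository's head or loss code.

```python
import numpy as np

def edl_prediction(evidence):
    # evidence: (N, C) non-negative outputs (e.g. softplus/exp of logits)
    alpha = evidence + 1.0                        # Dirichlet concentration parameters
    strength = alpha.sum(axis=1, keepdims=True)   # total evidence + C
    probs = alpha / strength                      # expected class probabilities
    uncertainty = evidence.shape[1] / strength.squeeze(axis=1)   # u = C / S, in (0, 1]
    return probs, uncertainty
```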
diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tsm/run_draw_confmat.sh b/Downstream/Open-Set-Action-Recognition/experiments/tsm/run_draw_confmat.sh new file mode 100644 index 0000000..576e6bd --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tsm/run_draw_confmat.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +OOD_DATA=$1 # HMDB or MiT +RESULT_PATH="experiments/tsm/results" + +# Confusion Matrix comparison +python experiments/draw_confusion_matrix.py \ + --ood_result ${RESULT_PATH}/TSM_EDLNoKLAvUCDebias_EDL_${OOD_DATA}_result.npz \ + --uncertain_thresh 0.004549 \ + --save_file ${RESULT_PATH}/../results_confmat/TSM_DEAR_${OOD_DATA}_ConfMat.png + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tsm/run_get_threshold.sh b/Downstream/Open-Set-Action-Recognition/experiments/tsm/run_get_threshold.sh new file mode 100644 index 0000000..4cf339c --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tsm/run_get_threshold.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 +MODEL=$2 +BATCHSIZE=$3 +TRAIN_DATA='data/ucf101/ucf101_train_split_1_videos.txt' +RESULT_DIR='experiments/tsm/results' + +case ${MODEL} in + dnn) + # get the BALD threshold for tsm model trained on UCF-101 + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/get_threshold.py \ + --config configs/recognition/tsm/inference_tsm_dnn.py \ + --checkpoint work_dirs/tsm/finetune_ucf101_tsm_dnn/latest.pth \ + --train_data ${TRAIN_DATA} \ + --batch_size ${BATCHSIZE} \ + --uncertainty BALD \ + --result_prefix ${RESULT_DIR}/TSM_DNN_BALD + ;; + bnn) + # get the BALD threshold for tsm model trained on UCF-101 + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/get_threshold.py \ + --config configs/recognition/tsm/inference_tsm_bnn.py \ + --checkpoint work_dirs/tsm/finetune_ucf101_tsm_bnn/latest.pth \ + --train_data ${TRAIN_DATA} \ + --batch_size ${BATCHSIZE} \ + --uncertainty BALD \ + --result_prefix ${RESULT_DIR}/TSM_BNN_BALD + ;; + edlnokl_avuc_debias) + # get the EDL threshold for TSM_EDL_AvUC_Debias model trained on UCF-101 + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/get_threshold.py \ + --config configs/recognition/tsm/inference_tsm_enn.py \ + --checkpoint work_dirs/tsm/finetune_ucf101_tsm_edlnokl_avuc_debias/latest.pth \ + --train_data ${TRAIN_DATA} \ + --batch_size ${BATCHSIZE} \ + --uncertainty EDL \ + --result_prefix ${RESULT_DIR}/TSM_EDLNoKLAvUCDebias_EDL + ;; + *) + echo "Invalid model: "${MODEL} + exit + ;; +esac + + +cd $pwd_dir +echo "Experiments finished!" 
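`get_threshold.py`, run above, derives each model's rejection threshold from uncertainties collected on the closed-set UCF-101 training split. One simple rule of this kind, shown below as a sketch (the script's exact rule may differ), keeps a fixed fraction of training samples below the threshold.

```python
import numpy as np

def threshold_from_training_set(uncertainties, keep_ratio=0.95):
    # choose the value below which `keep_ratio` of in-distribution training
    # samples fall; anything above it is later rejected as unknown
    return float(np.percentile(uncertainties, keep_ratio * 100.0))

print(threshold_from_training_set(np.random.rand(1000)))   # toy usage with synthetic scores
```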
\ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tsm/run_ood_detection.sh b/Downstream/Open-Set-Action-Recognition/experiments/tsm/run_ood_detection.sh new file mode 100644 index 0000000..5967a97 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tsm/run_ood_detection.sh @@ -0,0 +1,70 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +DEVICE=$1 +OOD_DATASET=$2 +MODEL=$3 +IND_DATA='data/ucf101/ucf101_val_split_1_videos.txt' + +case ${OOD_DATASET} in + HMDB) + # run ood detection on hmdb-51 validation set + OOD_DATA='data/hmdb51/hmdb51_val_split_1_videos.txt' + ;; + MiT) + # run ood detection on mit-v2 validation set + OOD_DATA='data/mit/mit_val_list_videos.txt' + ;; + *) + echo "Dataset not supported: "${OOD_DATASET} + exit + ;; +esac +RESULT_DIR='experiments/tsm/results' + +case ${MODEL} in + dnn) + # DNN with Dropout model + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/ood_detection.py \ + --config configs/recognition/tsm/inference_tsm_dnn.py \ + --checkpoint work_dirs/tsm/finetune_ucf101_tsm_dnn/latest.pth \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --uncertainty BALD \ + --result_prefix ${RESULT_DIR}/TSM_DNN_BALD_${OOD_DATASET} + ;; + bnn) + # Bayesian Neural Network (BNN) model + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/ood_detection.py \ + --config configs/recognition/tsm/inference_tsm_bnn.py \ + --checkpoint work_dirs/tsm/finetune_ucf101_tsm_bnn/latest.pth \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --uncertainty BALD \ + --result_prefix ${RESULT_DIR}/TSM_BNN_BALD_${OOD_DATASET} + ;; + edlnokl_avuc_debias) + # Evidential Deep Learning (without KL divergence loss term) with AvU Calibration and Debiasing + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/ood_detection.py \ + --config configs/recognition/tsm/inference_tsm_enn.py \ + --checkpoint work_dirs/tsm/finetune_ucf101_tsm_edlnokl_avuc_debias/latest.pth \ + --ind_data ${IND_DATA} \ + --ood_data ${OOD_DATA} \ + --uncertainty EDL \ + --result_prefix ${RESULT_DIR}/TSM_EDLNoKLAvUCDebias_EDL_${OOD_DATASET} + ;; + *) + echo "Invalid model: "${MODEL} + exit + ;; +esac + + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tsm/run_openness.sh b/Downstream/Open-Set-Action-Recognition/experiments/tsm/run_openness.sh new file mode 100644 index 0000000..51a6859 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tsm/run_openness.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +OOD_DATA=$1 # HMDB or MiT +case ${OOD_DATA} in + HMDB) + NUM_CLASSES=51 + ;; + MiT) + NUM_CLASSES=305 + ;; + *) + echo "Invalid OOD Dataset: "${OOD_DATA} + exit + ;; +esac + +# OOD Detection comparison +python experiments/compare_openness.py \ + --base_model tsm \ + --baselines TSM_DNN_BALD TSM_BNN_BALD TSM_EDLNoKLAvUCDebias_EDL \ + --thresholds 0.000022 0.000003 0.004549 \ + --styles b k r \ + --ood_data ${OOD_DATA} \ + --ood_ncls ${NUM_CLASSES} \ + --ind_ncls 101 \ + --result_png F1_openness_compare_${OOD_DATA}.png + + +cd $pwd_dir +echo "Experiments finished!"
\ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tsm/run_openness_new.sh b/Downstream/Open-Set-Action-Recognition/experiments/tsm/run_openness_new.sh new file mode 100644 index 0000000..9360697 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tsm/run_openness_new.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +OOD_DATA=$1 # HMDB or MiT +RESULT_FILES=("results_baselines/openmax/TSM_OpenMax_${OOD_DATA}_result.npz" \ # openmax + "results/TSM_DNN_BALD_${OOD_DATA}_result.npz" \ # mc dropout + "results/TSM_BNN_BALD_${OOD_DATA}_result.npz" \ # bnn svi + "results_baselines/openmax/TSM_OpenMax_${OOD_DATA}_result.npz" \ # softmax + "results_baselines/rpl/TSM_RPL_${OOD_DATA}_result.npz" \ # rpl + "results/TSM_EDLNoKLAvUCDebias_EDL_${OOD_DATA}_result.npz") # dear (ours) +THRESHOLDS=(-1 \ + 0.000022 \ + 0.000003 \ + 0.999683 \ + 0.999167 \ + 0.004549) + +# OOD Detection comparison +# The folders `results/` and `results_baselines` are in the `experiments/tsm/` folder. +python experiments/compare_openness_new.py \ + --base_model tsm \ + --ood_data ${OOD_DATA} \ + --baseline_results ${RESULT_FILES[@]} + +# OOD Detection comparison by using thresholds +echo 'Results by using thresholds:' +python experiments/compare_openness_new.py \ + --base_model tsm \ + --ood_data ${OOD_DATA} \ + --thresholds ${THRESHOLDS[@]} \ + --baseline_results ${RESULT_FILES[@]} + +cd $pwd_dir +echo "Experiments finished!" \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tsm/run_reliability_evaluation.sh b/Downstream/Open-Set-Action-Recognition/experiments/tsm/run_reliability_evaluation.sh new file mode 100644 index 0000000..42723d4 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tsm/run_reliability_evaluation.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +export CUDA_HOME='/usr/local/cuda' + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +OOD_DATASET=$1 +MODEL=$2 +RESULT_DIR='experiments/tsm/results' + +case ${MODEL} in + dnn) + # DNN with Dropout model + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/evaluate_calibration.py \ + --ood_result ${RESULT_DIR}/TSM_DNN_BALD_${OOD_DATASET}_result.npz \ + --save_prefix ${RESULT_DIR}/../results_reliability/TSM_DNN_BALD_${OOD_DATASET}_reliability + ;; + edlnokl_avuc_debias) + # Evidential Deep Learning (without KL divergence loss term) with AvU Calibration and Debiasing + CUDA_VISIBLE_DEVICES=${DEVICE} python experiments/evaluate_calibration.py \ + --ood_result ${RESULT_DIR}/TSM_EDLNoKLAvUCDebias_EDL_${OOD_DATASET}_result.npz \ + --save_prefix ${RESULT_DIR}/../results_reliability/TSM_EDLNoKLAvUCDebias_EDL_${OOD_DATASET}_reliability + ;; + *) + echo "Invalid model: "${MODEL} + exit + ;; +esac + + +cd $pwd_dir +echo "Experiments finished!" 
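`experiments/evaluate_calibration.py` is not shown in this diff either; reliability evaluations of this sort are commonly summarized by the expected calibration error, sketched below under the assumption that per-sample confidences and 0/1 correctness indicators are available as NumPy arrays (this is a generic ECE, not necessarily the repository's exact metric):

```python
import numpy as np

def expected_calibration_error(confidence, correct, num_bins=15):
    """ECE over equal-width confidence bins (illustrative, not the repo's code)."""
    bins = np.linspace(0.0, 1.0, num_bins + 1)
    ece = 0.0
    for lo, hi in zip(bins[:-1], bins[1:]):
        mask = (confidence > lo) & (confidence <= hi)
        if mask.any():
            # |accuracy - average confidence| in this bin, weighted by bin size
            gap = abs(correct[mask].mean() - confidence[mask].mean())
            ece += mask.mean() * gap
    return ece

# Toy usage with synthetic, roughly calibrated predictions.
rng = np.random.default_rng(0)
conf = rng.random(1000)
correct = (rng.random(1000) < conf).astype(float)
print('ECE:', expected_calibration_error(conf, correct))
```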
\ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tsm/train_tsm_DEAR_kinetics10.sh b/Downstream/Open-Set-Action-Recognition/experiments/tsm/train_tsm_DEAR_kinetics10.sh new file mode 100644 index 0000000..5c1c558 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tsm/train_tsm_DEAR_kinetics10.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +# --validate +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/tsm/train_kinetics10_tsm_DEAR.py \ + --work-dir work_dirs/tsm/train_kinetics10_tsm_DEAR \ + --validate \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/experiments/tsm/train_tsm_DEAR_noDebias_kinetics10.sh b/Downstream/Open-Set-Action-Recognition/experiments/tsm/train_tsm_DEAR_noDebias_kinetics10.sh new file mode 100644 index 0000000..d92c1a7 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/experiments/tsm/train_tsm_DEAR_noDebias_kinetics10.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +pwd_dir=$pwd +cd ../../ + +source activate mmaction + +# --validate +CUDA_VISIBLE_DEVICES=$1 python tools/train.py configs/recognition/tsm/train_kinetics10_tsm_DEAR_noDebias.py \ + --work-dir work_dirs/tsm/train_kinetics10_tsm_DEAR_noDebias \ + --validate \ + --seed 0 \ + --deterministic \ + --gpu-ids 0 + +cd $pwd_dir +echo "Experiments finished!" diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/__init__.py b/Downstream/Open-Set-Action-Recognition/mmaction/__init__.py new file mode 100644 index 0000000..168e52a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/__init__.py @@ -0,0 +1,15 @@ +import mmcv +from mmcv import digit_version + +from .version import __version__ + +mmcv_minimum_version = '1.1.1' +mmcv_maximum_version = '1.3' +mmcv_version = digit_version(mmcv.__version__) + +assert (digit_version(mmcv_minimum_version) <= mmcv_version + <= digit_version(mmcv_maximum_version)), \ + f'MMCV=={mmcv.__version__} is used but incompatible. ' \ + f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.' + +__all__ = ['__version__'] diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/apis/__init__.py b/Downstream/Open-Set-Action-Recognition/mmaction/apis/__init__.py new file mode 100644 index 0000000..d1962e6 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/apis/__init__.py @@ -0,0 +1,8 @@ +from .inference import inference_recognizer, init_recognizer +from .test import multi_gpu_test, single_gpu_test, collect_results_cpu +from .train import train_model + +__all__ = [ + 'train_model', 'init_recognizer', 'inference_recognizer', 'multi_gpu_test', + 'single_gpu_test', 'collect_results_cpu' +] diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/apis/inference.py b/Downstream/Open-Set-Action-Recognition/mmaction/apis/inference.py new file mode 100644 index 0000000..46f0273 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/apis/inference.py @@ -0,0 +1,120 @@ +import os +import os.path as osp +from operator import itemgetter + +import mmcv +import torch +from mmcv.parallel import collate, scatter +from mmcv.runner import load_checkpoint + +from ..datasets.pipelines import Compose +from ..models import build_recognizer + + +def init_recognizer(config, + checkpoint=None, + device='cuda:0', + use_frames=False): + """Initialize a recognizer from config file. 
+ + Args: + config (str | :obj:`mmcv.Config`): Config file path or the config + object. + checkpoint (str | None, optional): Checkpoint path/url. If set to None, + the model will not load any weights. Default: None. + device (str | :obj:`torch.device`): The desired device of returned + tensor. Default: 'cuda:0'. + use_frames (bool): Whether to use rawframes as input. Default:False. + + Returns: + nn.Module: The constructed recognizer. + """ + if isinstance(config, str): + config = mmcv.Config.fromfile(config) + elif not isinstance(config, mmcv.Config): + raise TypeError('config must be a filename or Config object, ' + f'but got {type(config)}') + if ((use_frames and config.dataset_type != 'RawframeDataset') + or (not use_frames and config.dataset_type != 'VideoDataset')): + input_type = 'rawframes' if use_frames else 'video' + raise RuntimeError('input data type should be consist with the ' + f'dataset type in config, but got input type ' + f"'{input_type}' and dataset type " + f"'{config.dataset_type}'") + # pretrained model is unnecessary since we directly load checkpoint later + config.model.backbone.pretrained = None + model = build_recognizer(config.model, test_cfg=config.test_cfg) + if checkpoint is not None: + load_checkpoint(model, checkpoint, map_location=device) + model.cfg = config + model.to(device) + model.eval() + return model + + +def inference_recognizer(model, video_path, label_path, use_frames=False): + """Inference a video with the detector. + + Args: + model (nn.Module): The loaded recognizer. + video_path (str): The video file path/url or the rawframes directory + path. If ``use_frames`` is set to True, it should be rawframes + directory path. Otherwise, it should be video file path. + label_path (str): The label file path. + use_frames (bool): Whether to use rawframes as input. Default:False. + + Returns: + dict[tuple(str, float)]: Top-5 recognition result dict. 
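As an aside to the two APIs documented above, a minimal usage sketch. The config and checkpoint paths reuse the TSM files referenced in the scripts earlier in this diff; the video file and the UCF-101 label list are placeholder paths:

```python
from mmaction.apis import init_recognizer, inference_recognizer

# Placeholder inputs: substitute a real video and label list.
config_file = 'configs/recognition/tsm/inference_tsm_dnn.py'
checkpoint_file = 'work_dirs/tsm/finetune_ucf101_tsm_dnn/latest.pth'

model = init_recognizer(config_file, checkpoint_file, device='cuda:0')
results = inference_recognizer(model, 'demo/demo.mp4', 'demo/label_map_ucf101.txt')
for label, score in results:  # top-5 (label, score) pairs, highest score first
    print(label, score)
```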
+ """ + if not (osp.exists(video_path) or video_path.startswith('http')): + raise RuntimeError(f"'{video_path}' is missing") + + if osp.isfile(video_path) and use_frames: + raise RuntimeError( + f"'{video_path}' is a video file, not a rawframe directory") + if osp.isdir(video_path) and not use_frames: + raise RuntimeError( + f"'{video_path}' is a rawframe directory, not a video file") + + cfg = model.cfg + device = next(model.parameters()).device # model device + # construct label map + with open(label_path, 'r') as f: + label = [line.strip() for line in f] + # build the data pipeline + test_pipeline = cfg.data.test.pipeline + test_pipeline = Compose(test_pipeline) + # prepare data + if use_frames: + filename_tmpl = cfg.data.test.get('filename_tmpl', 'img_{:05}.jpg') + modality = cfg.data.test.get('modality', 'RGB') + start_index = cfg.data.test.get('start_index', 1) + data = dict( + frame_dir=video_path, + total_frames=len(os.listdir(video_path)), + # assuming files in ``video_path`` are all named with ``filename_tmpl`` # noqa: E501 + label=-1, + start_index=start_index, + filename_tmpl=filename_tmpl, + modality=modality) + else: + start_index = cfg.data.test.get('start_index', 0) + data = dict( + filename=video_path, + label=-1, + start_index=start_index, + modality='RGB') + data = test_pipeline(data) + data = collate([data], samples_per_gpu=1) + if next(model.parameters()).is_cuda: + # scatter to specified GPU + data = scatter(data, [device])[0] + + # forward the model + with torch.no_grad(): + scores = model(return_loss=False, **data)[0] + score_tuples = tuple(zip(label, scores)) + score_sorted = sorted(score_tuples, key=itemgetter(1), reverse=True) + + top5_label = score_sorted[:5] + return top5_label diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/apis/test.py b/Downstream/Open-Set-Action-Recognition/mmaction/apis/test.py new file mode 100644 index 0000000..3da98ba --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/apis/test.py @@ -0,0 +1,188 @@ +import os.path as osp +import pickle +import shutil +import tempfile +import pdb +import mmcv +import torch +import torch.distributed as dist +from mmcv.runner import get_dist_info + + +def single_gpu_test(model, data_loader): + """Test model with a single gpu. + + This method tests model with a single gpu and displays test progress bar. + + Args: + model (nn.Module): Model to be tested. + data_loader (nn.Dataloader): Pytorch data loader. + + Returns: + list: The prediction results. + """ + model.eval() + results = [] + dataset = data_loader.dataset + prog_bar = mmcv.ProgressBar(len(dataset)) + for data in data_loader: + with torch.no_grad(): + result = model(return_loss=False, **data) + results.extend(result) + + # use the first key as main key to calculate the batch size + batch_size = len(next(iter(data.values()))) + for _ in range(batch_size): + prog_bar.update() + return results + + +def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=True): + """Test model with multiple gpus. + + This method tests model with multiple gpus and collects the results + under two different modes: gpu and cpu modes. By setting 'gpu_collect=True' + it encodes results to gpu tensors and use gpu communication for results + collection. On cpu mode it saves the results on different gpus to 'tmpdir' + and collects them by the rank 0 worker. + + Args: + model (nn.Module): Model to be tested. + data_loader (nn.Dataloader): Pytorch data loader. 
+ tmpdir (str): Path of directory to save the temporary results from + different gpus under cpu mode. Default: None + gpu_collect (bool): Option to use either gpu or cpu to collect results. + Default: True + + Returns: + list: The prediction results. + """ + model.eval() + results = [] + dataset = data_loader.dataset + rank, world_size = get_dist_info() + if rank == 0: + prog_bar = mmcv.ProgressBar(len(dataset)) + for data in data_loader: + with torch.no_grad(): + # pdb.set_trace() + result = model(return_loss=False, **data) + results.extend(result) + + if rank == 0: + # use the first key as main key to calculate the batch size + batch_size = len(next(iter(data.values()))) + for _ in range(batch_size * world_size): + prog_bar.update() + + # collect results from all ranks + if gpu_collect: + results = collect_results_gpu(results, len(dataset)) + else: + results = collect_results_cpu(results, len(dataset), tmpdir) + return results + + +def collect_results_cpu(result_part, size, tmpdir=None): + """Collect results in cpu mode. + + It saves the results on different gpus to 'tmpdir' and collects + them by the rank 0 worker. + + Args: + result_part (list): Results to be collected + size (int): Result size. + tmpdir (str): Path of directory to save the temporary results from + different gpus under cpu mode. Default: None + + Returns: + list: Ordered results. + """ + rank, world_size = get_dist_info() + # create a tmp dir if it is not specified + if tmpdir is None: + MAX_LEN = 512 + # 32 is whitespace + dir_tensor = torch.full((MAX_LEN, ), + 32, + dtype=torch.uint8, + device='cuda') + if rank == 0: + mmcv.mkdir_or_exist('.dist_test') + tmpdir = tempfile.mkdtemp(dir='.dist_test') + tmpdir = torch.tensor( + bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda') + dir_tensor[:len(tmpdir)] = tmpdir + dist.broadcast(dir_tensor, 0) + tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip() + else: + mmcv.mkdir_or_exist(tmpdir) + # synchronizes all processes to make sure tmpdir exist + dist.barrier() + # dump the part result to the dir + mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl')) + # synchronizes all processes for loding pickle file + dist.barrier() + # collect all parts + if rank != 0: + return None + # load results of all parts from tmp dir + part_list = [] + for i in range(world_size): + part_file = osp.join(tmpdir, f'part_{i}.pkl') + part_list.append(mmcv.load(part_file)) + # sort the results + ordered_results = [] + for res in zip(*part_list): + ordered_results.extend(list(res)) + # the dataloader may pad some samples + ordered_results = ordered_results[:size] + # remove tmp dir + shutil.rmtree(tmpdir) + return ordered_results + + +def collect_results_gpu(result_part, size): + """Collect results in gpu mode. + + It encodes results to gpu tensors and use gpu communication for results + collection. + + Args: + result_part (list): Results to be collected + size (int): Result size. + + Returns: + list: Ordered results. 
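The re-ordering in `collect_results_cpu`/`collect_results_gpu` assumes the distributed sampler deals dataset indices round-robin across ranks, so rank `r` holds samples `r, r + world_size, r + 2 * world_size, ...`. A tiny standalone illustration (plain Python, not library code) of why the `zip(*part_list)` idiom restores dataset order:

```python
part_list = [
    ['s0', 's2', 's4'],  # results collected on rank 0
    ['s1', 's3', 's5'],  # results collected on rank 1
]
ordered_results = []
for res in zip(*part_list):
    ordered_results.extend(list(res))
print(ordered_results)  # ['s0', 's1', 's2', 's3', 's4', 's5']
```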
+ """ + rank, world_size = get_dist_info() + # dump result part to tensor with pickle + part_tensor = torch.tensor( + bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda') + # gather all result part tensor shape + shape_tensor = torch.tensor(part_tensor.shape, device='cuda') + shape_list = [shape_tensor.clone() for _ in range(world_size)] + dist.all_gather(shape_list, shape_tensor) + # padding result part tensor to max length + shape_max = torch.tensor(shape_list).max() + part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda') + part_send[:shape_tensor[0]] = part_tensor + part_recv_list = [ + part_tensor.new_zeros(shape_max) for _ in range(world_size) + ] + # gather all result part + dist.all_gather(part_recv_list, part_send) + + if rank == 0: + part_list = [] + for recv, shape in zip(part_recv_list, shape_list): + part_list.append( + pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())) + # sort the results + ordered_results = [] + for res in zip(*part_list): + ordered_results.extend(list(res)) + # the dataloader may pad some samples + ordered_results = ordered_results[:size] + return ordered_results + return None diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/apis/train.py b/Downstream/Open-Set-Action-Recognition/mmaction/apis/train.py new file mode 100644 index 0000000..ac78083 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/apis/train.py @@ -0,0 +1,151 @@ +import copy as cp + +import torch +from mmcv.parallel import MMDataParallel, MMDistributedDataParallel +from mmcv.runner import (DistSamplerSeedHook, EpochBasedRunner, OptimizerHook, + build_optimizer) +from mmcv.runner.hooks import Fp16OptimizerHook + +from ..core import (DistEpochEvalHook, EpochEvalHook, + OmniSourceDistSamplerSeedHook, OmniSourceRunner, AnnealingRunner) +from ..datasets import build_dataloader, build_dataset +from ..utils import get_root_logger + + +def train_model(model, + dataset, + cfg, + distributed=False, + validate=False, + timestamp=None, + meta=None): + """Train model entry function. + + Args: + model (nn.Module): The model to be trained. + dataset (:obj:`Dataset`): Train dataset. + cfg (dict): The config dict for training. + distributed (bool): Whether to use distributed training. + Default: False. + validate (bool): Whether to do evaluation. Default: False. + timestamp (str | None): Local time for runner. Default: None. + meta (dict | None): Meta dict to record some important information. 
+ Default: None + """ + logger = get_root_logger(log_level=cfg.log_level) + + # prepare data loaders + dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset] + + dataloader_setting = dict( + videos_per_gpu=cfg.data.get('videos_per_gpu', 1), + workers_per_gpu=cfg.data.get('workers_per_gpu', 1), + num_gpus=len(cfg.gpu_ids), + dist=distributed, + seed=cfg.seed, + pin_memory=cfg.data.get('pin_memory', True)) # by default, pin_memory=True + dataloader_setting = dict(dataloader_setting, + **cfg.data.get('train_dataloader', {})) + + if cfg.omnisource: + # The option can override videos_per_gpu + train_ratio = cfg.data.get('train_ratio', [1] * len(dataset)) + omni_videos_per_gpu = cfg.data.get('omni_videos_per_gpu', None) + if omni_videos_per_gpu is None: + dataloader_settings = [dataloader_setting] * len(dataset) + else: + dataloader_settings = [] + for videos_per_gpu in omni_videos_per_gpu: + this_setting = cp.deepcopy(dataloader_setting) + this_setting['videos_per_gpu'] = videos_per_gpu + dataloader_settings.append(this_setting) + data_loaders = [ + build_dataloader(ds, **setting) + for ds, setting in zip(dataset, dataloader_settings) + ] + + else: + data_loaders = [ + build_dataloader(ds, **dataloader_setting) for ds in dataset + ] + + # put model on gpus + if distributed: + find_unused_parameters = cfg.get('find_unused_parameters', False) + # Sets the `find_unused_parameters` parameter in + # torch.nn.parallel.DistributedDataParallel + model = MMDistributedDataParallel( + model.cuda(), + device_ids=[torch.cuda.current_device()], + broadcast_buffers=False, + find_unused_parameters=find_unused_parameters) + else: + model = MMDataParallel( + model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids) + + # build runner + optimizer = build_optimizer(model, cfg.optimizer) + + # Runner = OmniSourceRunner if cfg.omnisource else EpochBasedRunner + if cfg.omnisource: + Runner = OmniSourceRunner + elif cfg.get('annealing_runner', False): + Runner = AnnealingRunner # add annealing runner support + else: + Runner = EpochBasedRunner + runner = Runner( + model, + optimizer=optimizer, + work_dir=cfg.work_dir, + logger=logger, + meta=meta) + # an ugly workaround to make .log and .log.json filenames the same + runner.timestamp = timestamp + + # fp16 setting + fp16_cfg = cfg.get('fp16', None) + if fp16_cfg is not None: + optimizer_config = Fp16OptimizerHook( + **cfg.optimizer_config, **fp16_cfg, distributed=distributed) + elif distributed and 'type' not in cfg.optimizer_config: + optimizer_config = OptimizerHook(**cfg.optimizer_config) + else: + optimizer_config = cfg.optimizer_config + + # register hooks + runner.register_training_hooks(cfg.lr_config, optimizer_config, + cfg.checkpoint_config, cfg.log_config, + cfg.get('momentum_config', None)) + if distributed: + if cfg.omnisource: + runner.register_hook(OmniSourceDistSamplerSeedHook()) + else: + runner.register_hook(DistSamplerSeedHook()) + + if validate: + eval_cfg = cfg.get('evaluation', {}) + val_dataset = build_dataset(cfg.data.val, dict(test_mode=True)) + dataloader_setting = dict( + videos_per_gpu=cfg.data.get('videos_per_gpu', 1), + workers_per_gpu=cfg.data.get('workers_per_gpu', 1), + # cfg.gpus will be ignored if distributed + num_gpus=len(cfg.gpu_ids), + dist=distributed, + pin_memory=cfg.data.get('pin_memory', True), # by default, pin_memory=True + shuffle=False) + dataloader_setting = dict(dataloader_setting, + **cfg.data.get('val_dataloader', {})) + val_dataloader = build_dataloader(val_dataset, **dataloader_setting) + eval_hook = 
DistEpochEvalHook if distributed else EpochEvalHook + runner.register_hook(eval_hook(val_dataloader, **eval_cfg)) + + if cfg.resume_from: + runner.resume(cfg.resume_from) + elif cfg.load_from: + runner.load_checkpoint(cfg.load_from) + runner_kwargs = dict() + if cfg.omnisource: + runner_kwargs = dict(train_ratio=train_ratio) + if cfg.get('annealing_runner', False): + runner_kwargs.update(annealing=True) + runner.run(data_loaders, cfg.workflow, cfg.total_epochs, **runner_kwargs) diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/core/__init__.py b/Downstream/Open-Set-Action-Recognition/mmaction/core/__init__.py new file mode 100644 index 0000000..402c24c --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/core/__init__.py @@ -0,0 +1,4 @@ +from .evaluation import * # noqa: F401, F403 +from .lr import * # noqa: F401, F403 +from .optimizer import * # noqa: F401, F403 +from .runner import * # noqa: F401, F403 diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/core/evaluation/__init__.py b/Downstream/Open-Set-Action-Recognition/mmaction/core/evaluation/__init__.py new file mode 100644 index 0000000..4a2fab5 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/core/evaluation/__init__.py @@ -0,0 +1,17 @@ +from .accuracy import (average_precision_at_temporal_iou, + average_recall_at_avg_proposals, confusion_matrix, + get_weighted_score, interpolated_precision_recall, + mean_average_precision, mean_class_accuracy, + mmit_mean_average_precision, pairwise_temporal_iou, + softmax, top_k_accuracy) +from .eval_detection import ActivityNetDetection +from .eval_hooks import DistEpochEvalHook, EpochEvalHook + +__all__ = [ + 'DistEpochEvalHook', 'EpochEvalHook', 'top_k_accuracy', + 'mean_class_accuracy', 'confusion_matrix', 'mean_average_precision', + 'get_weighted_score', 'average_recall_at_avg_proposals', + 'pairwise_temporal_iou', 'average_precision_at_temporal_iou', + 'ActivityNetDetection', 'softmax', 'interpolated_precision_recall', + 'mmit_mean_average_precision' +] diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/core/evaluation/accuracy.py b/Downstream/Open-Set-Action-Recognition/mmaction/core/evaluation/accuracy.py new file mode 100644 index 0000000..86527b9 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/core/evaluation/accuracy.py @@ -0,0 +1,519 @@ +import numpy as np + + +def confusion_matrix(y_pred, y_real, normalize=None): + """Compute confusion matrix. + + Args: + y_pred (list[int] | np.ndarray[int]): Prediction labels. + y_real (list[int] | np.ndarray[int]): Ground truth labels. + normalize (str | None): Normalizes confusion matrix over the true + (rows), predicted (columns) conditions or all the population. + If None, confusion matrix will not be normalized. Options are + "true", "pred", "all", None. Default: None. + + Returns: + np.ndarray: Confusion matrix. 
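A short worked example for `confusion_matrix` as documented above (it is re-exported from `mmaction.core.evaluation`; inputs must be int64, which is NumPy's default integer type on most platforms). Rows index the ground-truth labels and columns the predictions:

```python
from mmaction.core.evaluation import confusion_matrix

y_pred = [0, 1, 1, 2, 2, 2]
y_real = [0, 1, 2, 2, 2, 1]

print(confusion_matrix(y_pred, y_real))
# [[1 0 0]
#  [0 1 1]
#  [0 1 2]]
print(confusion_matrix(y_pred, y_real, normalize='true'))  # row-normalized
```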
+ """ + if normalize not in ['true', 'pred', 'all', None]: + raise ValueError("normalize must be one of {'true', 'pred', " + "'all', None}") + + if isinstance(y_pred, list): + y_pred = np.array(y_pred) + if not isinstance(y_pred, np.ndarray): + raise TypeError( + f'y_pred must be list or np.ndarray, but got {type(y_pred)}') + if not y_pred.dtype == np.int64: + raise TypeError( + f'y_pred dtype must be np.int64, but got {y_pred.dtype}') + + if isinstance(y_real, list): + y_real = np.array(y_real) + if not isinstance(y_real, np.ndarray): + raise TypeError( + f'y_real must be list or np.ndarray, but got {type(y_real)}') + if not y_real.dtype == np.int64: + raise TypeError( + f'y_real dtype must be np.int64, but got {y_real.dtype}') + + label_set = np.unique(np.concatenate((y_pred, y_real))) + num_labels = len(label_set) + label_map = {label: i for i, label in enumerate(label_set)} + confusion_mat = np.zeros((num_labels, num_labels), dtype=np.int64) + for rlabel, plabel in zip(y_real, y_pred): + index_real = label_map[rlabel] + index_pred = label_map[plabel] + confusion_mat[index_real][index_pred] += 1 + + with np.errstate(all='ignore'): + if normalize == 'true': + confusion_mat = ( + confusion_mat / confusion_mat.sum(axis=1, keepdims=True)) + elif normalize == 'pred': + confusion_mat = ( + confusion_mat / confusion_mat.sum(axis=0, keepdims=True)) + elif normalize == 'all': + confusion_mat = (confusion_mat / confusion_mat.sum()) + confusion_mat = np.nan_to_num(confusion_mat) + + return confusion_mat + + +def mean_class_accuracy(scores, labels): + """Calculate mean class accuracy. + + Args: + scores (list[np.ndarray]): Prediction scores for each class. + labels (list[int]): Ground truth labels. + + Returns: + np.ndarray: Mean class accuracy. + """ + pred = np.argmax(scores, axis=1) + cf_mat = confusion_matrix(pred, labels).astype(float) + + cls_cnt = cf_mat.sum(axis=1) + cls_hit = np.diag(cf_mat) + + mean_class_acc = np.mean( + [hit / cnt if cnt else 0.0 for cnt, hit in zip(cls_cnt, cls_hit)]) + + return mean_class_acc + + +def top_k_accuracy(scores, labels, topk=(1, )): + """Calculate top k accuracy score. + + Args: + scores (list[np.ndarray]): Prediction scores for each class. + labels (list[int]): Ground truth labels. + topk (tuple[int]): K value for top_k_accuracy. Default: (1, ). + + Returns: + list[float]: Top k accuracy score for each k. + """ + res = [] + labels = np.array(labels)[:, np.newaxis] + for k in topk: + max_k_preds = np.argsort(scores, axis=1)[:, -k:][:, ::-1] + match_array = np.logical_or.reduce(max_k_preds == labels, axis=1) + topk_acc_score = match_array.sum() / match_array.shape[0] + res.append(topk_acc_score) + + return res + + +def mmit_mean_average_precision(scores, labels): + """Mean average precision for multi-label recognition. Used for reporting + MMIT style mAP on Multi-Moments in Times. The difference is that this + method calculates average-precision for each sample and averages them among + samples. + + Args: + scores (list[np.ndarray]): Prediction scores of different classes for + each sample. + labels (list[np.ndarray]): Ground truth many-hot vector for each + sample. + + Returns: + np.float: The MMIT style mean average precision. 
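A small worked example for the accuracy helpers defined just above, `top_k_accuracy` and `mean_class_accuracy`, both re-exported from `mmaction.core.evaluation`:

```python
import numpy as np
from mmaction.core.evaluation import top_k_accuracy, mean_class_accuracy

scores = [np.array([0.1, 0.7, 0.2]),
          np.array([0.5, 0.3, 0.2]),
          np.array([0.2, 0.2, 0.6])]
labels = [1, 0, 2]

print(top_k_accuracy(scores, labels, topk=(1, 2)))  # [1.0, 1.0]
print(mean_class_accuracy(scores, labels))          # 1.0
```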
+ """ + results = [] + for score, label in zip(scores, labels): + precision, recall, _ = binary_precision_recall_curve(score, label) + ap = -np.sum(np.diff(recall) * np.array(precision)[:-1]) + results.append(ap) + return np.mean(results) + + +def mean_average_precision(scores, labels): + """Mean average precision for multi-label recognition. + + Args: + scores (list[np.ndarray]): Prediction scores of different classes for + each sample. + labels (list[np.ndarray]): Ground truth many-hot vector for each + sample. + + Returns: + np.float: The mean average precision. + """ + results = [] + scores = np.stack(scores).T + labels = np.stack(labels).T + + for score, label in zip(scores, labels): + precision, recall, _ = binary_precision_recall_curve(score, label) + ap = -np.sum(np.diff(recall) * np.array(precision)[:-1]) + results.append(ap) + results = [x for x in results if not np.isnan(x)] + if results == []: + return np.nan + return np.mean(results) + + +def binary_precision_recall_curve(y_score, y_true): + """Calculate the binary precision recall curve at step thresholds. + + Args: + y_score (np.ndarray): Prediction scores for each class. + Shape should be (num_classes, ). + y_true (np.ndarray): Ground truth many-hot vector. + Shape should be (num_classes, ). + + Returns: + precision (np.ndarray): The precision of different thresholds. + recall (np.ndarray): The recall of different thresholds. + thresholds (np.ndarray): Different thresholds at which precison and + recall are tested. + """ + assert isinstance(y_score, np.ndarray) + assert isinstance(y_true, np.ndarray) + assert y_score.shape == y_true.shape + + # make y_true a boolean vector + y_true = (y_true == 1) + # sort scores and corresponding truth values + desc_score_indices = np.argsort(y_score, kind='mergesort')[::-1] + y_score = y_score[desc_score_indices] + y_true = y_true[desc_score_indices] + # There may be ties in values, therefore find the `distinct_value_inds` + distinct_value_inds = np.where(np.diff(y_score))[0] + threshold_inds = np.r_[distinct_value_inds, y_true.size - 1] + # accumulate the true positives with decreasing threshold + tps = np.cumsum(y_true)[threshold_inds] + fps = 1 + threshold_inds - tps + thresholds = y_score[threshold_inds] + + precision = tps / (tps + fps) + precision[np.isnan(precision)] = 0 + recall = tps / tps[-1] + # stop when full recall attained + # and reverse the outputs so recall is decreasing + last_ind = tps.searchsorted(tps[-1]) + sl = slice(last_ind, None, -1) + + return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl] + + +def pairwise_temporal_iou(candidate_segments, + target_segments, + calculate_overlap_self=False): + """Compute intersection over union between segments. + + Args: + candidate_segments (np.ndarray): 1-dim/2-dim array in format + ``[init, end]/[m x 2:=[init, end]]``. + target_segments (np.ndarray): 2-dim array in format + ``[n x 2:=[init, end]]``. + calculate_overlap_self (bool): Whether to calculate overlap_self + (union / candidate_length) or not. Default: False. + + Returns: + t_iou (np.ndarray): 1-dim array [n] / + 2-dim array [n x m] with IoU ratio. + t_overlap_self (np.ndarray, optional): 1-dim array [n] / + 2-dim array [n x m] with overlap_self, returns when + calculate_overlap_self is True. 
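A usage example for `pairwise_temporal_iou` as documented above; two candidate proposals are scored against a single ground-truth segment, so the result has shape `(n, m) = (1, 2)`:

```python
import numpy as np
from mmaction.core.evaluation import pairwise_temporal_iou

candidate_segments = np.array([[0.0, 5.0], [4.0, 10.0]])  # m = 2 proposals
target_segments = np.array([[2.0, 6.0]])                  # n = 1 ground truth

print(pairwise_temporal_iou(candidate_segments, target_segments))
# -> [[0.5, 0.25]], the IoU of each proposal with the ground-truth segment
```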
+ """ + candidate_segments_ndim = candidate_segments.ndim + if target_segments.ndim != 2 or candidate_segments_ndim not in [1, 2]: + raise ValueError('Dimension of arguments is incorrect') + + if candidate_segments_ndim == 1: + candidate_segments = candidate_segments[np.newaxis, :] + + n, m = target_segments.shape[0], candidate_segments.shape[0] + t_iou = np.empty((n, m), dtype=np.float32) + if calculate_overlap_self: + t_overlap_self = np.empty((n, m), dtype=np.float32) + + for i in range(m): + candidate_segment = candidate_segments[i, :] + tt1 = np.maximum(candidate_segment[0], target_segments[:, 0]) + tt2 = np.minimum(candidate_segment[1], target_segments[:, 1]) + # Intersection including Non-negative overlap score. + segments_intersection = (tt2 - tt1).clip(0) + # Segment union. + segments_union = ((target_segments[:, 1] - target_segments[:, 0]) + + (candidate_segment[1] - candidate_segment[0]) - + segments_intersection) + # Compute overlap as the ratio of the intersection + # over union of two segments. + t_iou[:, i] = (segments_intersection.astype(float) / segments_union) + if calculate_overlap_self: + candidate_length = candidate_segment[1] - candidate_segment[0] + t_overlap_self[:, i] = ( + segments_intersection.astype(float) / candidate_length) + + if candidate_segments_ndim == 1: + t_iou = np.squeeze(t_iou, axis=1) + if calculate_overlap_self: + if candidate_segments_ndim == 1: + t_overlap_self = np.squeeze(t_overlap_self, axis=1) + return t_iou, t_overlap_self + + return t_iou + + +def average_recall_at_avg_proposals(ground_truth, + proposals, + total_num_proposals, + max_avg_proposals=None, + temporal_iou_thresholds=np.linspace( + 0.5, 0.95, 10)): + """Computes the average recall given an average number (percentile) of + proposals per video. + + Args: + ground_truth (dict): Dict containing the ground truth instances. + proposals (dict): Dict containing the proposal instances. + total_num_proposals (int): Total number of proposals in the + proposal dict. + max_avg_proposals (int | None): Max number of proposals for one video. + Default: None. + temporal_iou_thresholds (np.ndarray): 1D array with temporal_iou + thresholds. Default: ``np.linspace(0.5, 0.95, 10)``. + + Returns: + tuple([np.ndarray, np.ndarray, np.ndarray, float]): + (recall, average_recall, proposals_per_video, auc) + In recall, ``recall[i,j]`` is recall at i-th temporal_iou threshold + at the j-th average number (percentile) of average number of + proposals per video. The average_recall is recall averaged + over a list of temporal_iou threshold (1D array). This is + equivalent to ``recall.mean(axis=0)``. The ``proposals_per_video`` + is the average number of proposals per video. The auc is the area + under ``AR@AN`` curve. + """ + + total_num_videos = len(ground_truth) + + if not max_avg_proposals: + max_avg_proposals = float(total_num_proposals) / total_num_videos + + ratio = (max_avg_proposals * float(total_num_videos) / total_num_proposals) + + # For each video, compute temporal_iou scores among the retrieved proposals + score_list = [] + total_num_retrieved_proposals = 0 + for video_id in ground_truth: + # Get proposals for this video. + proposals_video_id = proposals[video_id] + this_video_proposals = proposals_video_id[:, :2] + # Sort proposals by score. + sort_idx = proposals_video_id[:, 2].argsort()[::-1] + this_video_proposals = this_video_proposals[sort_idx, :].astype( + np.float32) + + # Get ground-truth instances associated to this video. 
+ ground_truth_video_id = ground_truth[video_id] + this_video_ground_truth = ground_truth_video_id[:, :2].astype( + np.float32) + if this_video_proposals.shape[0] == 0: + n = this_video_ground_truth.shape[0] + score_list.append(np.zeros((n, 1))) + continue + + if this_video_proposals.ndim != 2: + this_video_proposals = np.expand_dims(this_video_proposals, axis=0) + if this_video_ground_truth.ndim != 2: + this_video_ground_truth = np.expand_dims( + this_video_ground_truth, axis=0) + + num_retrieved_proposals = np.minimum( + int(this_video_proposals.shape[0] * ratio), + this_video_proposals.shape[0]) + total_num_retrieved_proposals += num_retrieved_proposals + this_video_proposals = this_video_proposals[: + num_retrieved_proposals, :] + + # Compute temporal_iou scores. + t_iou = pairwise_temporal_iou(this_video_proposals, + this_video_ground_truth) + score_list.append(t_iou) + + # Given that the length of the videos is really varied, we + # compute the number of proposals in terms of a ratio of the total + # proposals retrieved, i.e. average recall at a percentage of proposals + # retrieved per video. + + # Computes average recall. + pcn_list = np.arange(1, 101) / 100.0 * ( + max_avg_proposals * float(total_num_videos) / + total_num_retrieved_proposals) + matches = np.empty((total_num_videos, pcn_list.shape[0])) + positives = np.empty(total_num_videos) + recall = np.empty((temporal_iou_thresholds.shape[0], pcn_list.shape[0])) + # Iterates over each temporal_iou threshold. + for ridx, temporal_iou in enumerate(temporal_iou_thresholds): + # Inspect positives retrieved per video at different + # number of proposals (percentage of the total retrieved). + for i, score in enumerate(score_list): + # Total positives per video. + positives[i] = score.shape[0] + # Find proposals that satisfies minimum temporal_iou threshold. + true_positives_temporal_iou = score >= temporal_iou + # Get number of proposals as a percentage of total retrieved. + pcn_proposals = np.minimum( + (score.shape[1] * pcn_list).astype(np.int), score.shape[1]) + + for j, num_retrieved_proposals in enumerate(pcn_proposals): + # Compute the number of matches + # for each percentage of the proposals + matches[i, j] = np.count_nonzero( + (true_positives_temporal_iou[:, :num_retrieved_proposals] + ).sum(axis=1)) + + # Computes recall given the set of matches per video. + recall[ridx, :] = matches.sum(axis=0) / positives.sum() + + # Recall is averaged. + avg_recall = recall.mean(axis=0) + + # Get the average number of proposals per video. + proposals_per_video = pcn_list * ( + float(total_num_retrieved_proposals) / total_num_videos) + # Get AUC + area_under_curve = np.trapz(avg_recall, proposals_per_video) + auc = 100. * float(area_under_curve) / proposals_per_video[-1] + return recall, avg_recall, proposals_per_video, auc + + +def get_weighted_score(score_list, coeff_list): + """Get weighted score with given scores and coefficients. + + Given n predictions by different classifier: [score_1, score_2, ..., + score_n] (score_list) and their coefficients: [coeff_1, coeff_2, ..., + coeff_n] (coeff_list), return weighted score: weighted_score = + score_1 * coeff_1 + score_2 * coeff_2 + ... + score_n * coeff_n + + Args: + score_list (list[list[np.ndarray]]): List of list of scores, with shape + n(number of predictions) X num_samples X num_classes + coeff_list (list[float]): List of coefficients, with shape n. + + Returns: + list[np.ndarray]: List of weighted scores. 
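A usage example for `get_weighted_score` as documented above, fusing the per-class scores of two classifiers over two samples:

```python
import numpy as np
from mmaction.core.evaluation import get_weighted_score

score_a = [np.array([0.6, 0.4]), np.array([0.2, 0.8])]  # classifier A, 2 samples
score_b = [np.array([0.5, 0.5]), np.array([0.3, 0.7])]  # classifier B, 2 samples

fused = get_weighted_score([score_a, score_b], [1.0, 0.5])
# fused[i] == 1.0 * score_a[i] + 0.5 * score_b[i],
# e.g. fused[0] is approximately [0.85, 0.65]
print(fused)
```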
+ """ + assert len(score_list) == len(coeff_list) + num_samples = len(score_list[0]) + for i in range(1, len(score_list)): + assert len(score_list[i]) == num_samples + + scores = np.array(score_list) # (num_coeff, num_samples, num_classes) + coeff = np.array(coeff_list) # (num_coeff, ) + weighted_scores = list(np.dot(scores.T, coeff).T) + return weighted_scores + + +def softmax(x, dim=1): + """Compute softmax values for each sets of scores in x.""" + e_x = np.exp(x - np.max(x, axis=dim, keepdims=True)) + return e_x / e_x.sum(axis=dim, keepdims=True) + + +def interpolated_precision_recall(precision, recall): + """Interpolated AP - VOCdevkit from VOC 2011. + + Args: + precision (np.ndarray): The precision of different thresholds. + recall (np.ndarray): The recall of different thresholds. + + Returns: + float: Average precision score. + """ + mprecision = np.hstack([[0], precision, [0]]) + mrecall = np.hstack([[0], recall, [1]]) + for i in range(len(mprecision) - 1)[::-1]: + mprecision[i] = max(mprecision[i], mprecision[i + 1]) + idx = np.where(mrecall[1::] != mrecall[0:-1])[0] + 1 + ap = np.sum((mrecall[idx] - mrecall[idx - 1]) * mprecision[idx]) + return ap + + +def average_precision_at_temporal_iou(ground_truth, + prediction, + temporal_iou_thresholds=(np.linspace( + 0.5, 0.95, 10))): + """Compute average precision (in detection task) between ground truth and + predicted data frames. If multiple predictions match the same predicted + segment, only the one with highest score is matched as true positive. This + code is greatly inspired by Pascal VOC devkit. + + Args: + ground_truth (dict): Dict containing the ground truth instances. + Key: 'video_id' + Value (np.ndarray): 1D array of 't-start' and 't-end'. + prediction (np.ndarray): 2D array containing the information of + proposal instances, including 'video_id', 'class_id', 't-start', + 't-end' and 'score'. + temporal_iou_thresholds (np.ndarray): 1D array with temporal_iou + thresholds. Default: ``np.linspace(0.5, 0.95, 10)``. + + Returns: + np.ndarray: 1D array of average precision score. + """ + ap = np.zeros(len(temporal_iou_thresholds), dtype=np.float32) + if len(prediction) < 1: + return ap + + num_gts = 0. + lock_gt = dict() + for key in ground_truth: + lock_gt[key] = np.ones( + (len(temporal_iou_thresholds), len(ground_truth[key]))) * -1 + num_gts += len(ground_truth[key]) + + # Sort predictions by decreasing score order. + prediction = np.array(prediction) + scores = prediction[:, 4].astype(float) + sort_idx = np.argsort(scores)[::-1] + prediction = prediction[sort_idx] + + # Initialize true positive and false positive vectors. + tp = np.zeros((len(temporal_iou_thresholds), len(prediction)), + dtype=np.int32) + fp = np.zeros((len(temporal_iou_thresholds), len(prediction)), + dtype=np.int32) + + # Assigning true positive to truly grount truth instances. + for idx, this_pred in enumerate(prediction): + + # Check if there is at least one ground truth in the video. + if this_pred[0] in ground_truth: + this_gt = np.array(ground_truth[this_pred[0]], dtype=float) + else: + fp[:, idx] = 1 + continue + + t_iou = pairwise_temporal_iou(this_pred[2:4].astype(float), this_gt) + # We would like to retrieve the predictions with highest t_iou score. 
+ t_iou_sorted_idx = t_iou.argsort()[::-1] + for t_idx, t_iou_threshold in enumerate(temporal_iou_thresholds): + for jdx in t_iou_sorted_idx: + if t_iou[jdx] < t_iou_threshold: + fp[t_idx, idx] = 1 + break + if lock_gt[this_pred[0]][t_idx, jdx] >= 0: + continue + # Assign as true positive after the filters above. + tp[t_idx, idx] = 1 + lock_gt[this_pred[0]][t_idx, jdx] = idx + break + + if fp[t_idx, idx] == 0 and tp[t_idx, idx] == 0: + fp[t_idx, idx] = 1 + + tp_cumsum = np.cumsum(tp, axis=1).astype(np.float32) + fp_cumsum = np.cumsum(fp, axis=1).astype(np.float32) + recall_cumsum = tp_cumsum / num_gts + + precision_cumsum = tp_cumsum / (tp_cumsum + fp_cumsum) + + for t_idx in range(len(temporal_iou_thresholds)): + ap[t_idx] = interpolated_precision_recall(precision_cumsum[t_idx, :], + recall_cumsum[t_idx, :]) + + return ap diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/core/evaluation/eval_detection.py b/Downstream/Open-Set-Action-Recognition/mmaction/core/evaluation/eval_detection.py new file mode 100644 index 0000000..1586441 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/core/evaluation/eval_detection.py @@ -0,0 +1,233 @@ +import json + +import numpy as np +from mmcv.utils import print_log + +from ...utils import get_root_logger +from .accuracy import interpolated_precision_recall, pairwise_temporal_iou + + +class ActivityNetDetection: + """Class to evaluate detection results on ActivityNet. + + Args: + ground_truth_filename (str | None): The filename of groundtruth. + Default: None. + prediction_filename (str | None): The filename of action detection + results. Default: None. + tiou_thresholds (np.ndarray): The thresholds of temporal iou to + evaluate. Default: ``np.linspace(0.5, 0.95, 10)``. + verbose (bool): Whether to print verbose logs. Default: False. + """ + + def __init__(self, + ground_truth_filename=None, + prediction_filename=None, + tiou_thresholds=np.linspace(0.5, 0.95, 10), + verbose=False): + if not ground_truth_filename: + raise IOError('Please input a valid ground truth file.') + if not prediction_filename: + raise IOError('Please input a valid prediction file.') + self.ground_truth_filename = ground_truth_filename + self.prediction_filename = prediction_filename + self.tiou_thresholds = tiou_thresholds + self.verbose = verbose + self.ap = None + self.logger = get_root_logger() + # Import ground truth and predictions. + self.ground_truth, self.activity_index = self._import_ground_truth( + ground_truth_filename) + self.prediction = self._import_prediction(prediction_filename) + + if self.verbose: + log_msg = ( + '[INIT] Loaded ground_truth from ' + f'{self.ground_truth_filename}, prediction from ' + f'{self.prediction_filename}.\n' + f'Number of ground truth instances: {len(self.ground_truth)}\n' + f'Number of predictions: {len(self.prediction)}\n' + f'Fixed threshold for tiou score: {self.tiou_thresholds}') + print_log(log_msg, logger=self.logger) + + @staticmethod + def _import_ground_truth(ground_truth_filename): + """Read ground truth file and return the ground truth instances and the + activity classes. + + Args: + ground_truth_filename (str): Full path to the ground truth json + file. + + Returns: + tuple[list, dict]: (ground_truth, activity_index). + ground_truth contains the ground truth instances, which is in a + dict format. + activity_index contains classes index. 
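A hedged usage sketch of `ActivityNetDetection` (re-exported from `mmaction.core.evaluation`); the json file names below are placeholders, any ground-truth annotation and prediction files in the format described above will do:

```python
from mmaction.core.evaluation import ActivityNetDetection

det_eval = ActivityNetDetection(
    ground_truth_filename='data/ActivityNet/anet_val_gt.json',        # placeholder
    prediction_filename='results/anet_detection_results.json',        # placeholder
    verbose=True)

mAP, average_mAP = det_eval.evaluate()
print('mAP per tIoU threshold:', mAP)
print('average mAP:', average_mAP)
```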
+ """ + with open(ground_truth_filename, 'r') as f: + data = json.load(f) + # Checking format + activity_index, class_idx = {}, 0 + ground_truth = [] + for video_id, video_info in data.items(): + for anno in video_info['annotations']: + if anno['label'] not in activity_index: + activity_index[anno['label']] = class_idx + class_idx += 1 + # old video_anno + ground_truth_item = {} + ground_truth_item['video-id'] = video_id[2:] + ground_truth_item['t-start'] = float(anno['segment'][0]) + ground_truth_item['t-end'] = float(anno['segment'][1]) + ground_truth_item['label'] = activity_index[anno['label']] + ground_truth.append(ground_truth_item) + + return ground_truth, activity_index + + def _import_prediction(self, prediction_filename): + """Read prediction file and return the prediction instances. + + Args: + prediction_filename (str): Full path to the prediction json file. + + Returns: + List: List containing the prediction instances (dictionaries). + """ + with open(prediction_filename, 'r') as f: + data = json.load(f) + # Read predictions. + prediction = [] + for video_id, video_info in data['results'].items(): + for result in video_info: + prediction_item = dict() + prediction_item['video-id'] = video_id + prediction_item['label'] = self.activity_index[result['label']] + prediction_item['t-start'] = float(result['segment'][0]) + prediction_item['t-end'] = float(result['segment'][1]) + prediction_item['score'] = result['score'] + prediction.append(prediction_item) + + return prediction + + def wrapper_compute_average_precision(self): + """Computes average precision for each class.""" + ap = np.zeros((len(self.tiou_thresholds), len(self.activity_index))) + + # Adaptation to query faster + ground_truth_by_label = [] + prediction_by_label = [] + for i in range(len(self.activity_index)): + ground_truth_by_label.append([]) + prediction_by_label.append([]) + for gt in self.ground_truth: + ground_truth_by_label[gt['label']].append(gt) + for pred in self.prediction: + prediction_by_label[pred['label']].append(pred) + + for i in range(len(self.activity_index)): + ap_result = compute_average_precision_detection( + ground_truth_by_label[i], prediction_by_label[i], + self.tiou_thresholds) + ap[:, i] = ap_result + + return ap + + def evaluate(self): + """Evaluates a prediction file. + + For the detection task we measure the interpolated mean average + precision to measure the performance of a method. + """ + self.ap = self.wrapper_compute_average_precision() + + self.mAP = self.ap.mean(axis=1) + self.average_mAP = self.mAP.mean() + + return self.mAP, self.average_mAP + + +def compute_average_precision_detection(ground_truth, + prediction, + tiou_thresholds=np.linspace( + 0.5, 0.95, 10)): + """Compute average precision (detection task) between ground truth and + predictions data frames. If multiple predictions occurs for the same + predicted segment, only the one with highest score is matches as true + positive. This code is greatly inspired by Pascal VOC devkit. + + Args: + ground_truth (list[dict]): List containing the ground truth instances + (dictionaries). Required keys are 'video-id', 't-start' and + 't-end'. + prediction (list[dict]): List containing the prediction instances + (dictionaries). Required keys are: 'video-id', 't-start', 't-end' + and 'score'. + tiou_thresholds (np.ndarray): A 1darray indicates the temporal + intersection over union threshold, which is optional. + Default: ``np.linspace(0.5, 0.95, 10)``. + + Returns: + Float: ap, Average precision score. 
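For `compute_average_precision_detection` as documented above, a small, hedged example. Note that the function is not re-exported in `mmaction/core/evaluation/__init__.py`, and that this file uses the deprecated `np.float` alias, so running it assumes a NumPy version older than 1.24:

```python
import numpy as np
from mmaction.core.evaluation.eval_detection import \
    compute_average_precision_detection

ground_truth = [
    {'video-id': 'v1', 't-start': 0.0, 't-end': 5.0},
    {'video-id': 'v2', 't-start': 3.0, 't-end': 9.0},
]
prediction = [
    {'video-id': 'v1', 't-start': 0.5, 't-end': 4.5, 'score': 0.9},  # tIoU 0.8, matched
    {'video-id': 'v2', 't-start': 6.0, 't-end': 12.0, 'score': 0.6}, # tIoU 1/3, false positive
]

ap = compute_average_precision_detection(
    ground_truth, prediction, tiou_thresholds=np.array([0.5]))
print(ap)  # [0.5]: only one of the two ground truths is recalled
```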
+ """ + num_thresholds = len(tiou_thresholds) + num_gts = len(ground_truth) + num_preds = len(prediction) + ap = np.zeros(num_thresholds) + if len(prediction) == 0: + return ap + + num_positive = float(num_gts) + lock_gt = np.ones((num_thresholds, num_gts)) * -1 + # Sort predictions by decreasing score order. + prediction.sort(key=lambda x: -x['score']) + # Initialize true positive and false positive vectors. + tp = np.zeros((num_thresholds, num_preds)) + fp = np.zeros((num_thresholds, num_preds)) + + # Adaptation to query faster + ground_truth_by_videoid = {} + for i, item in enumerate(ground_truth): + item['index'] = i + ground_truth_by_videoid.setdefault(item['video-id'], []).append(item) + + # Assigning true positive to truly grount truth instances. + for idx, pred in enumerate(prediction): + if pred['video-id'] in ground_truth_by_videoid: + gts = ground_truth_by_videoid[pred['video-id']] + else: + fp[:, idx] = 1 + continue + + tiou_arr = pairwise_temporal_iou( + np.array([pred['t-start'], pred['t-end']]), + np.array([np.array([gt['t-start'], gt['t-end']]) for gt in gts])) + tiou_arr = tiou_arr.reshape(-1) + # We would like to retrieve the predictions with highest tiou score. + tiou_sorted_idx = tiou_arr.argsort()[::-1] + for t_idx, tiou_threshold in enumerate(tiou_thresholds): + for j_idx in tiou_sorted_idx: + if tiou_arr[j_idx] < tiou_threshold: + fp[t_idx, idx] = 1 + break + if lock_gt[t_idx, gts[j_idx]['index']] >= 0: + continue + # Assign as true positive after the filters above. + tp[t_idx, idx] = 1 + lock_gt[t_idx, gts[j_idx]['index']] = idx + break + + if fp[t_idx, idx] == 0 and tp[t_idx, idx] == 0: + fp[t_idx, idx] = 1 + + tp_cumsum = np.cumsum(tp, axis=1).astype(np.float) + fp_cumsum = np.cumsum(fp, axis=1).astype(np.float) + recall_cumsum = tp_cumsum / num_positive + + precision_cumsum = tp_cumsum / (tp_cumsum + fp_cumsum) + + for t_idx in range(len(tiou_thresholds)): + ap[t_idx] = interpolated_precision_recall(precision_cumsum[t_idx, :], + recall_cumsum[t_idx, :]) + + return ap diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/core/evaluation/eval_hooks.py b/Downstream/Open-Set-Action-Recognition/mmaction/core/evaluation/eval_hooks.py new file mode 100644 index 0000000..40fd563 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/core/evaluation/eval_hooks.py @@ -0,0 +1,277 @@ +import os.path as osp +import warnings +from math import inf + +import mmcv +from mmcv.runner import Hook +from torch.utils.data import DataLoader + +from mmaction.utils import get_root_logger + + +class EpochEvalHook(Hook): + """Non-Distributed evaluation hook based on epochs. + + Notes: + If new arguments are added for EpochEvalHook, tools/test.py, + tools/eval_metric.py may be effected. + + This hook will regularly perform evaluation in a given interval when + performing in non-distributed environment. + + Args: + dataloader (DataLoader): A PyTorch dataloader. + start (int | None, optional): Evaluation starting epoch. It enables + evaluation before the training starts if ``start`` <= the resuming + epoch. If None, whether to evaluate is merely decided by + ``interval``. Default: None. + interval (int): Evaluation interval (by epochs). Default: 1. + save_best (bool): Whether to save best checkpoint during evaluation. + Default: True. + key_indicator (str | None): Key indicator to measure the best + checkpoint during evaluation when ``save_best`` is set to True. + Options are the evaluation metrics to the test dataset. 
e.g., + ``top1_acc``, ``top5_acc``, ``mean_class_accuracy``, + ``mean_average_precision``, ``mmit_mean_average_precision`` + for action recognition dataset (RawframeDataset and VideoDataset). + ``AR@AN``, ``auc`` for action localization dataset + (ActivityNetDataset). Default: `top1_acc`. + rule (str | None): Comparison rule for best score. Options are None, + 'greater' and 'less'. If set to None, it will infer a reasonable + rule. Default: 'None'. + **eval_kwargs: Evaluation arguments fed into the evaluate function of + the dataset. + """ + + rule_map = {'greater': lambda x, y: x > y, 'less': lambda x, y: x < y} + init_value_map = {'greater': -inf, 'less': inf} + greater_keys = ['acc', 'top', 'AR@', 'auc', 'precision'] + less_keys = ['loss'] + + def __init__(self, + dataloader, + start=None, + interval=1, + save_best=True, + key_indicator='top1_acc', + rule=None, + **eval_kwargs): + if not isinstance(dataloader, DataLoader): + raise TypeError(f'dataloader must be a pytorch DataLoader, ' + f'but got {type(dataloader)}') + if not isinstance(save_best, bool): + raise TypeError("'save_best' should be a boolean") + + if save_best and not key_indicator: + raise ValueError('key_indicator should not be None, when ' + 'save_best is set to True.') + if rule not in self.rule_map and rule is not None: + raise KeyError(f'rule must be greater, less or None, ' + f'but got {rule}.') + + if rule is None and save_best: + if any(key in key_indicator for key in self.greater_keys): + rule = 'greater' + elif any(key in key_indicator for key in self.less_keys): + rule = 'less' + else: + raise ValueError( + f'key_indicator must be in {self.greater_keys} ' + f'or in {self.less_keys} when rule is None, ' + f'but got {key_indicator}') + + if interval <= 0: + raise ValueError(f'interval must be positive, but got {interval}') + if start is not None and start < 0: + warnings.warn( + f'The evaluation start epoch {start} is smaller than 0, ' + f'use 0 instead', UserWarning) + start = 0 + + self.dataloader = dataloader + self.interval = interval + self.start = start + self.eval_kwargs = eval_kwargs + self.save_best = save_best + self.key_indicator = key_indicator + self.rule = rule + + self.logger = get_root_logger() + + if self.save_best: + self.compare_func = self.rule_map[self.rule] + self.best_score = self.init_value_map[self.rule] + + self.best_json = dict() + self.initial_epoch_flag = True + + def before_train_epoch(self, runner): + """Evaluate the model only at the start of training.""" + if not self.initial_epoch_flag: + return + if self.start is not None and runner.epoch >= self.start: + self.after_train_epoch(runner) + self.initial_epoch_flag = False + + def evaluation_flag(self, runner): + """Judge whether to perform_evaluation after this epoch. + + Returns: + bool: The flag indicating whether to perform evaluation. + """ + if self.start is None: + if not self.every_n_epochs(runner, self.interval): + # No evaluation during the interval epochs. + return False + elif (runner.epoch + 1) < self.start: + # No evaluation if start is larger than the current epoch. + return False + else: + # Evaluation only at epochs 3, 5, 7... 
if start==3 and interval==2 + if (runner.epoch + 1 - self.start) % self.interval: + return False + return True + + def after_train_epoch(self, runner): + """Called after every training epoch to evaluate the results.""" + if not self.evaluation_flag(runner): + return + + current_ckpt_path = osp.join(runner.work_dir, + f'epoch_{runner.epoch + 1}.pth') + json_path = osp.join(runner.work_dir, 'best.json') + + if osp.exists(json_path) and len(self.best_json) == 0: + self.best_json = mmcv.load(json_path) + self.best_score = self.best_json['best_score'] + self.best_ckpt = self.best_json['best_ckpt'] + self.key_indicator = self.best_json['key_indicator'] + + from mmaction.apis import single_gpu_test + results = single_gpu_test(runner.model, self.dataloader) + key_score = self.evaluate(runner, results) + if (self.save_best and self.compare_func(key_score, self.best_score)): + self.best_score = key_score + self.logger.info( + f'Now best checkpoint is epoch_{runner.epoch + 1}.pth') + self.best_json['best_score'] = self.best_score + self.best_json['best_ckpt'] = current_ckpt_path + self.best_json['key_indicator'] = self.key_indicator + mmcv.dump(self.best_json, json_path) + + def evaluate(self, runner, results): + """Evaluate the results. + + Args: + runner (:obj:`mmcv.Runner`): The underlined training runner. + results (list): Output results. + """ + eval_res = self.dataloader.dataset.evaluate( + results, logger=runner.logger, **self.eval_kwargs) + for name, val in eval_res.items(): + runner.log_buffer.output[name] = val + runner.log_buffer.ready = True + if self.key_indicator is not None: + if self.key_indicator not in eval_res: + warnings.warn('The key indicator for evaluation is not ' + 'included in evaluation result, please specify ' + 'it in config file') + return None + return eval_res[self.key_indicator] + + return None + + +class DistEpochEvalHook(EpochEvalHook): + """Distributed evaluation hook based on epochs. + + This hook will regularly perform evaluation in a given interval when + performing in distributed environment. + + Args: + dataloader (DataLoader): A PyTorch dataloader. + start (int | None, optional): Evaluation starting epoch. It enables + evaluation before the training starts if ``start`` <= the resuming + epoch. If None, whether to evaluate is merely decided by + ``interval``. Default: None. + interval (int): Evaluation interval (by epochs). Default: 1. + save_best (bool): Whether to save best checkpoint during evaluation. + Default: True. + key_indicator (str | None): Key indicator to measure the best + checkpoint during evaluation when ``save_best`` is set to True. + Options are the evaluation metrics to the test dataset. e.g., + ``top1_acc``, ``top5_acc``, ``mean_class_accuracy``, + ``mean_average_precision``, ``mmit_mean_average_precision`` + for action recognition dataset (RawframeDataset and VideoDataset). + ``AR@AN``, ``auc`` for action localization dataset + (ActivityNetDataset). Default: `top1_acc`. + rule (str | None): Comparison rule for best score. Options are None, + 'greater' and 'less'. If set to None, it will infer a reasonable + rule. Default: 'None'. + tmpdir (str | None): Temporary directory to save the results of all + processes. Default: None. + gpu_collect (bool): Whether to use gpu or cpu to collect results. + Default: False. + **eval_kwargs: Evaluation arguments fed into the evaluate function of + the dataset. 
+ """ + + def __init__(self, + dataloader, + start=None, + interval=1, + save_best=True, + key_indicator='top1_acc', + rule=None, + tmpdir=None, + gpu_collect=False, + **eval_kwargs): + super().__init__( + dataloader, + start=start, + interval=interval, + save_best=save_best, + key_indicator=key_indicator, + rule=rule, + **eval_kwargs) + self.tmpdir = tmpdir + self.gpu_collect = gpu_collect + + def after_train_epoch(self, runner): + """Called after each training epoch to evaluate the model.""" + if not self.evaluation_flag(runner): + return + + current_ckpt_path = osp.join(runner.work_dir, + f'epoch_{runner.epoch + 1}.pth') + json_path = osp.join(runner.work_dir, 'best.json') + + if osp.exists(json_path) and len(self.best_json) == 0: + self.best_json = mmcv.load(json_path) + self.best_score = self.best_json['best_score'] + self.best_ckpt = self.best_json['best_ckpt'] + self.key_indicator = self.best_json['key_indicator'] + + from mmaction.apis import multi_gpu_test + + tmpdir = self.tmpdir + if tmpdir is None: + tmpdir = osp.join(runner.work_dir, '.eval_hook') + + results = multi_gpu_test( + runner.model, + self.dataloader, + tmpdir=tmpdir, + gpu_collect=self.gpu_collect) + if runner.rank == 0: + print('\n') + key_score = self.evaluate(runner, results) + if (self.save_best and key_score is not None + and self.compare_func(key_score, self.best_score)): + self.best_score = key_score + self.logger.info( + f'Now best checkpoint is epoch_{runner.epoch + 1}.pth') + self.best_json['best_score'] = self.best_score + self.best_json['best_ckpt'] = current_ckpt_path + self.best_json['key_indicator'] = self.key_indicator + mmcv.dump(self.best_json, json_path) diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/core/lr/__init__.py b/Downstream/Open-Set-Action-Recognition/mmaction/core/lr/__init__.py new file mode 100644 index 0000000..f2a2975 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/core/lr/__init__.py @@ -0,0 +1,3 @@ +from .tin_lr_hook import TINLrUpdaterHook + +__all__ = ['TINLrUpdaterHook'] diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/core/lr/tin_lr_hook.py b/Downstream/Open-Set-Action-Recognition/mmaction/core/lr/tin_lr_hook.py new file mode 100644 index 0000000..4fededd --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/core/lr/tin_lr_hook.py @@ -0,0 +1,39 @@ +from mmcv.runner import HOOKS, LrUpdaterHook +from mmcv.runner.hooks.lr_updater import annealing_cos + + +@HOOKS.register_module() +class TINLrUpdaterHook(LrUpdaterHook): + + def __init__(self, min_lr, **kwargs): + self.min_lr = min_lr + super(TINLrUpdaterHook, self).__init__(**kwargs) + + def get_warmup_lr(self, cur_iters): + if self.warmup == 'linear': + # 'linear' warmup is rewritten according to TIN repo: + # https://github.com/deepcs233/TIN/blob/master/main.py#L409-L412 + k = (cur_iters / self.warmup_iters) * ( + 1 - self.warmup_ratio) + self.warmup_ratio + warmup_lr = [_lr * k for _lr in self.regular_lr] + elif self.warmup == 'constant': + warmup_lr = [_lr * self.warmup_ratio for _lr in self.regular_lr] + elif self.warmup == 'exp': + k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters) + warmup_lr = [_lr * k for _lr in self.regular_lr] + return warmup_lr + + def get_lr(self, runner, base_lr): + if self.by_epoch: + progress = runner.epoch + max_progress = runner.max_epochs + else: + progress = runner.iter + max_progress = runner.max_iters + + target_lr = self.min_lr + if self.warmup is not None: + progress = progress - self.warmup_iters + max_progress = 
max_progress - self.warmup_iters + factor = progress / max_progress + return annealing_cos(base_lr, target_lr, factor) diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/core/optimizer/__init__.py b/Downstream/Open-Set-Action-Recognition/mmaction/core/optimizer/__init__.py new file mode 100644 index 0000000..40a70dd --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/core/optimizer/__init__.py @@ -0,0 +1,4 @@ +from .copy_of_sgd import CopyOfSGD +from .tsm_optimizer_constructor import TSMOptimizerConstructor + +__all__ = ['CopyOfSGD', 'TSMOptimizerConstructor'] diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/core/optimizer/copy_of_sgd.py b/Downstream/Open-Set-Action-Recognition/mmaction/core/optimizer/copy_of_sgd.py new file mode 100644 index 0000000..9dd3d27 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/core/optimizer/copy_of_sgd.py @@ -0,0 +1,11 @@ +from mmcv.runner import OPTIMIZERS +from torch.optim import SGD + + +@OPTIMIZERS.register_module() +class CopyOfSGD(SGD): + """A clone of torch.optim.SGD. + + A customized optimizer could be defined like CopyOfSGD. You may derive from + built-in optimizers in torch.optim, or directly implement a new optimizer. + """ diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/core/optimizer/tsm_optimizer_constructor.py b/Downstream/Open-Set-Action-Recognition/mmaction/core/optimizer/tsm_optimizer_constructor.py new file mode 100644 index 0000000..b2024ed --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/core/optimizer/tsm_optimizer_constructor.py @@ -0,0 +1,108 @@ +import torch +from mmcv.runner import OPTIMIZER_BUILDERS, DefaultOptimizerConstructor +from mmcv.utils import SyncBatchNorm, _BatchNorm, _ConvNd + + +@OPTIMIZER_BUILDERS.register_module() +class TSMOptimizerConstructor(DefaultOptimizerConstructor): + """Optimizer constructor in TSM model. + + This constructor builds optimizer in different ways from the default one. + + 1. Parameters of the first conv layer have default lr and weight decay. + 2. Parameters of BN layers have default lr and zero weight decay. + 3. If the field "fc_lr5" in paramwise_cfg is set to True, the parameters + of the last fc layer in cls_head have 5x lr multiplier and 10x weight + decay multiplier. + 4. Weights of other layers have default lr and weight decay, and biases + have a 2x lr multiplier and zero weight decay. + """ + + def add_params(self, params, model): + """Add parameters and their corresponding lr and wd to the params. + + Args: + params (list): The list to be modified, containing all parameter + groups and their corresponding lr and wd configurations. + model (nn.Module): The model to be trained with the optimizer. + """ + # use fc_lr5 to determine whether to specify higher multi-factor + # for fc layer weights and bias. 
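Since the warmup and annealing pieces of `TINLrUpdaterHook` above are spread across two methods, here is a rough standalone sketch of the schedule they combine to produce. The `annealing_cos` helper is re-implemented locally so the snippet runs on its own, and all numbers (base lr, min lr, epoch counts) are arbitrary; the real hook also handles warmup per iteration and per parameter group, which is omitted here.

```python
import math


def annealing_cos(start, end, factor):
    """Cosine interpolation from ``start`` to ``end`` (mirrors mmcv's helper)."""
    cos_out = math.cos(math.pi * factor) + 1
    return end + 0.5 * (start - end) * cos_out


def tin_style_lr(base_lr, min_lr, epoch, max_epochs,
                 warmup_epochs=0, warmup_ratio=0.1):
    """Approximate shape of the TIN schedule: linear warmup, then cosine decay."""
    if warmup_epochs and epoch < warmup_epochs:
        k = (epoch / warmup_epochs) * (1 - warmup_ratio) + warmup_ratio
        return base_lr * k
    progress = epoch - warmup_epochs
    max_progress = max_epochs - warmup_epochs
    return annealing_cos(base_lr, min_lr, progress / max_progress)


for e in (0, 2, 5, 25, 49):
    print(e, round(tin_style_lr(0.01, 1e-5, e, 50, warmup_epochs=5), 6))
```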
+ fc_lr5 = self.paramwise_cfg['fc_lr5'] + first_conv_weight = [] + first_conv_bias = [] + normal_weight = [] + normal_bias = [] + lr5_weight = [] + lr10_bias = [] + bn = [] + + conv_cnt = 0 + + for m in model.modules(): + if isinstance(m, _ConvNd): + m_params = list(m.parameters()) + conv_cnt += 1 + if conv_cnt == 1: + first_conv_weight.append(m_params[0]) + if len(m_params) == 2: + first_conv_bias.append(m_params[1]) + else: + normal_weight.append(m_params[0]) + if len(m_params) == 2: + normal_bias.append(m_params[1]) + elif isinstance(m, torch.nn.Linear): + m_params = list(m.parameters()) + normal_weight.append(m_params[0]) + normal_bias.append(m_params[1]) + elif isinstance(m, + (_BatchNorm, SyncBatchNorm, torch.nn.GroupNorm)): + for param in list(m.parameters()): + if param.requires_grad: + bn.append(param) + elif len(m._modules) == 0: + if len(list(m.parameters())) > 0: + raise ValueError(f'New atomic module type: {type(m)}. ' + 'Need to give it a learning policy') + + # pop the cls_head fc layer params + last_fc_weight = normal_weight.pop() + last_fc_bias = normal_bias.pop() + if fc_lr5: + lr5_weight.append(last_fc_weight) + lr10_bias.append(last_fc_bias) + else: + normal_weight.append(last_fc_weight) + normal_bias.append(last_fc_bias) + + params.append({ + 'params': first_conv_weight, + 'lr': self.base_lr, + 'weight_decay': self.base_wd + }) + params.append({ + 'params': first_conv_bias, + 'lr': self.base_lr * 2, + 'weight_decay': 0 + }) + params.append({ + 'params': normal_weight, + 'lr': self.base_lr, + 'weight_decay': self.base_wd + }) + params.append({ + 'params': normal_bias, + 'lr': self.base_lr * 2, + 'weight_decay': 0 + }) + params.append({'params': bn, 'lr': self.base_lr, 'weight_decay': 0}) + params.append({ + 'params': lr5_weight, + 'lr': self.base_lr * 5, + 'weight_decay': self.base_wd + }) + params.append({ + 'params': lr10_bias, + 'lr': self.base_lr * 10, + 'weight_decay': 0 + }) diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/core/runner/__init__.py b/Downstream/Open-Set-Action-Recognition/mmaction/core/runner/__init__.py new file mode 100644 index 0000000..a56426b --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/core/runner/__init__.py @@ -0,0 +1,4 @@ +from .omnisource_runner import OmniSourceDistSamplerSeedHook, OmniSourceRunner +from .annealing_runner import AnnealingRunner + +__all__ = ['OmniSourceRunner', 'OmniSourceDistSamplerSeedHook', 'AnnealingRunner'] diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/core/runner/annealing_runner.py b/Downstream/Open-Set-Action-Recognition/mmaction/core/runner/annealing_runner.py new file mode 100644 index 0000000..b660245 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/core/runner/annealing_runner.py @@ -0,0 +1,67 @@ +# Copyright (c) Open-MMLab. All rights reserved. 
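To make the grouping policy documented above concrete, the toy snippet below hand-builds the parameter groups it would produce for a tiny model. This is not the constructor itself, just the resulting group layout; the model, learning rate and weight decay values are made up.

```python
import torch
import torch.nn as nn

# Toy layout of the TSM-style parameter groups: biases get 2x lr and no
# weight decay, BN gets no weight decay, and with ``fc_lr5`` the last fc
# layer gets 5x/10x multipliers for its weight/bias.
model = nn.Sequential(
    nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU(),
    nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(8, 10))

base_lr, base_wd, fc_lr5 = 0.01, 1e-4, True
conv, bn, fc = model[0], model[1], model[5]

param_groups = [
    {'params': [conv.weight], 'lr': base_lr, 'weight_decay': base_wd},
    {'params': [conv.bias], 'lr': base_lr * 2, 'weight_decay': 0},
    {'params': list(bn.parameters()), 'lr': base_lr, 'weight_decay': 0},
    {'params': [fc.weight], 'lr': base_lr * (5 if fc_lr5 else 1),
     'weight_decay': base_wd},
    {'params': [fc.bias], 'lr': base_lr * (10 if fc_lr5 else 2),
     'weight_decay': 0},
]
optimizer = torch.optim.SGD(param_groups, lr=base_lr, momentum=0.9)
for group in optimizer.param_groups:
    print(len(group['params']), group['lr'], group['weight_decay'])
```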
+import os.path as osp +import platform +import shutil +import time +import warnings + +import torch + +import mmcv +from mmcv.runner import EpochBasedRunner + +class AnnealingRunner(EpochBasedRunner): + + def run_iter(self, data_batch, train_mode, **kwargs): + + if 'annealing' in kwargs: + kwargs.update(epoch=self.epoch) + kwargs.update(total_epoch=self.max_epochs) + kwargs.update(iter=self._iter) + + if self.batch_processor is not None: + outputs = self.batch_processor( + self.model, data_batch, train_mode=train_mode, **kwargs) + elif train_mode: + outputs = self.model.train_step(data_batch, self.optimizer, + **kwargs) + else: + outputs = self.model.val_step(data_batch, self.optimizer, **kwargs) + if not isinstance(outputs, dict): + raise TypeError('"batch_processor()" or "model.train_step()"' + 'and "model.val_step()" must return a dict') + if 'log_vars' in outputs: + self.log_buffer.update(outputs['log_vars'], outputs['num_samples']) + self.outputs = outputs + + def train(self, data_loader, **kwargs): + self.model.train() + self.mode = 'train' + self.data_loader = data_loader + self._max_iters = self._max_epochs * len(self.data_loader) + self.call_hook('before_train_epoch') + time.sleep(2) # Prevent possible deadlock during epoch transition + for i, data_batch in enumerate(self.data_loader): + self._inner_iter = i + self.call_hook('before_train_iter') + self.run_iter(data_batch, train_mode=True, **kwargs) + self.call_hook('after_train_iter') + self._iter += 1 + + self.call_hook('after_train_epoch') + self._epoch += 1 + + def val(self, data_loader, **kwargs): + self.model.eval() + self.mode = 'val' + self.data_loader = data_loader + self.call_hook('before_val_epoch') + time.sleep(2) # Prevent possible deadlock during epoch transition + for i, data_batch in enumerate(self.data_loader): + self._inner_iter = i + self.call_hook('before_val_iter') + with torch.no_grad(): + self.run_iter(data_batch, train_mode=False, **kwargs) + self.call_hook('after_val_iter') + + self.call_hook('after_val_epoch') \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/core/runner/omnisource_runner.py b/Downstream/Open-Set-Action-Recognition/mmaction/core/runner/omnisource_runner.py new file mode 100644 index 0000000..0209d5d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/core/runner/omnisource_runner.py @@ -0,0 +1,162 @@ +# Copyright (c) Open-MMLab. All rights reserved. +import time +import warnings + +import mmcv +from mmcv.runner import EpochBasedRunner, Hook +from mmcv.runner.utils import get_host_info + + +def cycle(iterable): + iterator = iter(iterable) + while True: + try: + yield next(iterator) + except StopIteration: + iterator = iter(iterable) + + +class OmniSourceDistSamplerSeedHook(Hook): + + def before_epoch(self, runner): + for data_loader in runner.data_loaders: + if hasattr(data_loader.sampler, 'set_epoch'): + # in case the data loader uses `SequentialSampler` in Pytorch + data_loader.sampler.set_epoch(runner.epoch) + elif hasattr(data_loader.batch_sampler.sampler, 'set_epoch'): + # batch sampler in pytorch wraps the sampler as its attributes. + data_loader.batch_sampler.sampler.set_epoch(runner.epoch) + + +class OmniSourceRunner(EpochBasedRunner): + """OmniSource Epoch-based Runner. + + This runner train models epoch by epoch, the epoch length is defined by the + dataloader[0], which is the main dataloader. 
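The `cycle` helper defined above is easy to gloss over, so here is a self-contained check of what it does with a plain list; an auxiliary DataLoader would behave analogously.

```python
# The helper differs from ``itertools.cycle`` in one useful way: it
# re-creates the iterator on exhaustion instead of replaying cached
# elements, so a loader that reshuffles in ``__iter__`` yields a fresh
# order on every pass.
def cycle(iterable):
    iterator = iter(iterable)
    while True:
        try:
            yield next(iterator)
        except StopIteration:
            iterator = iter(iterable)


aux = cycle([1, 2, 3])
print([next(aux) for _ in range(7)])   # [1, 2, 3, 1, 2, 3, 1]
```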
+ """ + + def run_iter(self, data_batch, train_mode, source, **kwargs): + if self.batch_processor is not None: + outputs = self.batch_processor( + self.model, data_batch, train_mode=train_mode, **kwargs) + elif train_mode: + outputs = self.model.train_step(data_batch, self.optimizer, + **kwargs) + else: + outputs = self.model.val_step(data_batch, self.optimizer, **kwargs) + if not isinstance(outputs, dict): + raise TypeError('"batch_processor()" or "model.train_step()"' + 'and "model.val_step()" must return a dict') + # Since we have multiple sources, we add a suffix to log_var names, + # so that we can differentiate them. + if 'log_vars' in outputs: + log_vars = outputs['log_vars'] + log_vars = {k + source: v for k, v in log_vars.items()} + self.log_buffer.update(log_vars, outputs['num_samples']) + + self.outputs = outputs + + def train(self, data_loaders, **kwargs): + self.model.train() + self.mode = 'train' + self.data_loaders = data_loaders + self.main_loader = self.data_loaders[0] + # Add aliasing + self.data_loader = self.main_loader + self.aux_loaders = self.data_loaders[1:] + self.aux_iters = [cycle(loader) for loader in self.aux_loaders] + + auxiliary_iter_times = [1] * len(self.aux_loaders) + use_aux_per_niter = 1 + if 'train_ratio' in kwargs: + train_ratio = kwargs.pop('train_ratio') + use_aux_per_niter = train_ratio[0] + auxiliary_iter_times = train_ratio[1:] + + self._max_iters = self._max_epochs * len(self.main_loader) + + self.call_hook('before_train_epoch') + time.sleep(2) # Prevent possible deadlock during epoch transition + + for i, data_batch in enumerate(self.main_loader): + self._inner_iter = i + self.call_hook('before_train_iter') + self.run_iter(data_batch, train_mode=True, source='') + self.call_hook('after_train_iter') + + if self._iter % use_aux_per_niter != 0: + self._iter += 1 + continue + + for idx, n_times in enumerate(auxiliary_iter_times): + for _ in range(n_times): + data_batch = next(self.aux_iters[idx]) + self.call_hook('before_train_iter') + self.run_iter( + data_batch, train_mode=True, source=f'/aux{idx}') + self.call_hook('after_train_iter') + self._iter += 1 + + self.call_hook('after_train_epoch') + self._epoch += 1 + + # Now that we use validate hook, not implement this func to save efforts. + def val(self, data_loader, **kwargs): + raise NotImplementedError + + def run(self, data_loaders, workflow, max_epochs=None, **kwargs): + """Start running. + + Args: + data_loaders (list[:obj:`DataLoader`]): Dataloaders for training. + `data_loaders[0]` is the main data_loader, which contains + target datasets and determines the epoch length. + `data_loaders[1:]` are auxiliary data loaders, which contain + auxiliary web datasets. + workflow (list[tuple]): A list of (phase, epochs) to specify the + running order and epochs. E.g, [('train', 2)] means running 2 + epochs for training iteratively. Note that val epoch is not + supported for this runner for simplicity. + max_epochs (int | None): The max epochs that training lasts, + deprecated now. Default: None. 
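Because the `train_ratio` handling in `train()` above is terse, the sketch below reproduces only the scheduling decision it makes, i.e. which loader is consumed on which iteration. The numbers are arbitrary and the function is purely illustrative.

```python
# With train_ratio = [2, 1, 3] the auxiliary loaders are visited only on
# every 2nd main iteration, the first auxiliary loader once and the
# second one three times per visit.
def interleave(num_main_iters, train_ratio):
    use_aux_per_niter, aux_times = train_ratio[0], train_ratio[1:]
    schedule, cur_iter = [], 0
    for _ in range(num_main_iters):
        schedule.append('main')
        if cur_iter % use_aux_per_niter == 0:
            for idx, n_times in enumerate(aux_times):
                schedule.extend([f'aux{idx}'] * n_times)
        cur_iter += 1
    return schedule


print(interleave(4, [2, 1, 3]))
# ['main', 'aux0', 'aux1', 'aux1', 'aux1', 'main',
#  'main', 'aux0', 'aux1', 'aux1', 'aux1', 'main']
```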
+ """ + assert isinstance(data_loaders, list) + assert mmcv.is_list_of(workflow, tuple) + assert len(workflow) == 1 and workflow[0][0] == 'train' + if max_epochs is not None: + warnings.warn( + 'setting max_epochs in run is deprecated, ' + 'please set max_epochs in runner_config', DeprecationWarning) + self._max_epochs = max_epochs + + assert self._max_epochs is not None, ( + 'max_epochs must be specified during instantiation') + + mode, epochs = workflow[0] + self._max_iters = self._max_epochs * len(data_loaders[0]) + + work_dir = self.work_dir if self.work_dir is not None else 'NONE' + self.logger.info('Start running, host: %s, work_dir: %s', + get_host_info(), work_dir) + self.logger.info('workflow: %s, max: %d epochs', workflow, + self._max_epochs) + self.call_hook('before_run') + + while self.epoch < self._max_epochs: + if isinstance(mode, str): # self.train() + if not hasattr(self, mode): + raise ValueError( + f'runner has no method named "{mode}" to run an ' + 'epoch') + epoch_runner = getattr(self, mode) + else: + raise TypeError( + f'mode in workflow must be a str, but got {mode}') + + for _ in range(epochs): + if mode == 'train' and self.epoch >= self._max_epochs: + break + epoch_runner(data_loaders, **kwargs) + + time.sleep(1) # wait for some hooks like loggers to finish + self.call_hook('after_run') diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/datasets/__init__.py b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/__init__.py new file mode 100644 index 0000000..83d5a39 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/__init__.py @@ -0,0 +1,21 @@ +from .activitynet_dataset import ActivityNetDataset +from .audio_dataset import AudioDataset +from .audio_feature_dataset import AudioFeatureDataset +from .audio_visual_dataset import AudioVisualDataset +from .ava_dataset import AVADataset +from .base import BaseDataset +from .builder import build_dataloader, build_dataset +from .dataset_wrappers import RepeatDataset +from .hvu_dataset import HVUDataset +from .image_dataset import ImageDataset +from .rawframe_dataset import RawframeDataset +from .rawvideo_dataset import RawVideoDataset +from .ssn_dataset import SSNDataset +from .video_dataset import VideoDataset + +__all__ = [ + 'VideoDataset', 'build_dataloader', 'build_dataset', 'RepeatDataset', + 'RawframeDataset', 'BaseDataset', 'ActivityNetDataset', 'SSNDataset', + 'HVUDataset', 'AudioDataset', 'AudioFeatureDataset', 'ImageDataset', + 'RawVideoDataset', 'AVADataset', 'AudioVisualDataset' +] diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/datasets/activitynet_dataset.py b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/activitynet_dataset.py new file mode 100644 index 0000000..d448cf1 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/activitynet_dataset.py @@ -0,0 +1,268 @@ +import copy +import os +import os.path as osp +import warnings + +import mmcv +import numpy as np + +from ..core import average_recall_at_avg_proposals +from .base import BaseDataset +from .registry import DATASETS + + +@DATASETS.register_module() +class ActivityNetDataset(BaseDataset): + """ActivityNet dataset for temporal action localization. + + The dataset loads raw features and apply specified transforms to return a + dict containing the frame tensors and other information. 
+ + The ann_file is a json file with multiple objects, and each object has a + key of the name of a video, and value of total frames of the video, total + seconds of the video, annotations of a video, feature frames (frames + covered by features) of the video, fps and rfps. Example of a + annotation file: + + .. code-block:: JSON + + { + "v_--1DO2V4K74": { + "duration_second": 211.53, + "duration_frame": 6337, + "annotations": [ + { + "segment": [ + 30.025882995319815, + 205.2318595943838 + ], + "label": "Rock climbing" + } + ], + "feature_frame": 6336, + "fps": 30.0, + "rfps": 29.9579255898 + }, + "v_--6bJUbfpnQ": { + "duration_second": 26.75, + "duration_frame": 647, + "annotations": [ + { + "segment": [ + 2.578755070202808, + 24.914101404056165 + ], + "label": "Drinking beer" + } + ], + "feature_frame": 624, + "fps": 24.0, + "rfps": 24.1869158879 + }, + ... + } + + + Args: + ann_file (str): Path to the annotation file. + pipeline (list[dict | callable]): A sequence of data transforms. + data_prefix (str | None): Path to a directory where videos are held. + Default: None. + test_mode (bool): Store True when building test or validation dataset. + Default: False. + """ + + def __init__(self, ann_file, pipeline, data_prefix=None, test_mode=False): + super().__init__(ann_file, pipeline, data_prefix, test_mode) + + def load_annotations(self): + """Load the annotation according to ann_file into video_infos.""" + video_infos = [] + anno_database = mmcv.load(self.ann_file) + for video_name in anno_database: + video_info = anno_database[video_name] + video_info['video_name'] = video_name + video_infos.append(video_info) + return video_infos + + def prepare_test_frames(self, idx): + """Prepare the frames for testing given the index.""" + results = copy.deepcopy(self.video_infos[idx]) + results['data_prefix'] = self.data_prefix + return self.pipeline(results) + + def prepare_train_frames(self, idx): + """Prepare the frames for training given the index.""" + results = copy.deepcopy(self.video_infos[idx]) + results['data_prefix'] = self.data_prefix + return self.pipeline(results) + + def __len__(self): + """Get the size of the dataset.""" + return len(self.video_infos) + + def _import_ground_truth(self): + """Read ground truth data from video_infos.""" + ground_truth = {} + for video_info in self.video_infos: + video_id = video_info['video_name'][2:] + this_video_ground_truths = [] + for ann in video_info['annotations']: + t_start, t_end = ann['segment'] + label = ann['label'] + this_video_ground_truths.append([t_start, t_end, label]) + ground_truth[video_id] = np.array(this_video_ground_truths) + return ground_truth + + @staticmethod + def proposals2json(results, show_progress=False): + """Convert all proposals to a final dict(json) format. + + Args: + results (list[dict]): All proposals. + show_progress (bool): Whether to show the progress bar. + Defaults: False. + + Returns: + dict: The final result dict. E.g. + + .. code-block:: Python + + dict(video-1=[dict(segment=[1.1,2.0]. 
score=0.9), + dict(segment=[50.1, 129.3], score=0.6)]) + """ + result_dict = {} + print('Convert proposals to json format') + if show_progress: + prog_bar = mmcv.ProgressBar(len(results)) + for result in results: + video_name = result['video_name'] + result_dict[video_name[2:]] = result['proposal_list'] + if show_progress: + prog_bar.update() + return result_dict + + @staticmethod + def _import_proposals(results): + """Read predictions from results.""" + proposals = {} + num_proposals = 0 + for result in results: + video_id = result['video_name'][2:] + this_video_proposals = [] + for proposal in result['proposal_list']: + t_start, t_end = proposal['segment'] + score = proposal['score'] + this_video_proposals.append([t_start, t_end, score]) + num_proposals += 1 + proposals[video_id] = np.array(this_video_proposals) + return proposals, num_proposals + + def dump_results(self, results, out, output_format, version='VERSION 1.3'): + """Dump data to json/csv files.""" + if output_format == 'json': + result_dict = self.proposals2json(results) + output_dict = { + 'version': version, + 'results': result_dict, + 'external_data': {} + } + mmcv.dump(output_dict, out) + elif output_format == 'csv': + # TODO: add csv handler to mmcv and use mmcv.dump + os.makedirs(out, exist_ok=True) + header = 'action,start,end,tmin,tmax' + for result in results: + video_name, outputs = result + output_path = osp.join(out, video_name + '.csv') + np.savetxt( + output_path, + outputs, + header=header, + delimiter=',', + comments='') + else: + raise ValueError( + f'The output format {output_format} is not supported.') + + def evaluate( + self, + results, + metrics='AR@AN', + metric_options={ + 'AR@AN': + dict( + max_avg_proposals=100, + temporal_iou_thresholds=np.linspace(0.5, 0.95, 10)) + }, + logger=None, + **deprecated_kwargs): + """Evaluation in feature dataset. + + Args: + results (list[dict]): Output results. + metrics (str | sequence[str]): Metrics to be performed. + Defaults: 'AR@AN'. + metric_options (dict): Dict for metric options. Options are + ``max_avg_proposals``, ``temporal_iou_thresholds`` for + ``AR@AN``. + default: ``{'AR@AN': dict(max_avg_proposals=100, + temporal_iou_thresholds=np.linspace(0.5, 0.95, 10))}``. + logger (logging.Logger | None): Training logger. Defaults: None. + deprecated_kwargs (dict): Used for containing deprecated arguments. + See 'https://github.com/open-mmlab/mmaction2/pull/286'. + + Returns: + dict: Evaluation results for evaluation metrics. 
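A compact example of the result format the methods above consume and the submission dict they emit. The video names follow the annotation example earlier in this docstring; the segments and scores are made up.

```python
# Toy version of proposals2json + dump_results(output_format='json'):
# the ``v_`` prefix is stripped and the dict is keyed by video id.
results = [
    {'video_name': 'v_--1DO2V4K74',
     'proposal_list': [{'segment': [30.0, 205.2], 'score': 0.92}]},
    {'video_name': 'v_--6bJUbfpnQ',
     'proposal_list': [{'segment': [2.6, 24.9], 'score': 0.71}]},
]

result_dict = {r['video_name'][2:]: r['proposal_list'] for r in results}
output = {'version': 'VERSION 1.3', 'results': result_dict,
          'external_data': {}}
print(sorted(output['results']))   # ['--1DO2V4K74', '--6bJUbfpnQ']
```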
+ """ + # Protect ``metric_options`` since it uses mutable value as default + metric_options = copy.deepcopy(metric_options) + + if deprecated_kwargs != {}: + warnings.warn( + 'Option arguments for metrics has been changed to ' + "`metric_options`, See 'https://github.com/open-mmlab/mmaction2/pull/286' " # noqa: E501 + 'for more details') + metric_options['AR@AN'] = dict(metric_options['AR@AN'], + **deprecated_kwargs) + + if not isinstance(results, list): + raise TypeError(f'results must be a list, but got {type(results)}') + assert len(results) == len(self), ( + f'The length of results is not equal to the dataset len: ' + f'{len(results)} != {len(self)}') + + metrics = metrics if isinstance(metrics, (list, tuple)) else [metrics] + allowed_metrics = ['AR@AN'] + for metric in metrics: + if metric not in allowed_metrics: + raise KeyError(f'metric {metric} is not supported') + + eval_results = {} + ground_truth = self._import_ground_truth() + proposal, num_proposals = self._import_proposals(results) + + for metric in metrics: + if metric == 'AR@AN': + temporal_iou_thresholds = metric_options.setdefault( + 'AR@AN', {}).setdefault('temporal_iou_thresholds', + np.linspace(0.5, 0.95, 10)) + max_avg_proposals = metric_options.setdefault( + 'AR@AN', {}).setdefault('max_avg_proposals', 100) + if isinstance(temporal_iou_thresholds, list): + temporal_iou_thresholds = np.array(temporal_iou_thresholds) + + recall, _, _, auc = ( + average_recall_at_avg_proposals( + ground_truth, + proposal, + num_proposals, + max_avg_proposals=max_avg_proposals, + temporal_iou_thresholds=temporal_iou_thresholds)) + eval_results['auc'] = auc + eval_results['AR@1'] = np.mean(recall[:, 0]) + eval_results['AR@5'] = np.mean(recall[:, 4]) + eval_results['AR@10'] = np.mean(recall[:, 9]) + eval_results['AR@100'] = np.mean(recall[:, 99]) + + return eval_results diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/datasets/audio_dataset.py b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/audio_dataset.py new file mode 100644 index 0000000..4443402 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/audio_dataset.py @@ -0,0 +1,69 @@ +import os.path as osp + +import torch + +from .base import BaseDataset +from .registry import DATASETS + + +@DATASETS.register_module() +class AudioDataset(BaseDataset): + """Audio dataset for video recognition. Extracts the audio feature on-the- + fly. Annotation file can be that of the rawframe dataset, or: + + .. code-block:: txt + + some/directory-1.wav 163 1 + some/directory-2.wav 122 1 + some/directory-3.wav 258 2 + some/directory-4.wav 234 2 + some/directory-5.wav 295 3 + some/directory-6.wav 121 3 + + Args: + ann_file (str): Path to the annotation file. + pipeline (list[dict | callable]): A sequence of data transforms. + suffix (str): The suffix of the audio file. Default: '.wav'. + kwargs (dict): Other keyword args for `BaseDataset`. 
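A minimal parse of one line of the audio annotation format shown above ("&lt;path&gt; &lt;total_frames&gt; &lt;label...&gt;"). The `data_prefix` path is a made-up example, and this only mirrors the single-label branch of `load_annotations`.

```python
import os.path as osp

data_prefix, suffix = 'data/kinetics400_audio', '.wav'   # hypothetical prefix
line = 'some/directory-1.wav 163 1'

filename, total_frames, *labels = line.strip().split()
if not filename.endswith(suffix):
    filename += suffix
video_info = dict(
    audio_path=osp.join(data_prefix, filename),
    total_frames=int(total_frames),
    label=[int(x) for x in labels][0])   # single-label case
print(video_info)
# {'audio_path': 'data/kinetics400_audio/some/directory-1.wav',
#  'total_frames': 163, 'label': 1}
```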
+ """ + + def __init__(self, ann_file, pipeline, suffix='.wav', **kwargs): + self.suffix = suffix + super().__init__(ann_file, pipeline, modality='Audio', **kwargs) + + def load_annotations(self): + """Load annotation file to get video information.""" + if self.ann_file.endswith('.json'): + return self.load_json_annotations() + video_infos = [] + with open(self.ann_file, 'r') as fin: + for line in fin: + line_split = line.strip().split() + video_info = {} + idx = 0 + filename = line_split[idx] + if self.data_prefix is not None: + if not filename.endswith(self.suffix): + filename = osp.join(self.data_prefix, + filename + self.suffix) + else: + filename = osp.join(self.data_prefix, filename) + video_info['audio_path'] = filename + idx += 1 + # idx for total_frames + video_info['total_frames'] = int(line_split[idx]) + idx += 1 + # idx for label[s] + label = [int(x) for x in line_split[idx:]] + assert label, f'missing label in line: {line}' + if self.multi_class: + assert self.num_classes is not None + onehot = torch.zeros(self.num_classes) + onehot[label] = 1.0 + video_info['label'] = onehot + else: + assert len(label) == 1 + video_info['label'] = label[0] + video_infos.append(video_info) + + return video_infos diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/datasets/audio_feature_dataset.py b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/audio_feature_dataset.py new file mode 100644 index 0000000..15daa11 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/audio_feature_dataset.py @@ -0,0 +1,70 @@ +import os.path as osp + +import torch + +from .base import BaseDataset +from .registry import DATASETS + + +@DATASETS.register_module() +class AudioFeatureDataset(BaseDataset): + """Audio feature dataset for video recognition. Reads the features + extracted off-line. Annotation file can be that of the rawframe dataset, + or: + + .. code-block:: txt + + some/directory-1.npy 163 1 + some/directory-2.npy 122 1 + some/directory-3.npy 258 2 + some/directory-4.npy 234 2 + some/directory-5.npy 295 3 + some/directory-6.npy 121 3 + + Args: + ann_file (str): Path to the annotation file. + pipeline (list[dict | callable]): A sequence of data transforms. + suffix (str): The suffix of the audio feature file. Default: '.npy'. + kwargs (dict): Other keyword args for `BaseDataset`. 
+ """ + + def __init__(self, ann_file, pipeline, suffix='.npy', **kwargs): + self.suffix = suffix + super().__init__(ann_file, pipeline, modality='Audio', **kwargs) + + def load_annotations(self): + """Load annotation file to get video information.""" + if self.ann_file.endswith('.json'): + return self.load_json_annotations() + video_infos = [] + with open(self.ann_file, 'r') as fin: + for line in fin: + line_split = line.strip().split() + video_info = {} + idx = 0 + filename = line_split[idx] + if self.data_prefix is not None: + if not filename.endswith(self.suffix): + filename = osp.join(self.data_prefix, + filename) + self.suffix + else: + filename = osp.join(self.data_prefix, filename) + video_info['audio_path'] = filename + idx += 1 + # idx for total_frames + video_info['total_frames'] = int(line_split[idx]) + idx += 1 + # idx for label[s] + label = [int(x) for x in line_split[idx:]] + assert label, f'missing label in line: {line}' + if self.multi_class: + assert self.num_classes is not None + onehot = torch.zeros(self.num_classes) + onehot[label] = 1.0 + video_info['label'] = onehot + else: + assert len(label) == 1 + video_info['label'] = label[0] + video_infos.append(video_info) + + return video_infos diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/datasets/audio_visual_dataset.py b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/audio_visual_dataset.py new file mode 100644 index 0000000..4a1b97f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/audio_visual_dataset.py @@ -0,0 +1,76 @@ +import os.path as osp + +from .rawframe_dataset import RawframeDataset +from .registry import DATASETS + + +@DATASETS.register_module() +class AudioVisualDataset(RawframeDataset): + """Dataset that reads both audio and visual data, supporting both rawframes + and videos. The annotation file is same as that of the rawframe dataset, + such as: + + .. code-block:: txt + + some/directory-1 163 1 + some/directory-2 122 1 + some/directory-3 258 2 + some/directory-4 234 2 + some/directory-5 295 3 + some/directory-6 121 3 + + Args: + ann_file (str): Path to the annotation file. + pipeline (list[dict | callable]): A sequence of data transforms. + audio_prefix (str): Directory of the audio files. + kwargs (dict): Other keyword args for `RawframeDataset`. `video_prefix` + is also allowed if pipeline is designed for videos. 
+ """ + + def __init__(self, ann_file, pipeline, audio_prefix, **kwargs): + self.audio_prefix = audio_prefix + self.video_prefix = kwargs.pop('video_prefix', None) + self.data_prefix = kwargs.get('data_prefix', None) + super().__init__(ann_file, pipeline, **kwargs) + + def load_annotations(self): + video_infos = [] + with open(self.ann_file, 'r') as fin: + for line in fin: + line_split = line.strip().split() + video_info = {} + idx = 0 + # idx for frame_dir + frame_dir = line_split[idx] + if self.audio_prefix is not None: + audio_path = osp.join(self.audio_prefix, + frame_dir + '.npy') + video_info['audio_path'] = audio_path + if self.video_prefix: + video_path = osp.join(self.video_prefix, + frame_dir + '.mp4') + video_info['filename'] = video_path + if self.data_prefix is not None: + frame_dir = osp.join(self.data_prefix, frame_dir) + video_info['frame_dir'] = frame_dir + idx += 1 + if self.with_offset: + # idx for offset and total_frames + video_info['offset'] = int(line_split[idx]) + video_info['total_frames'] = int(line_split[idx + 1]) + idx += 2 + else: + # idx for total_frames + video_info['total_frames'] = int(line_split[idx]) + idx += 1 + # idx for label[s] + label = [int(x) for x in line_split[idx:]] + assert len(label), f'missing label in line: {line}' + if self.multi_class: + assert self.num_classes is not None + video_info['label'] = label + else: + assert len(label) == 1 + video_info['label'] = label[0] + video_infos.append(video_info) + return video_infos diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/datasets/ava_dataset.py b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/ava_dataset.py new file mode 100644 index 0000000..a333bfa --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/ava_dataset.py @@ -0,0 +1,244 @@ +import copy +import os.path as osp +from collections import defaultdict + +import mmcv +import numpy as np + +from ..utils import get_root_logger +from .base import BaseDataset +from .registry import DATASETS + + +@DATASETS.register_module() +class AVADataset(BaseDataset): + """AVA dataset for spatial temporal detection. + + Based on official AVA annotation files, the dataset loads raw frames, + bounding boxes, proposals and applies specified transformations to return + a dict containing the frame tensors and other information. + + This datasets can load information from the following files: + + .. code-block:: txt + + ann_file -> ava_{train, val}_{v2.1, v2.2}.csv + exclude_file -> ava_{train, val}_excluded_timestamps_{v2.1, v2.2}.csv + label_file -> ava_action_list_{v2.1, v2.2}.pbtxt / + ava_action_list_{v2.1, v2.2}_for_activitynet_2019.pbtxt + proposal_file -> ava_dense_proposals_{train, val}.FAIR.recall_93.9.pkl + + Particularly, the proposal_file is a pickle file which contains + ``img_key`` (in format of ``{video_id},{timestamp}``). Example of a pickle + file: + + .. code-block:: JSON + + { + ... + '0f39OWEqJ24,0902': + array([[0.011 , 0.157 , 0.655 , 0.983 , 0.998163]]), + '0f39OWEqJ24,0912': + array([[0.054 , 0.088 , 0.91 , 0.998 , 0.068273], + [0.016 , 0.161 , 0.519 , 0.974 , 0.984025], + [0.493 , 0.283 , 0.981 , 0.984 , 0.983621]]), + ... + } + + Args: + ann_file (str): Path to the annotation file like + ``ava_{train, val}_{v2.1, v2.2}.csv``. + exclude_file (str): Path to the excluded timestamp file like + ``ava_{train, val}_excluded_timestamps_{v2.1, v2.2}.csv``. + pipeline (list[dict | callable]): A sequence of data transforms. 
+ label_file (str): Path to the label file like + ``ava_action_list_{v2.1, v2.2}.pbtxt`` or + ``ava_action_list_{v2.1, v2.2}_for_activitynet_2019.pbtxt``. + Default: None. + filename_tmpl (str): Template for each filename. + Default: 'img_{:05}.jpg'. + proposal_file (str): Path to the proposal file like + ``ava_dense_proposals_{train, val}.FAIR.recall_93.9.pkl``. + Default: None. + data_prefix (str): Path to a directory where videos are held. + Default: None. + test_mode (bool): Store True when building test or validation dataset. + Default: False. + modality (str): Modality of data. Support 'RGB', 'Flow'. + Default: 'RGB'. + num_max_proposals (int): Max proposals number to store. Default: 1000. + timestamp_start (int): The start point of included timestamps. The + default value is referred from the official website. Default: 902. + timestamp_end (int): The end point of included timestamps. The + default value is referred from the official website. Default: 1798. + """ + + _FPS = 30 + _NUM_CLASSES = 81 + + def __init__(self, + ann_file, + exclude_file, + pipeline, + label_file=None, + filename_tmpl='img_{:05}.jpg', + proposal_file=None, + data_prefix=None, + test_mode=False, + modality='RGB', + num_max_proposals=1000, + timestamp_start=902, + timestamp_end=1798): + # since it inherits from `BaseDataset`, some arguments + # should be assigned before performing `load_annotations()` + self.exclude_file = exclude_file + self.label_file = label_file + self.proposal_file = proposal_file + self.filename_tmpl = filename_tmpl + self.num_max_proposals = num_max_proposals + self.timestamp_start = timestamp_start + self.timestamp_end = timestamp_end + self.logger = get_root_logger() + super().__init__( + ann_file, pipeline, data_prefix, test_mode, modality=modality) + + if self.proposal_file is not None: + self.proposals = mmcv.load(self.proposal_file) + else: + self.proposals = None + + if not test_mode: + valid_indexes = self.filter_exclude_file() + self.logger.info( + f'{len(valid_indexes)} out of {len(self.video_infos)} ' + f'frames are valid.') + self.video_infos = self.video_infos = [ + self.video_infos[i] for i in valid_indexes + ] + + def parse_img_record(self, img_records): + bboxes, labels, entity_ids = [], [], [] + while len(img_records) > 0: + img_record = img_records[0] + num_img_records = len(img_records) + selected_records = list( + filter( + lambda x: np.array_equal(x['entity_box'], img_record[ + 'entity_box']), img_records)) + num_selected_records = len(selected_records) + img_records = list( + filter( + lambda x: not np.array_equal(x['entity_box'], img_record[ + 'entity_box']), img_records)) + assert len(img_records) + num_selected_records == num_img_records + + bboxes.append(img_record['entity_box']) + valid_labels = np.array([ + selected_record['label'] + for selected_record in selected_records + ]) + + padded_labels = np.pad( + valid_labels, (0, self._NUM_CLASSES - valid_labels.shape[0]), + 'constant', + constant_values=-1) + labels.append(padded_labels) + entity_ids.append(img_record['entity_id']) + bboxes = np.stack(bboxes) + labels = np.stack(labels) + entity_ids = np.stack(entity_ids) + return bboxes, labels, entity_ids + + def filter_exclude_file(self): + valid_indexes = [] + if self.exclude_file is None: + valid_indexes = list(range(len(self.video_infos))) + else: + exclude_video_infos = [ + x.strip().split(',') for x in open(self.exclude_file) + ] + for i, video_info in enumerate(self.video_infos): + valid_indexes.append(i) + for video_id, timestamp in exclude_video_infos: 
+ if (video_info['video_id'] == video_id + and video_info['timestamp'] == int(timestamp)): + valid_indexes.pop() + break + return valid_indexes + + def load_annotations(self): + video_infos = [] + records_dict_by_img = defaultdict(list) + with open(self.ann_file, 'r') as fin: + for line in fin: + line_split = line.strip().split(',') + + video_id = line_split[0] + timestamp = int(line_split[1]) + img_key = f'{video_id},{timestamp:04d}' + + entity_box = np.array(list(map(float, line_split[2:6]))) + label = int(line_split[6]) + entity_id = int(line_split[7]) + shot_info = (0, (self.timestamp_end - self.timestamp_start) * + self._FPS) + + video_info = dict( + video_id=video_id, + timestamp=timestamp, + entity_box=entity_box, + label=label, + entity_id=entity_id, + shot_info=shot_info) + records_dict_by_img[img_key].append(video_info) + + for img_key in records_dict_by_img: + video_id, timestamp = img_key.split(',') + bboxes, labels, entity_ids = self.parse_img_record( + records_dict_by_img[img_key]) + ann = dict( + entity_boxes=bboxes, labels=labels, entity_ids=entity_ids) + frame_dir = video_id + if self.data_prefix is not None: + frame_dir = osp.join(self.data_prefix, frame_dir) + video_info = dict( + frame_dir=frame_dir, + video_id=video_id, + timestamp=int(timestamp), + img_key=img_key, + shot_info=shot_info, + fps=self._FPS, + ann=ann) + video_infos.append(video_info) + + return video_infos + + def prepare_train_frames(self, idx): + """Prepare the frames for training given the index.""" + results = copy.deepcopy(self.video_infos[idx]) + img_key = results['img_key'] + + results['filename_tmpl'] = self.filename_tmpl + results['modality'] = self.modality + results['start_index'] = self.start_index + results['timestamp_start'] = self.timestamp_start + results['timestamp_end'] = self.timestamp_end + results['proposals'] = self.proposals[img_key][:self.num_max_proposals] + + return self.pipeline(results) + + def prepare_test_frames(self, idx): + """Prepare the frames for testing given the index.""" + results = copy.deepcopy(self.video_infos[idx]) + img_key = results['img_key'] + + results['filename_tmpl'] = self.filename_tmpl + results['modality'] = self.modality + results['start_index'] = self.start_index + results['timestamp_start'] = self.timestamp_start + results['timestamp_end'] = self.timestamp_end + results['proposals'] = self.proposals[img_key][:self.num_max_proposals] + return self.pipeline(results) + + def evaluate(self, results, metrics, metric_options, logger): + raise NotImplementedError diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/datasets/base.py b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/base.py new file mode 100644 index 0000000..fbfc7f9 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/base.py @@ -0,0 +1,280 @@ +import copy +import os.path as osp +import warnings +from abc import ABCMeta, abstractmethod +from collections import defaultdict + +import mmcv +import numpy as np +import torch +from mmcv.utils import print_log +from torch.utils.data import Dataset + +from ..core import (mean_average_precision, mean_class_accuracy, + mmit_mean_average_precision, top_k_accuracy) +from .pipelines import Compose + + +class BaseDataset(Dataset, metaclass=ABCMeta): + """Base class for datasets. + + All datasets to process video should subclass it. + All subclasses should overwrite: + + - Methods:`load_annotations`, supporting to load information from an + annotation file. + - Methods:`prepare_train_frames`, providing train data. 
+ - Methods:`prepare_test_frames`, providing test data. + + Args: + ann_file (str): Path to the annotation file. + pipeline (list[dict | callable]): A sequence of data transforms. + data_prefix (str | None): Path to a directory where videos are held. + Default: None. + test_mode (bool): Store True when building test or validation dataset. + Default: False. + multi_class (bool): Determines whether the dataset is a multi-class + dataset. Default: False. + num_classes (int | None): Number of classes of the dataset, used in + multi-class datasets. Default: None. + start_index (int): Specify a start index for frames in consideration of + different filename format. However, when taking videos as input, + it should be set to 0, since frames loaded from videos count + from 0. Default: 1. + modality (str): Modality of data. Support 'RGB', 'Flow', 'Audio'. + Default: 'RGB'. + sample_by_class (bool): Sampling by class, should be set `True` when + performing inter-class data balancing. Only compatible with + `multi_class == False`. Only applies for training. Default: False. + power (float | None): We support sampling data with the probability + proportional to the power of its label frequency (freq ^ power) + when sampling data. `power == 1` indicates uniformly sampling all + data; `power == 0` indicates uniformly sampling all classes. + Default: None. + """ + + def __init__(self, + ann_file, + pipeline, + data_prefix=None, + test_mode=False, + multi_class=False, + num_classes=None, + start_index=1, + modality='RGB', + sample_by_class=False, + power=None): + super().__init__() + + self.ann_file = ann_file + self.data_prefix = osp.realpath( + data_prefix) if data_prefix is not None and osp.isdir( + data_prefix) else data_prefix + self.test_mode = test_mode + self.multi_class = multi_class + self.num_classes = num_classes + self.start_index = start_index + self.modality = modality + self.sample_by_class = sample_by_class + self.power = power + assert not (self.multi_class and self.sample_by_class) + + self.pipeline = Compose(pipeline) + self.video_infos = self.load_annotations() + if self.sample_by_class: + self.video_infos_by_class = self.parse_by_class() + + @abstractmethod + def load_annotations(self): + """Load the annotation according to ann_file into video_infos.""" + + # json annotations already looks like video_infos, so for each dataset, + # this func should be the same + def load_json_annotations(self): + """Load json annotation file to get video information.""" + video_infos = mmcv.load(self.ann_file) + num_videos = len(video_infos) + path_key = 'frame_dir' if 'frame_dir' in video_infos[0] else 'filename' + for i in range(num_videos): + path_value = video_infos[i][path_key] + if self.data_prefix is not None: + path_value = osp.join(self.data_prefix, path_value) + video_infos[i][path_key] = path_value + if self.multi_class: + assert self.num_classes is not None + else: + assert len(video_infos[i]['label']) == 1 + video_infos[i]['label'] = video_infos[i]['label'][0] + return video_infos + + def parse_by_class(self): + video_infos_by_class = defaultdict(list) + for item in self.video_infos: + label = item['label'] + video_infos_by_class[label].append(item) + return video_infos_by_class + + @staticmethod + def label2array(num, label): + arr = np.zeros(num, dtype=np.float32) + arr[label] = 1. 
+ return arr + + def evaluate(self, + results, + metrics='top_k_accuracy', + metric_options=dict(top_k_accuracy=dict(topk=(1, 5))), + logger=None, + **deprecated_kwargs): + """Perform evaluation for common datasets. + + Args: + results (list): Output results. + metrics (str | sequence[str]): Metrics to be performed. + Defaults: 'top_k_accuracy'. + metric_options (dict): Dict for metric options. Options are + ``topk`` for ``top_k_accuracy``. + Default: ``dict(top_k_accuracy=dict(topk=(1, 5)))``. + logger (logging.Logger | None): Logger for recording. + Default: None. + deprecated_kwargs (dict): Used for containing deprecated arguments. + See 'https://github.com/open-mmlab/mmaction2/pull/286'. + + Returns: + dict: Evaluation results dict. + """ + # Protect ``metric_options`` since it uses mutable value as default + metric_options = copy.deepcopy(metric_options) + + if deprecated_kwargs != {}: + warnings.warn( + 'Option arguments for metrics has been changed to ' + "`metric_options`, See 'https://github.com/open-mmlab/mmaction2/pull/286' " # noqa: E501 + 'for more details') + metric_options['top_k_accuracy'] = dict( + metric_options['top_k_accuracy'], **deprecated_kwargs) + + if not isinstance(results, list): + raise TypeError(f'results must be a list, but got {type(results)}') + assert len(results) == len(self), ( + f'The length of results is not equal to the dataset len: ' + f'{len(results)} != {len(self)}') + + metrics = metrics if isinstance(metrics, (list, tuple)) else [metrics] + allowed_metrics = [ + 'top_k_accuracy', 'mean_class_accuracy', 'mean_average_precision', + 'mmit_mean_average_precision' + ] + + for metric in metrics: + if metric not in allowed_metrics: + raise KeyError(f'metric {metric} is not supported') + + eval_results = {} + gt_labels = [ann['label'] for ann in self.video_infos] + + for metric in metrics: + msg = f'Evaluating {metric} ...' 
+ if logger is None: + msg = '\n' + msg + print_log(msg, logger=logger) + + if metric == 'top_k_accuracy': + topk = metric_options.setdefault('top_k_accuracy', + {}).setdefault( + 'topk', (1, 5)) + if not isinstance(topk, (int, tuple)): + raise TypeError('topk must be int or tuple of int, ' + f'but got {type(topk)}') + if isinstance(topk, int): + topk = (topk, ) + + top_k_acc = top_k_accuracy(results, gt_labels, topk) + log_msg = [] + for k, acc in zip(topk, top_k_acc): + eval_results[f'top{k}_acc'] = acc + log_msg.append(f'\ntop{k}_acc\t{acc:.4f}') + log_msg = ''.join(log_msg) + print_log(log_msg, logger=logger) + continue + + if metric == 'mean_class_accuracy': + mean_acc = mean_class_accuracy(results, gt_labels) + eval_results['mean_class_accuracy'] = mean_acc + log_msg = f'\nmean_acc\t{mean_acc:.4f}' + print_log(log_msg, logger=logger) + continue + + if metric in [ + 'mean_average_precision', 'mmit_mean_average_precision' + ]: + gt_labels = [ + self.label2array(self.num_classes, label) + for label in gt_labels + ] + if metric == 'mean_average_precision': + mAP = mean_average_precision(results, gt_labels) + elif metric == 'mmit_mean_average_precision': + mAP = mmit_mean_average_precision(results, gt_labels) + eval_results['mean_average_precision'] = mAP + log_msg = f'\nmean_average_precision\t{mAP:.4f}' + print_log(log_msg, logger=logger) + continue + + return eval_results + + @staticmethod + def dump_results(results, out): + """Dump data to json/yaml/pickle strings or files.""" + return mmcv.dump(results, out) + + def prepare_train_frames(self, idx): + """Prepare the frames for training given the index.""" + if self.sample_by_class: + # Then, the idx is the class index + samples = self.video_infos_by_class[idx] + results = copy.deepcopy(np.random.choice(samples)) + else: + results = copy.deepcopy(self.video_infos[idx]) + results['modality'] = self.modality + results['start_index'] = self.start_index + + # prepare tensor in getitem + # If HVU, type(results['label']) is dict + if self.multi_class and isinstance(results['label'], list): + onehot = torch.zeros(self.num_classes) + onehot[results['label']] = 1. + results['label'] = onehot + + return self.pipeline(results) + + def prepare_test_frames(self, idx): + """Prepare the frames for testing given the index.""" + if self.sample_by_class: + # Then, the idx is the class index + samples = self.video_infos_by_class[idx] + results = copy.deepcopy(np.random.choice(samples)) + else: + results = copy.deepcopy(self.video_infos[idx]) + results['modality'] = self.modality + results['start_index'] = self.start_index + + # prepare tensor in getitem + # If HVU, type(results['label']) is dict + if self.multi_class and isinstance(results['label'], list): + onehot = torch.zeros(self.num_classes) + onehot[results['label']] = 1. 
+ results['label'] = onehot + + return self.pipeline(results) + + def __len__(self): + """Get the size of the dataset.""" + return len(self.video_infos) + + def __getitem__(self, idx): + """Get the sample for either training or testing given index.""" + if self.test_mode: + return self.prepare_test_frames(idx) + + return self.prepare_train_frames(idx) diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/datasets/builder.py b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/builder.py new file mode 100644 index 0000000..e7c64ab --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/builder.py @@ -0,0 +1,124 @@ +import platform +import random +from functools import partial + +import numpy as np +from mmcv.parallel import collate +from mmcv.runner import get_dist_info +from mmcv.utils import build_from_cfg +from torch.utils.data import DataLoader + +from .dataset_wrappers import RepeatDataset +from .registry import DATASETS +from .samplers import DistributedPowerSampler, DistributedSampler + +if platform.system() != 'Windows': + # https://github.com/pytorch/pytorch/issues/973 + import resource + rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) + hard_limit = rlimit[1] + soft_limit = min(4096, hard_limit) + resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit)) + + +def build_dataset(cfg, default_args=None): + """Build a dataset from config dict. + + Args: + cfg (dict): Config dict. It should at least contain the key "type". + default_args (dict | None, optional): Default initialization arguments. + Default: None. + + Returns: + Dataset: The constructed dataset. + """ + if cfg['type'] == 'RepeatDataset': + dataset = RepeatDataset( + build_dataset(cfg['dataset'], default_args), cfg['times']) + else: + dataset = build_from_cfg(cfg, DATASETS, default_args) + return dataset + + +def build_dataloader(dataset, + videos_per_gpu, + workers_per_gpu, + num_gpus=1, + dist=True, + shuffle=True, + seed=None, + drop_last=False, + pin_memory=True, + **kwargs): + """Build PyTorch DataLoader. + + In distributed training, each GPU/process has a dataloader. + In non-distributed training, there is only one dataloader for all GPUs. + + Args: + dataset (:obj:`Dataset`): A PyTorch dataset. + videos_per_gpu (int): Number of videos on each GPU, i.e., + batch size of each GPU. + workers_per_gpu (int): How many subprocesses to use for data + loading for each GPU. + num_gpus (int): Number of GPUs. Only used in non-distributed + training. Default: 1. + dist (bool): Distributed training/test or not. Default: True. + shuffle (bool): Whether to shuffle the data at every epoch. + Default: True. + seed (int | None): Seed to be used. Default: None. + drop_last (bool): Whether to drop the last incomplete batch in epoch. + Default: False + pin_memory (bool): Whether to use pin_memory in DataLoader. + Default: True + kwargs (dict, optional): Any keyword argument to be used to initialize + DataLoader. + + Returns: + DataLoader: A PyTorch dataloader. 
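As a quick illustration of the batching convention this docstring describes (and `build_dataloader` implements just below), the snippet shows the effective batch size and worker count in distributed versus single-process mode; the numbers are arbitrary.

```python
# In distributed mode every process gets its own loader with
# ``videos_per_gpu`` samples; otherwise one loader feeds all GPUs.
def loader_sizes(videos_per_gpu, workers_per_gpu, num_gpus, dist):
    if dist:
        return videos_per_gpu, workers_per_gpu
    return num_gpus * videos_per_gpu, num_gpus * workers_per_gpu


print(loader_sizes(8, 4, 4, dist=True))    # (8, 4)   per process
print(loader_sizes(8, 4, 4, dist=False))   # (32, 16) single process
```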
+ """ + rank, world_size = get_dist_info() + sample_by_class = getattr(dataset, 'sample_by_class', False) + power = getattr(dataset, 'power', None) + + if dist: + if sample_by_class: + assert power is not None + sampler = DistributedPowerSampler(dataset, world_size, rank, power) + else: + sampler = DistributedSampler( + dataset, world_size, rank, shuffle=shuffle) + shuffle = False + batch_size = videos_per_gpu + num_workers = workers_per_gpu + else: + sampler = None + batch_size = num_gpus * videos_per_gpu + num_workers = num_gpus * workers_per_gpu + + init_fn = partial( + worker_init_fn, num_workers=num_workers, rank=rank, + seed=seed) if seed is not None else None + + data_loader = DataLoader( + dataset, + batch_size=batch_size, + sampler=sampler, + num_workers=num_workers, + collate_fn=partial(collate, samples_per_gpu=videos_per_gpu), + pin_memory=pin_memory, + shuffle=shuffle, + worker_init_fn=init_fn, + drop_last=drop_last, + **kwargs) + + return data_loader + + +def worker_init_fn(worker_id, num_workers, rank, seed): + """Init the random seed for various workers.""" + # The seed of each worker equals to + # num_worker * rank + worker_id + user_seed + worker_seed = num_workers * rank + worker_id + seed + np.random.seed(worker_seed) + random.seed(worker_seed) diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/datasets/dataset_wrappers.py b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/dataset_wrappers.py new file mode 100644 index 0000000..3ec8af0 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/dataset_wrappers.py @@ -0,0 +1,30 @@ +from .registry import DATASETS + + +@DATASETS.register_module() +class RepeatDataset: + """A wrapper of repeated dataset. + + The length of repeated dataset will be ``times`` larger than the original + dataset. This is useful when the data loading time is long but the dataset + is small. Using RepeatDataset can reduce the data loading time between + epochs. + + Args: + dataset (:obj:`Dataset`): The dataset to be repeated. + times (int): Repeat times. + """ + + def __init__(self, dataset, times): + self.dataset = dataset + self.times = times + + self._ori_len = len(self.dataset) + + def __getitem__(self, idx): + """Get data.""" + return self.dataset[idx % self._ori_len] + + def __len__(self): + """Length after repetition.""" + return self.times * self._ori_len diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/datasets/hvu_dataset.py b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/hvu_dataset.py new file mode 100644 index 0000000..b523748 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/hvu_dataset.py @@ -0,0 +1,189 @@ +import copy +import os.path as osp + +import mmcv +import numpy as np +from mmcv.utils import print_log + +from ..core import mean_average_precision +from .base import BaseDataset +from .registry import DATASETS + + +@DATASETS.register_module() +class HVUDataset(BaseDataset): + """HVU dataset, which supports the recognition tags of multiple categories. + Accept both video annotation files or rawframe annotation files. + + The dataset loads videos or raw frames and applies specified transforms to + return a dict containing the frame tensors and other information. + + The ann_file is a json file with multiple dictionaries, and each dictionary + indicates a sample video with the filename and tags, the tags are organized + as different categories. Example of a video dictionary: + + .. 
code-block:: txt + + { + 'filename': 'gD_G1b0wV5I_001015_001035.mp4', + 'label': { + 'concept': [250, 131, 42, 51, 57, 155, 122], + 'object': [1570, 508], + 'event': [16], + 'action': [180], + 'scene': [206] + } + } + + Example of a rawframe dictionary: + + .. code-block:: txt + + { + 'frame_dir': 'gD_G1b0wV5I_001015_001035', + 'total_frames': 61 + 'label': { + 'concept': [250, 131, 42, 51, 57, 155, 122], + 'object': [1570, 508], + 'event': [16], + 'action': [180], + 'scene': [206] + } + } + + + Args: + ann_file (str): Path to the annotation file, should be a json file. + pipeline (list[dict | callable]): A sequence of data transforms. + tag_categories (list[str]): List of category names of tags. + tag_category_nums (list[int]): List of number of tags in each category. + filename_tmpl (str | None): Template for each filename. If set to None, + video dataset is used. Default: None. + **kwargs: Keyword arguments for ``BaseDataset``. + """ + + def __init__(self, + ann_file, + pipeline, + tag_categories, + tag_category_nums, + filename_tmpl=None, + **kwargs): + assert len(tag_categories) == len(tag_category_nums) + self.tag_categories = tag_categories + self.tag_category_nums = tag_category_nums + self.filename_tmpl = filename_tmpl + self.num_categories = len(self.tag_categories) + self.num_tags = sum(self.tag_category_nums) + self.category2num = dict(zip(tag_categories, tag_category_nums)) + self.start_idx = [0] + for i in range(self.num_categories - 1): + self.start_idx.append(self.start_idx[-1] + + self.tag_category_nums[i]) + self.category2startidx = dict(zip(tag_categories, self.start_idx)) + self.start_index = kwargs.pop('start_index', 0) + self.dataset_type = None + super().__init__( + ann_file, pipeline, start_index=self.start_index, **kwargs) + + def load_annotations(self): + """Load annotation file to get video information.""" + assert self.ann_file.endswith('.json') + return self.load_json_annotations() + + def load_json_annotations(self): + video_infos = mmcv.load(self.ann_file) + num_videos = len(video_infos) + + video_info0 = video_infos[0] + assert ('filename' in video_info0) != ('frame_dir' in video_info0) + path_key = 'filename' if 'filename' in video_info0 else 'frame_dir' + self.dataset_type = 'video' if path_key == 'filename' else 'rawframe' + if self.dataset_type == 'rawframe': + assert self.filename_tmpl is not None + + for i in range(num_videos): + path_value = video_infos[i][path_key] + if self.data_prefix is not None: + path_value = osp.join(self.data_prefix, path_value) + video_infos[i][path_key] = path_value + + # We will convert label to torch tensors in the pipeline + video_infos[i]['categories'] = self.tag_categories + video_infos[i]['category_nums'] = self.tag_category_nums + if self.dataset_type == 'rawframe': + video_infos[i]['filename_tmpl'] = self.filename_tmpl + video_infos[i]['start_index'] = self.start_index + video_infos[i]['modality'] = self.modality + + return video_infos + + @staticmethod + def label2array(num, label): + arr = np.zeros(num, dtype=np.float32) + arr[label] = 1. + return arr + + def evaluate(self, + results, + metrics='mean_average_precision', + metric_options=None, + logger=None): + """Evaluation in HVU Video Dataset. We only support evaluating mAP for + each tag categories. Since some tag categories are missing for some + videos, we can not evaluate mAP for all tags. + + Args: + results (list): Output results. + metrics (str | sequence[str]): Metrics to be performed. + Defaults: 'mean_average_precision'. 
+ metric_options (dict | None): Dict for metric options. + Default: None. + logger (logging.Logger | None): Logger for recording. + Default: None. + + Returns: + dict: Evaluation results dict. + """ + # Protect ``metric_options`` since it uses mutable value as default + metric_options = copy.deepcopy(metric_options) + + if not isinstance(results, list): + raise TypeError(f'results must be a list, but got {type(results)}') + assert len(results) == len(self), ( + f'The length of results is not equal to the dataset len: ' + f'{len(results)} != {len(self)}') + + metrics = metrics if isinstance(metrics, (list, tuple)) else [metrics] + + # There should be only one metric in the metrics list: + # 'mean_average_precision' + assert len(metrics) == 1 + metric = metrics[0] + assert metric == 'mean_average_precision' + + gt_labels = [ann['label'] for ann in self.video_infos] + + eval_results = {} + for category in self.tag_categories: + + start_idx = self.category2startidx[category] + num = self.category2num[category] + preds = [ + result[start_idx:start_idx + num] + for video_idx, result in enumerate(results) + if category in gt_labels[video_idx] + ] + gts = [ + gt_label[category] for gt_label in gt_labels + if category in gt_label + ] + + gts = [self.label2array(num, item) for item in gts] + + mAP = mean_average_precision(preds, gts) + eval_results[f'{category}_mAP'] = mAP + log_msg = f'\n{category}_mAP\t{mAP:.4f}' + print_log(log_msg, logger=logger) + + return eval_results diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/datasets/image_dataset.py b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/image_dataset.py new file mode 100644 index 0000000..59f859b --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/image_dataset.py @@ -0,0 +1,45 @@ +from .registry import DATASETS +from .video_dataset import VideoDataset + + +@DATASETS.register_module() +class ImageDataset(VideoDataset): + """Image dataset for action recognition, used in the Project OmniSource. + + The dataset loads image list and apply specified transforms to return a + dict containing the image tensors and other information. For the + ImageDataset + + The ann_file is a text file with multiple lines, and each line indicates + the image path and the image label, which are split with a whitespace. + Example of a annotation file: + + .. code-block:: txt + + path/to/image1.jpg 1 + path/to/image2.jpg 1 + path/to/image3.jpg 2 + path/to/image4.jpg 2 + path/to/image5.jpg 3 + path/to/image6.jpg 3 + + Example of a multi-class annotation file: + + .. code-block:: txt + + path/to/image1.jpg 1 3 5 + path/to/image2.jpg 1 2 + path/to/image3.jpg 2 + path/to/image4.jpg 2 4 6 8 + path/to/image5.jpg 3 + path/to/image6.jpg 3 + + Args: + ann_file (str): Path to the annotation file. + pipeline (list[dict | callable]): A sequence of data transforms. + **kwargs: Keyword arguments for ``BaseDataset``. 
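+
+    Example (a minimal sketch; the annotation file and data prefix are
+    hypothetical and must exist on disk, and an empty pipeline is used only
+    for brevity):
+
+    .. code-block:: python
+
+        from mmaction.datasets.image_dataset import ImageDataset
+
+        dataset = ImageDataset(
+            ann_file='data/omnisource/image_list.txt',  # placeholder path
+            data_prefix='data/omnisource/images',       # placeholder path
+            pipeline=[])
+        num_samples = len(dataset)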
+ """ + + def __init__(self, ann_file, pipeline, **kwargs): + super().__init__(ann_file, pipeline, start_index=None, **kwargs) + # use `start_index=None` to indicate it is for `ImageDataset` diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/datasets/pipelines/__init__.py b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/pipelines/__init__.py new file mode 100644 index 0000000..ae3cd1a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/pipelines/__init__.py @@ -0,0 +1,36 @@ +from .augmentations import (AudioAmplify, CenterCrop, ColorJitter, + EntityBoxClip, EntityBoxCrop, EntityBoxFlip, + EntityBoxPad, EntityBoxRescale, Flip, Fuse, + MelSpectrogram, MultiGroupCrop, MultiScaleCrop, + Normalize, RandomCrop, RandomRescale, + RandomResizedCrop, RandomScale, Resize, TenCrop, + ThreeCrop) +from .compose import Compose +from .formating import (Collect, FormatAudioShape, FormatShape, ImageToTensor, + ToDataContainer, ToTensor, Transpose) +from .loading import (AudioDecode, AudioDecodeInit, AudioFeatureSelector, + BuildPseudoClip, DecordDecode, DecordInit, + DenseSampleFrames, FrameSelector, + GenerateLocalizationLabels, ImageDecode, + LoadAudioFeature, LoadHVULabel, LoadLocalizationFeature, + LoadProposals, OpenCVDecode, OpenCVInit, PyAVDecode, + PyAVDecodeMotionVector, PyAVInit, RawFrameDecode, + SampleAVAFrames, SampleFrames, SampleProposalFrames, + UntrimmedSampleFrames) + +__all__ = [ + 'SampleFrames', 'PyAVDecode', 'DecordDecode', 'DenseSampleFrames', + 'OpenCVDecode', 'FrameSelector', 'MultiGroupCrop', 'MultiScaleCrop', + 'RandomResizedCrop', 'RandomCrop', 'Resize', 'Flip', 'Fuse', 'Normalize', + 'ThreeCrop', 'CenterCrop', 'TenCrop', 'ImageToTensor', 'Transpose', + 'Collect', 'FormatShape', 'Compose', 'ToTensor', 'ToDataContainer', + 'GenerateLocalizationLabels', 'LoadLocalizationFeature', 'LoadProposals', + 'DecordInit', 'OpenCVInit', 'PyAVInit', 'SampleProposalFrames', + 'UntrimmedSampleFrames', 'RawFrameDecode', 'DecordInit', 'OpenCVInit', + 'PyAVInit', 'SampleProposalFrames', 'ColorJitter', 'LoadHVULabel', + 'SampleAVAFrames', 'AudioAmplify', 'MelSpectrogram', 'AudioDecode', + 'FormatAudioShape', 'LoadAudioFeature', 'AudioFeatureSelector', + 'AudioDecodeInit', 'EntityBoxPad', 'EntityBoxFlip', 'EntityBoxCrop', + 'EntityBoxRescale', 'EntityBoxClip', 'RandomScale', 'ImageDecode', + 'BuildPseudoClip', 'RandomRescale', 'PyAVDecodeMotionVector' +] diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/datasets/pipelines/augmentations.py b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/pipelines/augmentations.py new file mode 100644 index 0000000..f947e80 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/pipelines/augmentations.py @@ -0,0 +1,1593 @@ +import random +from collections.abc import Sequence + +import mmcv +import numpy as np +from torch.nn.modules.utils import _pair + +from ..registry import PIPELINES + + +def _init_lazy_if_proper(results, lazy): + """Initialize lazy operation properly. + + Make sure that a lazy operation is properly initialized, + and avoid a non-lazy operation accidentally getting mixed in. + + Required keys in results are "imgs" if "img_shape" not in results, + otherwise, Required keys in results are "img_shape", add or modified keys + are "img_shape", "lazy". + Add or modified keys in "lazy" are "original_shape", "crop_bbox", "flip", + "flip_direction", "interpolation". + + Args: + results (dict): A dict stores data pipeline result. 
+ lazy (bool): Determine whether to apply lazy operation. Default: False. + """ + + if 'img_shape' not in results: + results['img_shape'] = results['imgs'][0].shape[:2] + if lazy: + if 'lazy' not in results: + img_h, img_w = results['img_shape'] + lazyop = dict() + lazyop['original_shape'] = results['img_shape'] + lazyop['crop_bbox'] = np.array([0, 0, img_w, img_h], + dtype=np.float32) + lazyop['flip'] = False + lazyop['flip_direction'] = None + lazyop['interpolation'] = None + results['lazy'] = lazyop + else: + assert 'lazy' not in results, 'Use Fuse after lazy operations' + + +@PIPELINES.register_module() +class Fuse: + """Fuse lazy operations. + + Fusion order: + crop -> resize -> flip + + Required keys are "imgs", "img_shape" and "lazy", added or modified keys + are "imgs", "lazy". + Required keys in "lazy" are "crop_bbox", "interpolation", "flip_direction". + """ + + def __call__(self, results): + if 'lazy' not in results: + raise ValueError('No lazy operation detected') + lazyop = results['lazy'] + imgs = results['imgs'] + + # crop + left, top, right, bottom = lazyop['crop_bbox'].round().astype(int) + imgs = [img[top:bottom, left:right] for img in imgs] + + # resize + img_h, img_w = results['img_shape'] + if lazyop['interpolation'] is None: + interpolation = 'bilinear' + else: + interpolation = lazyop['interpolation'] + imgs = [ + mmcv.imresize(img, (img_w, img_h), interpolation=interpolation) + for img in imgs + ] + + # flip + if lazyop['flip']: + for img in imgs: + mmcv.imflip_(img, lazyop['flip_direction']) + + results['imgs'] = imgs + del results['lazy'] + + return results + + +@PIPELINES.register_module() +class RandomScale: + """Resize images by a random scale. + + Required keys are "imgs", "img_shape", "modality", added or modified + keys are "imgs", "img_shape", "keep_ratio", "scale_factor", "lazy", + "scale", "resize_size". Required keys in "lazy" is None, added or + modified key is "interpolation". + + Args: + scales (tuple[int]): Tuple of scales to be chosen for resize. + mode (str): Selection mode for choosing the scale. Options are "range" + and "value". If set to "range", The short edge will be randomly + chosen from the range of minimum and maximum on the shorter one + in all tuples. Otherwise, the longer edge will be randomly chosen + from the range of minimum and maximum on the longer one in all + tuples. Default: 'range'. 
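+
+    Example (illustrative only; the (w, h) scales below are arbitrary
+    choices, not defaults of this class):
+
+    .. code-block:: python
+
+        from mmaction.datasets.pipelines import RandomScale
+
+        # 'range' mode: the short edge is drawn from [256, 320] and the
+        # long edge from [340, 426].
+        scale_range = RandomScale(scales=((340, 256), (426, 320)),
+                                  mode='range')
+        # 'value' mode: one of the given (w, h) pairs is picked as a whole.
+        scale_value = RandomScale(
+            scales=((340, 256), (426, 320), (512, 384)), mode='value')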
+ """ + + def __init__(self, scales, mode='range', **kwargs): + self.mode = mode + if self.mode not in ['range', 'value']: + raise ValueError(f"mode should be 'range' or 'value', " + f'but got {self.mode}') + self.scales = scales + self.kwargs = kwargs + + def select_scale(self, scales): + num_scales = len(scales) + if num_scales == 1: + # specify a fixed scale + scale = scales[0] + elif num_scales == 2: + if self.mode == 'range': + scale_long = [max(s) for s in scales] + scale_short = [min(s) for s in scales] + long_edge = np.random.randint( + min(scale_long), + max(scale_long) + 1) + short_edge = np.random.randint( + min(scale_short), + max(scale_short) + 1) + scale = (long_edge, short_edge) + elif self.mode == 'value': + scale = random.choice(scales) + else: + if self.mode != 'value': + raise ValueError("Only 'value' mode supports more than " + '2 image scales') + scale = random.choice(scales) + + return scale + + def __call__(self, results): + scale = self.select_scale(self.scales) + results['scale'] = scale + resize = Resize(scale, **self.kwargs) + results = resize(results) + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'scales={self.scales}, mode={self.mode})') + return repr_str + + +@PIPELINES.register_module() +class EntityBoxRescale: + """Rescale the entity box and proposals according to the image shape. + + Required keys are "img_shape", "scale_factor", "proposals", + "ann.entity_boxes", added or modified keys are "ann.entity_boxes". If + original "proposals" is not None, "proposals" and "scores" will be added or + modified. + """ + + def __call__(self, results): + img_h, img_w = results['img_shape'] + scale_factor = results['scale_factor'] + scale_factor = np.array([ + scale_factor[0], scale_factor[1], scale_factor[0], scale_factor[1] + ]) + + proposals = results['proposals'] + entity_boxes = results['ann']['entity_boxes'] + img_scale = np.array([img_w, img_h, img_w, img_h]) + entity_boxes = (entity_boxes * img_scale).astype(np.float32) + results['ann']['entity_boxes'] = entity_boxes * scale_factor + + if proposals is not None: + if proposals.shape[1] not in (4, 5): + raise AssertionError('proposals shape should be in (n, 4) or ' + f'(n, 5), but got {proposals.shape}') + if proposals.shape[1] == 5: + scores = proposals[:, 4].astype(np.float32) + proposals = proposals[:, :4] + else: + scores = None + proposals = (proposals * img_scale).astype(np.float32) + results['proposals'] = proposals * scale_factor + results['scores'] = scores + + return results + + +@PIPELINES.register_module() +class EntityBoxCrop: + """Crop the entity boxes and proposals according to the cropped images. + + Required keys are "proposals", "ann.entity_boxes", "crop_bbox", added or + modified keys are "ann.entity_boxes". If original "proposals" is not None, + "proposals" will be modified. 
+ """ + + def __call__(self, results): + proposals = results['proposals'] + entity_boxes = results['ann']['entity_boxes'] + crop_bboxes = results['crop_bbox'] + + if crop_bboxes is None: + return results + x1, y1, _, _ = crop_bboxes + + assert entity_boxes.shape[-1] % 4 == 0 + entity_boxes_ = entity_boxes.copy() + entity_boxes_[..., 0::2] = entity_boxes[..., 0::2] - x1 + entity_boxes_[..., 1::2] = entity_boxes[..., 1::2] - y1 + results['ann']['entity_boxes'] = entity_boxes_ + + if proposals is not None: + assert proposals.shape[-1] % 4 == 0 + proposals_ = proposals.copy() + proposals_[..., 0::2] = proposals[..., 0::2] - x1 + proposals_[..., 1::2] = proposals[..., 1::2] - y1 + results['proposals'] = proposals_ + return results + + +@PIPELINES.register_module() +class EntityBoxFlip: + """Flip the entity boxes and proposals with a probability. + + Reverse the order of elements in the given bounding boxes and proposals + with a specific direction. The shape of them are preserved, but the + elements are reordered. + + Required keys are "proposals", "img_shape", "ann.entity_boxes", added or + modified keys are "flip", "flip_direction", "ann.entity_boxes". If + "proposals" is not, it will also be modified. + + Args: + flip_ratio (float): Probability of implementing flip. Default: 0.5. + direction (str): Flip imgs horizontally or vertically. Options are + "horizontal" | "vertical". Default: "horizontal". + """ + + _directions = ['horizontal', 'vertical'] + + def __init__(self, flip_ratio=0.5, direction='horizontal'): + if direction not in self._directions: + raise ValueError(f'Direction {direction} is not supported. ' + f'Currently support ones are {self._directions}') + self.flip_ratio = flip_ratio + self.direction = direction + + def __call__(self, results): + flip = np.random.rand() < self.flip_ratio + + results['flip'] = flip + results['flip_direction'] = self.direction + + proposals = results['proposals'] + entity_boxes = results['ann']['entity_boxes'] + img_h, img_w = results['img_shape'] + + if flip: + if self.direction == 'horizontal': + assert entity_boxes.shape[-1] % 4 == 0 + entity_boxes_ = entity_boxes.copy() + entity_boxes_[..., 0::4] = img_w - entity_boxes[..., 2::4] - 1 + entity_boxes_[..., 2::4] = img_w - entity_boxes[..., 0::4] - 1 + if proposals is not None: + assert proposals.shape[-1] % 4 == 0 + proposals_ = proposals.copy() + proposals_[..., 0::4] = img_w - proposals[..., 2::4] - 1 + proposals_[..., 2::4] = img_w - proposals[..., 0::4] - 1 + else: + proposals_ = None + else: + assert entity_boxes.shape[-1] % 4 == 0 + entity_boxes_ = entity_boxes.copy() + entity_boxes_[..., 1::4] = img_h - entity_boxes[..., 3::4] - 1 + entity_boxes_[..., 3::4] = img_h - entity_boxes[..., 1::4] - 1 + if proposals is not None: + assert proposals.shape[-1] % 4 == 0 + proposals_ = proposals.copy() + proposals_[..., 1::4] = img_h - proposals[..., 3::4] - 1 + proposals_[..., 3::4] = img_h - proposals[..., 1::4] - 1 + else: + proposals_ = None + + results['proposals'] = proposals_ + results['ann']['entity_boxes'] = entity_boxes_ + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'flip_ratio={self.flip_ratio}, ' + f'direction={self.direction})') + return repr_str + + +@PIPELINES.register_module() +class EntityBoxClip: + """Clip (limit) the values in the entity boxes and proposals. + + Required keys are "img_shape", "proposals" and "ann.entity_boxes", added or + modified keys are "ann.entity_boxes". If "proposals" is None, it will also + be modified. 
+ """ + + def __call__(self, results): + proposals = results['proposals'] + entity_boxes = results['ann']['entity_boxes'] + img_h, img_w = results['img_shape'] + + entity_boxes[:, 0::2] = np.clip(entity_boxes[:, 0::2], 0, img_w - 1) + entity_boxes[:, 1::2] = np.clip(entity_boxes[:, 1::2], 0, img_h - 1) + if proposals is not None: + proposals[:, 0::2] = np.clip(proposals[:, 0::2], 0, img_w - 1) + proposals[:, 1::2] = np.clip(proposals[:, 1::2], 0, img_h - 1) + + results['ann']['entity_boxes'] = entity_boxes + results['proposals'] = proposals + return results + + +@PIPELINES.register_module() +class EntityBoxPad: + """Pad entity boxes and proposals with zeros. + + Required keys are "proposals" and "ann.entity_boxes", added or modified + keys are "ann.entity_boxes". If "proposals" is not None, it is also + modified. + + Args: + max_num_gts (int | None): maximum of ground truth proposals. + Default: None. + """ + + def __init__(self, max_num_gts=None): + self.max_num_gts = max_num_gts + + def __call__(self, results): + if self.max_num_gts is None: + return results + + proposals = results['proposals'] + entity_boxes = results['ann']['entity_boxes'] + num_gts = entity_boxes.shape[0] + + padded_entity_boxes = np.zeros((self.max_num_gts, 4), dtype=np.float32) + padded_entity_boxes[:num_gts, :] = entity_boxes + if proposals is not None: + padded_proposals = np.zeros((self.max_num_gts, 4), + dtype=np.float32) + padded_proposals[:num_gts, :] = proposals + else: + padded_proposals = None + + results['proposals'] = padded_proposals + results['ann']['entity_boxes'] = padded_entity_boxes + return results + + def __repr__(self): + repr_str = f'{self.__class__.__name__}(max_num_gts={self.max_num_gts})' + return repr_str + + +@PIPELINES.register_module() +class RandomCrop: + """Vanilla square random crop that specifics the output size. + + Required keys in results are "imgs" and "img_shape", added or + modified keys are "imgs", "lazy"; Required keys in "lazy" are "flip", + "crop_bbox", added or modified key is "crop_bbox". + + Args: + size (int): The output size of the images. + lazy (bool): Determine whether to apply lazy operation. Default: False. + """ + + def __init__(self, size, lazy=False): + if not isinstance(size, int): + raise TypeError(f'Size must be an int, but got {type(size)}') + self.size = size + self.lazy = lazy + + def __call__(self, results): + """Performs the RandomCrop augmentation. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. 
+ """ + _init_lazy_if_proper(results, self.lazy) + + img_h, img_w = results['img_shape'] + assert self.size <= img_h and self.size <= img_w + + y_offset = 0 + x_offset = 0 + if img_h > self.size: + y_offset = int(np.random.randint(0, img_h - self.size)) + if img_w > self.size: + x_offset = int(np.random.randint(0, img_w - self.size)) + + new_h, new_w = self.size, self.size + + results['crop_bbox'] = np.array( + [x_offset, y_offset, x_offset + new_w, y_offset + new_h]) + results['img_shape'] = (new_h, new_w) + + if not self.lazy: + results['imgs'] = [ + img[y_offset:y_offset + new_h, x_offset:x_offset + new_w] + for img in results['imgs'] + ] + else: + lazyop = results['lazy'] + if lazyop['flip']: + raise NotImplementedError('Put Flip at last for now') + + # record crop_bbox in lazyop dict to ensure only crop once in Fuse + lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox'] + left = x_offset * (lazy_right - lazy_left) / img_w + right = (x_offset + new_w) * (lazy_right - lazy_left) / img_w + top = y_offset * (lazy_bottom - lazy_top) / img_h + bottom = (y_offset + new_h) * (lazy_bottom - lazy_top) / img_h + lazyop['crop_bbox'] = np.array([(lazy_left + left), + (lazy_top + top), + (lazy_left + right), + (lazy_top + bottom)], + dtype=np.float32) + + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(size={self.size}, ' + f'lazy={self.lazy})') + return repr_str + + +@PIPELINES.register_module() +class RandomResizedCrop: + """Random crop that specifics the area and height-weight ratio range. + + Required keys in results are "imgs", "img_shape", "crop_bbox" and "lazy", + added or modified keys are "imgs", "crop_bbox" and "lazy"; Required keys + in "lazy" are "flip", "crop_bbox", added or modified key is "crop_bbox". + + Args: + area_range (Tuple[float]): The candidate area scales range of + output cropped images. Default: (0.08, 1.0). + aspect_ratio_range (Tuple[float]): The candidate aspect ratio range of + output cropped images. Default: (3 / 4, 4 / 3). + lazy (bool): Determine whether to apply lazy operation. Default: False. + """ + + def __init__(self, + area_range=(0.08, 1.0), + aspect_ratio_range=(3 / 4, 4 / 3), + lazy=False): + self.area_range = area_range + self.aspect_ratio_range = aspect_ratio_range + self.lazy = lazy + if not mmcv.is_tuple_of(self.area_range, float): + raise TypeError(f'Area_range must be a tuple of float, ' + f'but got {type(area_range)}') + if not mmcv.is_tuple_of(self.aspect_ratio_range, float): + raise TypeError(f'Aspect_ratio_range must be a tuple of float, ' + f'but got {type(aspect_ratio_range)}') + + @staticmethod + def get_crop_bbox(img_shape, + area_range, + aspect_ratio_range, + max_attempts=10): + """Get a crop bbox given the area range and aspect ratio range. + + Args: + img_shape (Tuple[int]): Image shape + area_range (Tuple[float]): The candidate area scales range of + output cropped images. Default: (0.08, 1.0). + aspect_ratio_range (Tuple[float]): The candidate aspect + ratio range of output cropped images. Default: (3 / 4, 4 / 3). + max_attempts (int): The maximum of attempts. Default: 10. + max_attempts (int): Max attempts times to generate random candidate + bounding box. If it doesn't qualified one, the center bounding + box will be used. + Returns: + (list[int]) A random crop bbox within the area range and aspect + ratio range. 
+ """ + assert 0 < area_range[0] <= area_range[1] <= 1 + assert 0 < aspect_ratio_range[0] <= aspect_ratio_range[1] + + img_h, img_w = img_shape + area = img_h * img_w + + min_ar, max_ar = aspect_ratio_range + aspect_ratios = np.exp( + np.random.uniform( + np.log(min_ar), np.log(max_ar), size=max_attempts)) + target_areas = np.random.uniform(*area_range, size=max_attempts) * area + candidate_crop_w = np.round(np.sqrt(target_areas * + aspect_ratios)).astype(np.int32) + candidate_crop_h = np.round(np.sqrt(target_areas / + aspect_ratios)).astype(np.int32) + + for i in range(max_attempts): + crop_w = candidate_crop_w[i] + crop_h = candidate_crop_h[i] + if crop_h <= img_h and crop_w <= img_w: + x_offset = random.randint(0, img_w - crop_w) + y_offset = random.randint(0, img_h - crop_h) + return x_offset, y_offset, x_offset + crop_w, y_offset + crop_h + + # Fallback + crop_size = min(img_h, img_w) + x_offset = (img_w - crop_size) // 2 + y_offset = (img_h - crop_size) // 2 + return x_offset, y_offset, x_offset + crop_size, y_offset + crop_size + + def __call__(self, results): + """Performs the RandomResizeCrop augmentation. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + _init_lazy_if_proper(results, self.lazy) + + img_h, img_w = results['img_shape'] + + left, top, right, bottom = self.get_crop_bbox( + (img_h, img_w), self.area_range, self.aspect_ratio_range) + new_h, new_w = bottom - top, right - left + + results['crop_bbox'] = np.array([left, top, right, bottom]) + results['img_shape'] = (new_h, new_w) + + if not self.lazy: + results['imgs'] = [ + img[top:bottom, left:right] for img in results['imgs'] + ] + else: + lazyop = results['lazy'] + if lazyop['flip']: + raise NotImplementedError('Put Flip at last for now') + + # record crop_bbox in lazyop dict to ensure only crop once in Fuse + lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox'] + left = left * (lazy_right - lazy_left) / img_w + right = right * (lazy_right - lazy_left) / img_w + top = top * (lazy_bottom - lazy_top) / img_h + bottom = bottom * (lazy_bottom - lazy_top) / img_h + lazyop['crop_bbox'] = np.array([(lazy_left + left), + (lazy_top + top), + (lazy_left + right), + (lazy_top + bottom)], + dtype=np.float32) + + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'area_range={self.area_range}, ' + f'aspect_ratio_range={self.aspect_ratio_range}, ' + f'lazy={self.lazy})') + return repr_str + + +@PIPELINES.register_module() +class MultiScaleCrop: + """Crop images with a list of randomly selected scales. + + Randomly select the w and h scales from a list of scales. Scale of 1 means + the base size, which is the minimal of image weight and height. The scale + level of w and h is controlled to be smaller than a certain value to + prevent too large or small aspect ratio. + Required keys are "imgs", "img_shape", added or modified keys are "imgs", + "crop_bbox", "img_shape", "lazy" and "scales". Required keys in "lazy" are + "crop_bbox", added or modified key is "crop_bbox". + + Args: + input_size (int | tuple[int]): (w, h) of network input. + scales (tuple[float]): Weight and height scales to be selected. + max_wh_scale_gap (int): Maximum gap of w and h scale levels. + Default: 1. + random_crop (bool): If set to True, the cropping bbox will be randomly + sampled, otherwise it will be sampler from fixed regions. + Default: False. 
+ num_fixed_crops (int): If set to 5, the cropping bbox will keep 5 + basic fixed regions: "upper left", "upper right", "lower left", + "lower right", "center". If set to 13, the cropping bbox will + append another 8 fix regions: "center left", "center right", + "lower center", "upper center", "upper left quarter", + "upper right quarter", "lower left quarter", "lower right quarter". + Default: 5. + lazy (bool): Determine whether to apply lazy operation. Default: False. + """ + + def __init__(self, + input_size, + scales=(1, ), + max_wh_scale_gap=1, + random_crop=False, + num_fixed_crops=5, + lazy=False): + self.input_size = _pair(input_size) + if not mmcv.is_tuple_of(self.input_size, int): + raise TypeError(f'Input_size must be int or tuple of int, ' + f'but got {type(input_size)}') + + if not isinstance(scales, tuple): + raise TypeError(f'Scales must be tuple, but got {type(scales)}') + + if num_fixed_crops not in [5, 13]: + raise ValueError(f'Num_fix_crops must be in {[5, 13]}, ' + f'but got {num_fixed_crops}') + + self.scales = scales + self.max_wh_scale_gap = max_wh_scale_gap + self.random_crop = random_crop + self.num_fixed_crops = num_fixed_crops + self.lazy = lazy + + def __call__(self, results): + """Performs the MultiScaleCrop augmentation. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + _init_lazy_if_proper(results, self.lazy) + + img_h, img_w = results['img_shape'] + base_size = min(img_h, img_w) + crop_sizes = [int(base_size * s) for s in self.scales] + + candidate_sizes = [] + for i, h in enumerate(crop_sizes): + for j, w in enumerate(crop_sizes): + if abs(i - j) <= self.max_wh_scale_gap: + candidate_sizes.append([w, h]) + + crop_size = random.choice(candidate_sizes) + for i in range(2): + if abs(crop_size[i] - self.input_size[i]) < 3: + crop_size[i] = self.input_size[i] + + crop_w, crop_h = crop_size + + if self.random_crop: + x_offset = random.randint(0, img_w - crop_w) + y_offset = random.randint(0, img_h - crop_h) + else: + w_step = (img_w - crop_w) // 4 + h_step = (img_h - crop_h) // 4 + candidate_offsets = [ + (0, 0), # upper left + (4 * w_step, 0), # upper right + (0, 4 * h_step), # lower left + (4 * w_step, 4 * h_step), # lower right + (2 * w_step, 2 * h_step), # center + ] + if self.num_fixed_crops == 13: + extra_candidate_offsets = [ + (0, 2 * h_step), # center left + (4 * w_step, 2 * h_step), # center right + (2 * w_step, 4 * h_step), # lower center + (2 * w_step, 0 * h_step), # upper center + (1 * w_step, 1 * h_step), # upper left quarter + (3 * w_step, 1 * h_step), # upper right quarter + (1 * w_step, 3 * h_step), # lower left quarter + (3 * w_step, 3 * h_step) # lower right quarter + ] + candidate_offsets.extend(extra_candidate_offsets) + x_offset, y_offset = random.choice(candidate_offsets) + + new_h, new_w = crop_h, crop_w + + results['crop_bbox'] = np.array( + [x_offset, y_offset, x_offset + new_w, y_offset + new_h]) + results['img_shape'] = (new_h, new_w) + results['scales'] = self.scales + + if not self.lazy: + results['imgs'] = [ + img[y_offset:y_offset + new_h, x_offset:x_offset + new_w] + for img in results['imgs'] + ] + else: + lazyop = results['lazy'] + if lazyop['flip']: + raise NotImplementedError('Put Flip at last for now') + + # record crop_bbox in lazyop dict to ensure only crop once in Fuse + lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox'] + left = x_offset * (lazy_right - lazy_left) / img_w + right = (x_offset + new_w) * (lazy_right - lazy_left) / img_w + 
top = y_offset * (lazy_bottom - lazy_top) / img_h + bottom = (y_offset + new_h) * (lazy_bottom - lazy_top) / img_h + lazyop['crop_bbox'] = np.array([(lazy_left + left), + (lazy_top + top), + (lazy_left + right), + (lazy_top + bottom)], + dtype=np.float32) + + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'input_size={self.input_size}, scales={self.scales}, ' + f'max_wh_scale_gap={self.max_wh_scale_gap}, ' + f'random_crop={self.random_crop}, ' + f'num_fixed_crops={self.num_fixed_crops}, ' + f'lazy={self.lazy})') + return repr_str + + +@PIPELINES.register_module() +class Resize: + """Resize images to a specific size. + + Required keys are "imgs", "img_shape", "modality", added or modified + keys are "imgs", "img_shape", "keep_ratio", "scale_factor", "lazy", + "resize_size". Required keys in "lazy" is None, added or modified key is + "interpolation". + + Args: + scale (float | Tuple[int]): If keep_ratio is True, it serves as scaling + factor or maximum size: + If it is a float number, the image will be rescaled by this + factor, else if it is a tuple of 2 integers, the image will + be rescaled as large as possible within the scale. + Otherwise, it serves as (w, h) of output size. + keep_ratio (bool): If set to True, Images will be resized without + changing the aspect ratio. Otherwise, it will resize images to a + given size. Default: True. + interpolation (str): Algorithm used for interpolation: + "nearest" | "bilinear". Default: "bilinear". + lazy (bool): Determine whether to apply lazy operation. Default: False. + """ + + def __init__(self, + scale, + keep_ratio=True, + interpolation='bilinear', + lazy=False): + if isinstance(scale, float): + if scale <= 0: + raise ValueError(f'Invalid scale {scale}, must be positive.') + elif isinstance(scale, tuple): + max_long_edge = max(scale) + max_short_edge = min(scale) + if max_short_edge == -1: + # assign np.inf to long edge for rescaling short edge later. + scale = (np.inf, max_long_edge) + else: + raise TypeError( + f'Scale must be float or tuple of int, but got {type(scale)}') + self.scale = scale + self.keep_ratio = keep_ratio + self.interpolation = interpolation + self.lazy = lazy + + def __call__(self, results): + """Performs the Resize augmentation. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. 
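+
+        Example (an illustrative sketch of two common settings; the sizes
+        are placeholders):
+
+        .. code-block:: python
+
+            from mmaction.datasets.pipelines import Resize
+
+            # Rescale so the short edge becomes 256, keeping aspect ratio.
+            resize_short = Resize(scale=(-1, 256), keep_ratio=True)
+            # Resize to a fixed (w, h) = (224, 224), ignoring aspect ratio.
+            resize_fixed = Resize(scale=(224, 224), keep_ratio=False)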
+ """ + + _init_lazy_if_proper(results, self.lazy) + + if 'scale_factor' not in results: + results['scale_factor'] = np.array([1, 1], dtype=np.float32) + img_h, img_w = results['img_shape'] + + if self.keep_ratio: + new_w, new_h = mmcv.rescale_size((img_w, img_h), self.scale) + else: + new_w, new_h = self.scale + + self.scale_factor = np.array([new_w / img_w, new_h / img_h], + dtype=np.float32) + + results['img_shape'] = (new_h, new_w) + results['keep_ratio'] = self.keep_ratio + results['scale_factor'] = results['scale_factor'] * self.scale_factor + + if not self.lazy: + results['imgs'] = [ + mmcv.imresize( + img, (new_w, new_h), interpolation=self.interpolation) + for img in results['imgs'] + ] + else: + lazyop = results['lazy'] + if lazyop['flip']: + raise NotImplementedError('Put Flip at last for now') + lazyop['interpolation'] = self.interpolation + + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'scale={self.scale}, keep_ratio={self.keep_ratio}, ' + f'interpolation={self.interpolation}, ' + f'lazy={self.lazy})') + return repr_str + + +@PIPELINES.register_module() +class RandomRescale: + """Randomly resize images so that the short_edge is resized to a specific + size in a given range. The scale ratio is unchanged after resizing. + + Required keys are "imgs", "img_shape", "modality", added or modified + keys are "imgs", "img_shape", "keep_ratio", "scale_factor", "resize_size", + "short_edge". + + Args: + scale_range (tuple[int]): The range of short edge length. A closed + interval. + interpolation (str): Algorithm used for interpolation: + "nearest" | "bilinear". Default: "bilinear". + """ + + def __init__(self, scale_range, interpolation='bilinear'): + self.scale_range = scale_range + # make sure scale_range is legal, first make sure the type is OK + assert mmcv.is_tuple_of(scale_range, int) + assert len(scale_range) == 2 + assert scale_range[0] < scale_range[1] + assert np.all([x > 0 for x in scale_range]) + + self.keep_ratio = True + self.interpolation = interpolation + + def __call__(self, results): + """Performs the Resize augmentation. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + short_edge = np.random.randint(self.scale_range[0], + self.scale_range[1] + 1) + resize = Resize((-1, short_edge), + keep_ratio=True, + interpolation=self.interpolation, + lazy=False) + results = resize(results) + + results['short_edge'] = short_edge + return results + + def __repr__(self): + scale_range = self.scale_range + repr_str = (f'{self.__class__.__name__}(' + f'scale_range=({scale_range[0]}, {scale_range[1]}), ' + f'interpolation={self.interpolation})') + return repr_str + + +@PIPELINES.register_module() +class Flip: + """Flip the input images with a probability. + + Reverse the order of elements in the given imgs with a specific direction. + The shape of the imgs is preserved, but the elements are reordered. + Required keys are "imgs", "img_shape", "modality", added or modified + keys are "imgs", "lazy" and "flip_direction". Required keys in "lazy" is + None, added or modified key are "flip" and "flip_direction". + + Args: + flip_ratio (float): Probability of implementing flip. Default: 0.5. + direction (str): Flip imgs horizontally or vertically. Options are + "horizontal" | "vertical". Default: "horizontal". + lazy (bool): Determine whether to apply lazy operation. Default: False. 
+ """ + _directions = ['horizontal', 'vertical'] + + def __init__(self, flip_ratio=0.5, direction='horizontal', lazy=False): + if direction not in self._directions: + raise ValueError(f'Direction {direction} is not supported. ' + f'Currently support ones are {self._directions}') + self.flip_ratio = flip_ratio + self.direction = direction + self.lazy = lazy + + def __call__(self, results): + """Performs the Flip augmentation. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + _init_lazy_if_proper(results, self.lazy) + modality = results['modality'] + if modality == 'Flow': + assert self.direction == 'horizontal' + + flip = np.random.rand() < self.flip_ratio + + results['flip'] = flip + results['flip_direction'] = self.direction + + if not self.lazy: + if flip: + for i, img in enumerate(results['imgs']): + mmcv.imflip_(img, self.direction) + lt = len(results['imgs']) + for i in range(0, lt, 2): + # flow with even indexes are x_flow, which need to be + # inverted when doing horizontal flip + if modality == 'Flow': + results['imgs'][i] = mmcv.iminvert(results['imgs'][i]) + + else: + results['imgs'] = list(results['imgs']) + else: + lazyop = results['lazy'] + if lazyop['flip']: + raise NotImplementedError('Use one Flip please') + lazyop['flip'] = flip + lazyop['flip_direction'] = self.direction + + return results + + def __repr__(self): + repr_str = ( + f'{self.__class__.__name__}(' + f'flip_ratio={self.flip_ratio}, direction={self.direction}, ' + f'lazy={self.lazy})') + return repr_str + + +@PIPELINES.register_module() +class Normalize: + """Normalize images with the given mean and std value. + + Required keys are "imgs", "img_shape", "modality", added or modified + keys are "imgs" and "img_norm_cfg". If modality is 'Flow', additional + keys "scale_factor" is required + + Args: + mean (Sequence[float]): Mean values of different channels. + std (Sequence[float]): Std values of different channels. + to_bgr (bool): Whether to convert channels from RGB to BGR. + Default: False. + adjust_magnitude (bool): Indicate whether to adjust the flow magnitude + on 'scale_factor' when modality is 'Flow'. Default: False. 
+ """ + + def __init__(self, mean, std, to_bgr=False, adjust_magnitude=False): + if not isinstance(mean, Sequence): + raise TypeError( + f'Mean must be list, tuple or np.ndarray, but got {type(mean)}' + ) + + if not isinstance(std, Sequence): + raise TypeError( + f'Std must be list, tuple or np.ndarray, but got {type(std)}') + + self.mean = np.array(mean, dtype=np.float32) + self.std = np.array(std, dtype=np.float32) + self.to_bgr = to_bgr + self.adjust_magnitude = adjust_magnitude + + def __call__(self, results): + modality = results['modality'] + + if modality == 'RGB': + n = len(results['imgs']) + h, w, c = results['imgs'][0].shape + imgs = np.empty((n, h, w, c), dtype=np.float32) + for i, img in enumerate(results['imgs']): + imgs[i] = img + + for img in imgs: + mmcv.imnormalize_(img, self.mean, self.std, self.to_bgr) + + results['imgs'] = imgs + results['img_norm_cfg'] = dict( + mean=self.mean, std=self.std, to_bgr=self.to_bgr) + return results + if modality == 'Flow': + num_imgs = len(results['imgs']) + assert num_imgs % 2 == 0 + assert self.mean.shape[0] == 2 + assert self.std.shape[0] == 2 + n = num_imgs // 2 + h, w = results['imgs'][0].shape + x_flow = np.empty((n, h, w), dtype=np.float32) + y_flow = np.empty((n, h, w), dtype=np.float32) + for i in range(n): + x_flow[i] = results['imgs'][2 * i] + y_flow[i] = results['imgs'][2 * i + 1] + x_flow = (x_flow - self.mean[0]) / self.std[0] + y_flow = (y_flow - self.mean[1]) / self.std[1] + if self.adjust_magnitude: + x_flow = x_flow * results['scale_factor'][0] + y_flow = y_flow * results['scale_factor'][1] + imgs = np.stack([x_flow, y_flow], axis=-1) + results['imgs'] = imgs + args = dict( + mean=self.mean, + std=self.std, + to_bgr=self.to_bgr, + adjust_magnitude=self.adjust_magnitude) + results['img_norm_cfg'] = args + return results + raise NotImplementedError + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'mean={self.mean}, ' + f'std={self.std}, ' + f'to_bgr={self.to_bgr}, ' + f'adjust_magnitude={self.adjust_magnitude})') + return repr_str + + +@PIPELINES.register_module() +class ColorJitter: + """Randomly distort the brightness, contrast, saturation and hue of images, + and add PCA based noise into images. + + Note: The input images should be in RGB channel order. + + Code Reference: + https://gluon-cv.mxnet.io/_modules/gluoncv/data/transforms/experimental/image.html + https://mxnet.apache.org/api/python/docs/_modules/mxnet/image/image.html#LightingAug + + If specified to apply color space augmentation, it will distort the image + color space by changing brightness, contrast and saturation. Then, it will + add some random distort to the images in different color channels. + Note that the input images should be in original range [0, 255] and in RGB + channel sequence. + + Required keys are "imgs", added or modified keys are "imgs", "eig_val", + "eig_vec", "alpha_std" and "color_space_aug". + + Args: + color_space_aug (bool): Whether to apply color space augmentations. If + specified, it will change the brightness, contrast, saturation and + hue of images, then add PCA based noise to images. Otherwise, it + will directly add PCA based noise to images. Default: False. + alpha_std (float): Std in the normal Gaussian distribution of alpha. + eig_val (np.ndarray | None): Eigenvalues of [1 x 3] size for RGB + channel jitter. If set to None, it will use the default + eigenvalues. Default: None. + eig_vec (np.ndarray | None): Eigenvectors of [3 x 3] size for RGB + channel jitter. 
If set to None, it will use the default + eigenvectors. Default: None. + """ + + def __init__(self, + color_space_aug=False, + alpha_std=0.1, + eig_val=None, + eig_vec=None): + if eig_val is None: + # note that the data range should be [0, 255] + self.eig_val = np.array([55.46, 4.794, 1.148], dtype=np.float32) + else: + self.eig_val = eig_val + + if eig_vec is None: + self.eig_vec = np.array([[-0.5675, 0.7192, 0.4009], + [-0.5808, -0.0045, -0.8140], + [-0.5836, -0.6948, 0.4203]], + dtype=np.float32) + else: + self.eig_vec = eig_vec + + self.alpha_std = alpha_std + self.color_space_aug = color_space_aug + + @staticmethod + def brightness(img, delta): + """Brightness distortion. + + Args: + img (np.ndarray): An input image. + delta (float): Delta value to distort brightness. + It ranges from [-32, 32). + + Returns: + np.ndarray: A brightness distorted image. + """ + if np.random.rand() > 0.5: + img = img + np.float32(delta) + return img + + @staticmethod + def contrast(img, alpha): + """Contrast distortion. + + Args: + img (np.ndarray): An input image. + alpha (float): Alpha value to distort contrast. + It ranges from [0.6, 1.4). + + Returns: + np.ndarray: A contrast distorted image. + """ + if np.random.rand() > 0.5: + img = img * np.float32(alpha) + return img + + @staticmethod + def saturation(img, alpha): + """Saturation distortion. + + Args: + img (np.ndarray): An input image. + alpha (float): Alpha value to distort the saturation. + It ranges from [0.6, 1.4). + + Returns: + np.ndarray: A saturation distorted image. + """ + if np.random.rand() > 0.5: + gray = img * np.array([0.299, 0.587, 0.114], dtype=np.float32) + gray = np.sum(gray, 2, keepdims=True) + gray *= (1.0 - alpha) + img = img * alpha + img = img + gray + return img + + @staticmethod + def hue(img, alpha): + """Hue distortion. + + Args: + img (np.ndarray): An input image. + alpha (float): Alpha value to control the degree of rotation + for hue. It ranges from [-18, 18). + + Returns: + np.ndarray: A hue distorted image. + """ + if np.random.rand() > 0.5: + u = np.cos(alpha * np.pi) + w = np.sin(alpha * np.pi) + bt = np.array([[1.0, 0.0, 0.0], [0.0, u, -w], [0.0, w, u]], + dtype=np.float32) + tyiq = np.array([[0.299, 0.587, 0.114], [0.596, -0.274, -0.321], + [0.211, -0.523, 0.311]], + dtype=np.float32) + ityiq = np.array([[1.0, 0.956, 0.621], [1.0, -0.272, -0.647], + [1.0, -1.107, 1.705]], + dtype=np.float32) + t = np.dot(np.dot(ityiq, bt), tyiq).T + t = np.array(t, dtype=np.float32) + img = np.dot(img, t) + return img + + def __call__(self, results): + imgs = results['imgs'] + out = [] + if self.color_space_aug: + bright_delta = np.random.uniform(-32, 32) + contrast_alpha = np.random.uniform(0.6, 1.4) + saturation_alpha = np.random.uniform(0.6, 1.4) + hue_alpha = np.random.uniform(-18, 18) + jitter_coin = np.random.rand() + for img in imgs: + img = self.brightness(img, delta=bright_delta) + if jitter_coin > 0.5: + img = self.contrast(img, alpha=contrast_alpha) + img = self.saturation(img, alpha=saturation_alpha) + img = self.hue(img, alpha=hue_alpha) + else: + img = self.saturation(img, alpha=saturation_alpha) + img = self.hue(img, alpha=hue_alpha) + img = self.contrast(img, alpha=contrast_alpha) + out.append(img) + else: + out = imgs + + # Add PCA based noise + alpha = np.random.normal(0, self.alpha_std, size=(3, )) + rgb = np.array( + np.dot(self.eig_vec * alpha, self.eig_val), dtype=np.float32) + rgb = rgb[None, None, ...] 
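+        # ``rgb`` now has shape (1, 1, 3), so the same per-channel PCA noise
+        # is broadcast over the spatial dimensions of every frame in the clip.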
+ + results['imgs'] = [img + rgb for img in out] + results['eig_val'] = self.eig_val + results['eig_vec'] = self.eig_vec + results['alpha_std'] = self.alpha_std + results['color_space_aug'] = self.color_space_aug + + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'color_space_aug={self.color_space_aug}, ' + f'alpha_std={self.alpha_std}, ' + f'eig_val={self.eig_val}, ' + f'eig_vec={self.eig_vec})') + return repr_str + + +@PIPELINES.register_module() +class CenterCrop: + """Crop the center area from images. + + Required keys are "imgs", "img_shape", added or modified keys are "imgs", + "crop_bbox", "lazy" and "img_shape". Required keys in "lazy" is + "crop_bbox", added or modified key is "crop_bbox". + + Args: + crop_size (int | tuple[int]): (w, h) of crop size. + lazy (bool): Determine whether to apply lazy operation. Default: False. + """ + + def __init__(self, crop_size, lazy=False): + self.crop_size = _pair(crop_size) + self.lazy = lazy + if not mmcv.is_tuple_of(self.crop_size, int): + raise TypeError(f'Crop_size must be int or tuple of int, ' + f'but got {type(crop_size)}') + + def __call__(self, results): + """Performs the CenterCrop augmentation. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + _init_lazy_if_proper(results, self.lazy) + + img_h, img_w = results['img_shape'] + crop_w, crop_h = self.crop_size + + left = (img_w - crop_w) // 2 + top = (img_h - crop_h) // 2 + right = left + crop_w + bottom = top + crop_h + new_h, new_w = bottom - top, right - left + + results['crop_bbox'] = np.array([left, top, right, bottom]) + results['img_shape'] = (new_h, new_w) + + if not self.lazy: + results['imgs'] = [ + img[top:bottom, left:right] for img in results['imgs'] + ] + else: + lazyop = results['lazy'] + if lazyop['flip']: + raise NotImplementedError('Put Flip at last for now') + + # record crop_bbox in lazyop dict to ensure only crop once in Fuse + lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox'] + left = left * (lazy_right - lazy_left) / img_w + right = right * (lazy_right - lazy_left) / img_w + top = top * (lazy_bottom - lazy_top) / img_h + bottom = bottom * (lazy_bottom - lazy_top) / img_h + lazyop['crop_bbox'] = np.array([(lazy_left + left), + (lazy_top + top), + (lazy_left + right), + (lazy_top + bottom)], + dtype=np.float32) + + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(crop_size={self.crop_size}, ' + f'lazy={self.lazy})') + return repr_str + + +@PIPELINES.register_module() +class ThreeCrop: + """Crop images into three crops. + + Crop the images equally into three crops with equal intervals along the + shorter side. + Required keys are "imgs", "img_shape", added or modified keys are "imgs", + "crop_bbox" and "img_shape". + + Args: + crop_size(int | tuple[int]): (w, h) of crop size. + """ + + def __init__(self, crop_size): + self.crop_size = _pair(crop_size) + if not mmcv.is_tuple_of(self.crop_size, int): + raise TypeError(f'Crop_size must be int or tuple of int, ' + f'but got {type(crop_size)}') + + def __call__(self, results): + """Performs the ThreeCrop augmentation. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. 
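+
+        Example (an illustrative test-time setting; the sizes are
+        placeholders):
+
+        .. code-block:: python
+
+            from mmaction.datasets.pipelines import Resize, ThreeCrop
+
+            # Rescale the short edge to 256, then take left/middle/right
+            # (or top/middle/bottom) 256x256 crops.
+            test_transforms = [Resize(scale=(-1, 256)),
+                               ThreeCrop(crop_size=256)]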
+ """ + _init_lazy_if_proper(results, False) + + imgs = results['imgs'] + img_h, img_w = results['imgs'][0].shape[:2] + crop_w, crop_h = self.crop_size + assert crop_h == img_h or crop_w == img_w + + if crop_h == img_h: + w_step = (img_w - crop_w) // 2 + offsets = [ + (0, 0), # left + (2 * w_step, 0), # right + (w_step, 0), # middle + ] + elif crop_w == img_w: + h_step = (img_h - crop_h) // 2 + offsets = [ + (0, 0), # top + (0, 2 * h_step), # down + (0, h_step), # middle + ] + + cropped = [] + crop_bboxes = [] + for x_offset, y_offset in offsets: + bbox = [x_offset, y_offset, x_offset + crop_w, y_offset + crop_h] + crop = [ + img[y_offset:y_offset + crop_h, x_offset:x_offset + crop_w] + for img in imgs + ] + cropped.extend(crop) + crop_bboxes.extend([bbox for _ in range(len(imgs))]) + + crop_bboxes = np.array(crop_bboxes) + results['imgs'] = cropped + results['crop_bbox'] = crop_bboxes + results['img_shape'] = results['imgs'][0].shape[:2] + + return results + + def __repr__(self): + repr_str = f'{self.__class__.__name__}(crop_size={self.crop_size})' + return repr_str + + +@PIPELINES.register_module() +class TenCrop: + """Crop the images into 10 crops (corner + center + flip). + + Crop the four corners and the center part of the image with the same + given crop_size, and flip it horizontally. + Required keys are "imgs", "img_shape", added or modified keys are "imgs", + "crop_bbox" and "img_shape". + + Args: + crop_size(int | tuple[int]): (w, h) of crop size. + """ + + def __init__(self, crop_size): + self.crop_size = _pair(crop_size) + if not mmcv.is_tuple_of(self.crop_size, int): + raise TypeError(f'Crop_size must be int or tuple of int, ' + f'but got {type(crop_size)}') + + def __call__(self, results): + """Performs the TenCrop augmentation. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + _init_lazy_if_proper(results, False) + + imgs = results['imgs'] + + img_h, img_w = results['imgs'][0].shape[:2] + crop_w, crop_h = self.crop_size + + w_step = (img_w - crop_w) // 4 + h_step = (img_h - crop_h) // 4 + + offsets = [ + (0, 0), # upper left + (4 * w_step, 0), # upper right + (0, 4 * h_step), # lower left + (4 * w_step, 4 * h_step), # lower right + (2 * w_step, 2 * h_step), # center + ] + + img_crops = list() + crop_bboxes = list() + for x_offset, y_offsets in offsets: + crop = [ + img[y_offsets:y_offsets + crop_h, x_offset:x_offset + crop_w] + for img in imgs + ] + flip_crop = [np.flip(c, axis=1).copy() for c in crop] + bbox = [x_offset, y_offsets, x_offset + crop_w, y_offsets + crop_h] + img_crops.extend(crop) + img_crops.extend(flip_crop) + crop_bboxes.extend([bbox for _ in range(len(imgs) * 2)]) + + crop_bboxes = np.array(crop_bboxes) + results['imgs'] = img_crops + results['crop_bbox'] = crop_bboxes + results['img_shape'] = results['imgs'][0].shape[:2] + + return results + + def __repr__(self): + repr_str = f'{self.__class__.__name__}(crop_size={self.crop_size})' + return repr_str + + +@PIPELINES.register_module() +class MultiGroupCrop: + """Randomly crop the images into several groups. + + Crop the random region with the same given crop_size and bounding box + into several groups. + Required keys are "imgs", added or modified keys are "imgs", "crop_bbox" + and "img_shape". + + Args: + crop_size(int | tuple[int]): (w, h) of crop size. + groups(int): Number of groups. 
+ """ + + def __init__(self, crop_size, groups): + self.crop_size = _pair(crop_size) + self.groups = groups + if not mmcv.is_tuple_of(self.crop_size, int): + raise TypeError('Crop size must be int or tuple of int, ' + f'but got {type(crop_size)}') + + if not isinstance(groups, int): + raise TypeError(f'Groups must be int, but got {type(groups)}.') + + if groups <= 0: + raise ValueError('Groups must be positive.') + + def __call__(self, results): + """Performs the MultiGroupCrop augmentation. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + imgs = results['imgs'] + img_h, img_w = imgs[0].shape[:2] + crop_w, crop_h = self.crop_size + + img_crops = [] + crop_bboxes = [] + for _ in range(self.groups): + x_offset = random.randint(0, img_w - crop_w) + y_offset = random.randint(0, img_h - crop_h) + + bbox = [x_offset, y_offset, x_offset + crop_w, y_offset + crop_h] + crop = [ + img[y_offset:y_offset + crop_h, x_offset:x_offset + crop_w] + for img in imgs + ] + img_crops.extend(crop) + crop_bboxes.extend([bbox for _ in range(len(imgs))]) + + crop_bboxes = np.array(crop_bboxes) + results['imgs'] = img_crops + results['crop_bbox'] = crop_bboxes + results['img_shape'] = results['imgs'][0].shape[:2] + + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}' + f'(crop_size={self.crop_size}, ' + f'groups={self.groups})') + return repr_str + + +@PIPELINES.register_module() +class AudioAmplify: + """Amplify the waveform. + + Required keys are "audios", added or modified keys are "audios", + "amplify_ratio". + + Args: + ratio (float): The ratio used to amplify the audio waveform. + """ + + def __init__(self, ratio): + if isinstance(ratio, float): + self.ratio = ratio + else: + raise TypeError('Amplification ratio should be float.') + + def __call__(self, results): + """Perfrom the audio amplification. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + + assert 'audios' in results + results['audios'] *= self.ratio + results['amplify_ratio'] = self.ratio + + return results + + def __repr__(self): + repr_str = f'{self.__class__.__name__}(ratio={self.ratio})' + return repr_str + + +@PIPELINES.register_module() +class MelSpectrogram: + """MelSpectrogram. Transfer an audio wave into a melspectogram figure. + + Required keys are "audios", "sample_rate", "num_clips", added or modified + keys are "audios". + + Args: + window_size (int): The window size in milisecond. Default: 32. + step_size (int): The step size in milisecond. Default: 16. + n_mels (int): Number of mels. Default: 80. + fixed_length (int): The sample length of melspectrogram maybe not + exactly as wished due to different fps, fix the length for batch + collation by truncating or padding. Default: 128. + """ + + def __init__(self, + window_size=32, + step_size=16, + n_mels=80, + fixed_length=128): + if all( + isinstance(x, int) + for x in [window_size, step_size, n_mels, fixed_length]): + self.window_size = window_size + self.step_size = step_size + self.n_mels = n_mels + self.fixed_length = fixed_length + else: + raise TypeError('All arguments should be int.') + + def __call__(self, results): + """Perform MelSpectrogram transformation. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. 
+ """ + try: + import librosa + except ImportError: + raise ImportError('Install librosa first.') + signals = results['audios'] + sample_rate = results['sample_rate'] + n_fft = int(round(sample_rate * self.window_size / 1000)) + hop_length = int(round(sample_rate * self.step_size / 1000)) + melspectrograms = list() + for clip_idx in range(results['num_clips']): + clip_signal = signals[clip_idx] + mel = librosa.feature.melspectrogram( + y=clip_signal, + sr=sample_rate, + n_fft=n_fft, + hop_length=hop_length, + n_mels=self.n_mels) + if mel.shape[0] >= self.fixed_length: + mel = mel[:self.fixed_length, :] + else: + mel = np.pad( + mel, ((0, mel.shape[-1] - self.fixed_length), (0, 0)), + mode='edge') + melspectrograms.append(mel) + + results['audios'] = np.array(melspectrograms) + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}' + f'(window_size={self.window_size}), ' + f'step_size={self.step_size}, ' + f'n_mels={self.n_mels}, ' + f'fixed_length={self.fixed_length})') + return repr_str diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/datasets/pipelines/compose.py b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/pipelines/compose.py new file mode 100644 index 0000000..f6f557a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/pipelines/compose.py @@ -0,0 +1,52 @@ +from collections.abc import Sequence + +from mmcv.utils import build_from_cfg + +from ..registry import PIPELINES + + +@PIPELINES.register_module() +class Compose: + """Compose a data pipeline with a sequence of transforms. + + Args: + transforms (list[dict | callable]): + Either config dicts of transforms or transform objects. + """ + + def __init__(self, transforms): + assert isinstance(transforms, Sequence) + self.transforms = [] + for transform in transforms: + if isinstance(transform, dict): + transform = build_from_cfg(transform, PIPELINES) + self.transforms.append(transform) + elif callable(transform): + self.transforms.append(transform) + else: + raise TypeError(f'transform must be callable or a dict, ' + f'but got {type(transform)}') + + def __call__(self, data): + """Call function to apply transforms sequentially. + + Args: + data (dict): A result dict contains the data to transform. + + Returns: + dict: Transformed data. + """ + + for t in self.transforms: + data = t(data) + if data is None: + return None + return data + + def __repr__(self): + format_string = self.__class__.__name__ + '(' + for t in self.transforms: + format_string += '\n' + format_string += ' {0}'.format(t) + format_string += '\n)' + return format_string diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/datasets/pipelines/formating.py b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/pipelines/formating.py new file mode 100644 index 0000000..fd5502f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/pipelines/formating.py @@ -0,0 +1,321 @@ +from collections.abc import Sequence + +import mmcv +import numpy as np +import torch +from mmcv.parallel import DataContainer as DC + +from ..registry import PIPELINES + + +def to_tensor(data): + """Convert objects of various python types to :obj:`torch.Tensor`. + + Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, + :class:`Sequence`, :class:`int` and :class:`float`. 
+ """ + if isinstance(data, torch.Tensor): + return data + if isinstance(data, np.ndarray): + return torch.from_numpy(data) + if isinstance(data, Sequence) and not mmcv.is_str(data): + return torch.tensor(data) + if isinstance(data, int): + return torch.LongTensor([data]) + if isinstance(data, float): + return torch.FloatTensor([data]) + raise TypeError(f'type {type(data)} cannot be converted to tensor.') + + +@PIPELINES.register_module() +class ToTensor: + """Convert some values in results dict to `torch.Tensor` type in data + loader pipeline. + + Args: + keys (Sequence[str]): Required keys to be converted. + """ + + def __init__(self, keys): + self.keys = keys + + def __call__(self, results): + """Performs the ToTensor formating. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + for key in self.keys: + results[key] = to_tensor(results[key]) + return results + + def __repr__(self): + return f'{self.__class__.__name__}(keys={self.keys})' + + +@PIPELINES.register_module() +class ToDataContainer: + """Convert the data to DataContainer. + + Args: + fields (Sequence[dict]): Required fields to be converted + with keys and attributes. E.g. + fields=(dict(key='gt_bbox', stack=False),). + """ + + def __init__(self, fields): + self.fields = fields + + def __call__(self, results): + """Performs the ToDataContainer formating. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + for field in self.fields: + _field = field.copy() + key = _field.pop('key') + results[key] = DC(results[key], **_field) + return results + + def __repr__(self): + return self.__class__.__name__ + f'(fields={self.fields})' + + +@PIPELINES.register_module() +class ImageToTensor: + """Convert image type to `torch.Tensor` type. + + Args: + keys (Sequence[str]): Required keys to be converted. + """ + + def __init__(self, keys): + self.keys = keys + + def __call__(self, results): + """Performs the ImageToTensor formating. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + for key in self.keys: + results[key] = to_tensor(results[key].transpose(2, 0, 1)) + return results + + def __repr__(self): + return f'{self.__class__.__name__}(keys={self.keys})' + + +@PIPELINES.register_module() +class Transpose: + """Transpose image channels to a given order. + + Args: + keys (Sequence[str]): Required keys to be converted. + order (Sequence[int]): Image channel order. + """ + + def __init__(self, keys, order): + self.keys = keys + self.order = order + + def __call__(self, results): + """Performs the Transpose formatting. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + for key in self.keys: + results[key] = results[key].transpose(self.order) + return results + + def __repr__(self): + return (f'{self.__class__.__name__}(' + f'keys={self.keys}, order={self.order})') + + +@PIPELINES.register_module() +class Collect: + """Collect data from the loader relevant to the specific task. + + This keeps the items in ``keys`` as it is, and collect items in + ``meta_keys`` into a meta item called ``meta_name``.This is usually + the last stage of the data loader pipeline. 
+    For example, when keys='imgs', meta_keys=('filename', 'label',
+    'original_shape'), meta_name='img_meta', the results will be a dict with
+    keys 'imgs' and 'img_meta', where 'img_meta' is a DataContainer of another
+    dict with keys 'filename', 'label', 'original_shape'.
+
+    Args:
+        keys (Sequence[str]): Required keys to be collected.
+        meta_name (str): The name of the key that contains meta information.
+            This key is always populated. Default: "img_meta".
+        meta_keys (Sequence[str]): Keys that are collected under meta_name.
+            The contents of the ``meta_name`` dictionary depend on
+            ``meta_keys``.
+            By default this includes:
+
+            - "filename": path to the image file
+            - "label": label of the image file
+            - "original_shape": original shape of the image as a tuple
+                (h, w, c)
+            - "img_shape": shape of the image input to the network as a tuple
+                (h, w, c). Note that images may be zero padded on the
+                bottom/right, if the batch tensor is larger than this shape.
+            - "pad_shape": image shape after padding
+            - "flip_direction": a str in ("horizontal", "vertical") to
+                indicate if the image is flipped horizontally or vertically.
+            - "img_norm_cfg": a dict of normalization information:
+                - mean - per channel mean subtraction
+                - std - per channel std divisor
+                - to_rgb - bool indicating if bgr was converted to rgb
+    """
+
+    def __init__(self,
+                 keys,
+                 meta_keys=('filename', 'label', 'original_shape', 'img_shape',
+                            'pad_shape', 'flip_direction', 'img_norm_cfg'),
+                 meta_name='img_meta'):
+        self.keys = keys
+        self.meta_keys = meta_keys
+        self.meta_name = meta_name
+
+    def __call__(self, results):
+        """Performs the Collect formatting.
+
+        Args:
+            results (dict): The resulting dict to be modified and passed
+                to the next transform in pipeline.
+        """
+        data = {}
+        for key in self.keys:
+            data[key] = results[key]
+
+        if len(self.meta_keys) != 0:
+            meta = {}
+            for key in self.meta_keys:
+                meta[key] = results[key]
+            data[self.meta_name] = DC(meta, cpu_only=True)
+
+        return data
+
+    def __repr__(self):
+        return (f'{self.__class__.__name__}('
+                f'keys={self.keys}, meta_keys={self.meta_keys})')
+
+
+@PIPELINES.register_module()
+class FormatShape:
+    """Format final imgs shape to the given input_format.
+
+    Required keys are "imgs", "num_clips" and "clip_len", added or modified
+    keys are "imgs" and "input_shape".
+
+    Args:
+        input_format (str): Define the final imgs format.
+    """
+
+    def __init__(self, input_format):
+        self.input_format = input_format
+        if self.input_format not in ['NCTHW', 'NCHW', 'NCHW_Flow', 'NPTCHW']:
+            raise ValueError(
+                f'The input format {self.input_format} is invalid.')
+
+    def __call__(self, results):
+        """Performs the FormatShape formatting.
+
+        Args:
+            results (dict): The resulting dict to be modified and passed
+                to the next transform in pipeline.
+ """ + imgs = results['imgs'] + # [M x H x W x C] + # M = 1 * N_crops * N_clips * L + if self.input_format == 'NCTHW': + num_clips = results['num_clips'] + clip_len = results['clip_len'] + + imgs = imgs.reshape((-1, num_clips, clip_len) + imgs.shape[1:]) + # N_crops x N_clips x L x H x W x C + imgs = np.transpose(imgs, (0, 1, 5, 2, 3, 4)) + # N_crops x N_clips x C x L x H x W + imgs = imgs.reshape((-1, ) + imgs.shape[2:]) + # M' x C x L x H x W + # M' = N_crops x N_clips + elif self.input_format == 'NCHW': + imgs = np.transpose(imgs, (0, 3, 1, 2)) + # M x C x H x W + elif self.input_format == 'NCHW_Flow': + num_clips = results['num_clips'] + clip_len = results['clip_len'] + imgs = imgs.reshape((-1, num_clips, clip_len) + imgs.shape[1:]) + # N_crops x N_clips x L x H x W x C + imgs = np.transpose(imgs, (0, 1, 2, 5, 3, 4)) + # N_crops x N_clips x L x C x H x W + imgs = imgs.reshape((-1, imgs.shape[2] * imgs.shape[3]) + + imgs.shape[4:]) + # M' x C' x H x W + # M' = N_crops x N_clips + # C' = L x C + elif self.input_format == 'NPTCHW': + num_proposals = results['num_proposals'] + num_clips = results['num_clips'] + clip_len = results['clip_len'] + imgs = imgs.reshape((num_proposals, num_clips * clip_len) + + imgs.shape[1:]) + # P x M x H x W x C + # M = N_clips x L + imgs = np.transpose(imgs, (0, 1, 4, 2, 3)) + # P x M x C x H x W + + results['imgs'] = imgs + results['input_shape'] = imgs.shape + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f"(input_format='{self.input_format}')" + return repr_str + + +@PIPELINES.register_module() +class FormatAudioShape: + """Format final audio shape to the given input_format. + + Required keys are "imgs", "num_clips" and "clip_len", added or modified + keys are "imgs" and "input_shape". + + Args: + input_format (str): Define the final imgs format. + """ + + def __init__(self, input_format): + self.input_format = input_format + if self.input_format not in ['NCTF']: + raise ValueError( + f'The input format {self.input_format} is invalid.') + + def __call__(self, results): + """Performs the FormatShape formatting. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + audios = results['audios'] + # clip x sample x freq -> clip x channel x sample x freq + clip, sample, freq = audios.shape + audios = audios.reshape(clip, 1, sample, freq) + results['audios'] = audios + results['input_shape'] = audios.shape + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f"(input_format='{self.input_format}')" + return repr_str diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/datasets/pipelines/loading.py b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/pipelines/loading.py new file mode 100644 index 0000000..bde5e94 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/pipelines/loading.py @@ -0,0 +1,1633 @@ +import io +import os +import os.path as osp +import shutil +import warnings + +import mmcv +import numpy as np +import torch +from mmcv.fileio import FileClient +from torch.nn.modules.utils import _pair + +from ...utils import get_random_string, get_shm_dir, get_thread_id +from ..registry import PIPELINES + + +@PIPELINES.register_module() +class LoadHVULabel: + """Convert the HVU label from dictionaries to torch tensors. + + Required keys are "label", "categories", "category_nums", added or modified + keys are "label", "mask" and "category_mask". 
+ """ + + def __init__(self, **kwargs): + self.hvu_initialized = False + self.kwargs = kwargs + + def init_hvu_info(self, categories, category_nums): + assert len(categories) == len(category_nums) + self.categories = categories + self.category_nums = category_nums + self.num_categories = len(self.categories) + self.num_tags = sum(self.category_nums) + self.category2num = dict(zip(categories, category_nums)) + self.start_idx = [0] + for i in range(self.num_categories - 1): + self.start_idx.append(self.start_idx[-1] + self.category_nums[i]) + self.category2startidx = dict(zip(categories, self.start_idx)) + self.hvu_initialized = True + + def __call__(self, results): + """Convert the label dictionary to 3 tensors: "label", "mask" and + "category_mask". + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + + if not self.hvu_initialized: + self.init_hvu_info(results['categories'], results['category_nums']) + + onehot = torch.zeros(self.num_tags) + onehot_mask = torch.zeros(self.num_tags) + category_mask = torch.zeros(self.num_categories) + + for category, tags in results['label'].items(): + category_mask[self.categories.index(category)] = 1. + start_idx = self.category2startidx[category] + category_num = self.category2num[category] + tags = [idx + start_idx for idx in tags] + onehot[tags] = 1. + onehot_mask[start_idx:category_num + start_idx] = 1. + + results['label'] = onehot + results['mask'] = onehot_mask + results['category_mask'] = category_mask + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'hvu_initialized={self.hvu_initialized})') + return repr_str + + +@PIPELINES.register_module() +class SampleFrames: + """Sample frames from the video. + + Required keys are "filename", "total_frames", "start_index" , added or + modified keys are "frame_inds", "frame_interval" and "num_clips". + + Args: + clip_len (int): Frames of each sampled output clip. + frame_interval (int): Temporal interval of adjacent sampled frames. + Default: 1. + num_clips (int): Number of clips to be sampled. Default: 1. + temporal_jitter (bool): Whether to apply temporal jittering. + Default: False. + twice_sample (bool): Whether to use twice sample when testing. + If set to True, it will sample frames with and without fixed shift, + which is commonly used for testing in TSM model. Default: False. + out_of_bound_opt (str): The way to deal with out of bounds frame + indexes. Available options are 'loop', 'repeat_last'. + Default: 'loop'. + test_mode (bool): Store True when building test or validation dataset. + Default: False. + start_index (None): This argument is deprecated and moved to dataset + class (``BaseDataset``, ``VideoDatset``, ``RawframeDataset``, etc), + see this: https://github.com/open-mmlab/mmaction2/pull/89. 
+ """ + + def __init__(self, + clip_len, + frame_interval=1, + num_clips=1, + temporal_jitter=False, + twice_sample=False, + out_of_bound_opt='loop', + test_mode=False, + start_index=None): + + self.clip_len = clip_len + self.frame_interval = frame_interval + self.num_clips = num_clips + self.temporal_jitter = temporal_jitter + self.twice_sample = twice_sample + self.out_of_bound_opt = out_of_bound_opt + self.test_mode = test_mode + assert self.out_of_bound_opt in ['loop', 'repeat_last'] + + if start_index is not None: + warnings.warn('No longer support "start_index" in "SampleFrames", ' + 'it should be set in dataset class, see this pr: ' + 'https://github.com/open-mmlab/mmaction2/pull/89') + + def _get_train_clips(self, num_frames): + """Get clip offsets in train mode. + + It will calculate the average interval for selected frames, + and randomly shift them within offsets between [0, avg_interval]. + If the total number of frames is smaller than clips num or origin + frames length, it will return all zero indices. + + Args: + num_frames (int): Total number of frame in the video. + + Returns: + np.ndarray: Sampled frame indices in train mode. + """ + ori_clip_len = self.clip_len * self.frame_interval + avg_interval = (num_frames - ori_clip_len + 1) // self.num_clips + + if avg_interval > 0: + base_offsets = np.arange(self.num_clips) * avg_interval + clip_offsets = base_offsets + np.random.randint( + avg_interval, size=self.num_clips) + elif num_frames > max(self.num_clips, ori_clip_len): + clip_offsets = np.sort( + np.random.randint( + num_frames - ori_clip_len + 1, size=self.num_clips)) + elif avg_interval == 0: + ratio = (num_frames - ori_clip_len + 1.0) / self.num_clips + clip_offsets = np.around(np.arange(self.num_clips) * ratio) + else: + clip_offsets = np.zeros((self.num_clips, ), dtype=np.int) + + return clip_offsets + + def _get_test_clips(self, num_frames): + """Get clip offsets in test mode. + + Calculate the average interval for selected frames, and shift them + fixedly by avg_interval/2. If set twice_sample True, it will sample + frames together without fixed shift. If the total number of frames is + not enough, it will return all zero indices. + + Args: + num_frames (int): Total number of frame in the video. + + Returns: + np.ndarray: Sampled frame indices in test mode. + """ + ori_clip_len = self.clip_len * self.frame_interval + avg_interval = (num_frames - ori_clip_len + 1) / float(self.num_clips) + if num_frames > ori_clip_len - 1: + base_offsets = np.arange(self.num_clips) * avg_interval + clip_offsets = (base_offsets + avg_interval / 2.0).astype(np.int) + if self.twice_sample: + clip_offsets = np.concatenate([clip_offsets, base_offsets]) + else: + clip_offsets = np.zeros((self.num_clips, ), dtype=np.int) + return clip_offsets + + def _sample_clips(self, num_frames): + """Choose clip offsets for the video in a given mode. + + Args: + num_frames (int): Total number of frame in the video. + + Returns: + np.ndarray: Sampled frame indices. + """ + if self.test_mode: + clip_offsets = self._get_test_clips(num_frames) + else: + clip_offsets = self._get_train_clips(num_frames) + + return clip_offsets + + def __call__(self, results): + """Perform the SampleFrames loading. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. 
+ """ + total_frames = results['total_frames'] + + clip_offsets = self._sample_clips(total_frames) + frame_inds = clip_offsets[:, None] + np.arange( + self.clip_len)[None, :] * self.frame_interval + frame_inds = np.concatenate(frame_inds) + + if self.temporal_jitter: + perframe_offsets = np.random.randint( + self.frame_interval, size=len(frame_inds)) + frame_inds += perframe_offsets + + frame_inds = frame_inds.reshape((-1, self.clip_len)) + if self.out_of_bound_opt == 'loop': + frame_inds = np.mod(frame_inds, total_frames) + elif self.out_of_bound_opt == 'repeat_last': + safe_inds = frame_inds < total_frames + unsafe_inds = 1 - safe_inds + last_ind = np.max(safe_inds * frame_inds, axis=1) + new_inds = (safe_inds * frame_inds + (unsafe_inds.T * last_ind).T) + frame_inds = new_inds + else: + raise ValueError('Illegal out_of_bound option.') + + start_index = results['start_index'] + frame_inds = np.concatenate(frame_inds) + start_index + results['frame_inds'] = frame_inds.astype(np.int) + results['clip_len'] = self.clip_len + results['frame_interval'] = self.frame_interval + results['num_clips'] = self.num_clips + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'clip_len={self.clip_len}, ' + f'frame_interval={self.frame_interval}, ' + f'num_clips={self.num_clips}, ' + f'temporal_jitter={self.temporal_jitter}, ' + f'twice_sample={self.twice_sample}, ' + f'out_of_bound_opt={self.out_of_bound_opt}, ' + f'test_mode={self.test_mode})') + return repr_str + + +@PIPELINES.register_module() +class UntrimmedSampleFrames: + """Sample frames from the untrimmed video. + + Required keys are "filename", "total_frames", added or modified keys are + "frame_inds", "frame_interval" and "num_clips". + + Args: + clip_len (int): The length of sampled clips. Default: 1. + frame_interval (int): Temporal interval of adjacent sampled frames. + Default: 16. + start_index (None): This argument is deprecated and moved to dataset + class (``BaseDataset``, ``VideoDatset``, ``RawframeDataset``, etc), + see this: https://github.com/open-mmlab/mmaction2/pull/89. + """ + + def __init__(self, clip_len=1, frame_interval=16, start_index=None): + + self.clip_len = clip_len + self.frame_interval = frame_interval + + if start_index is not None: + warnings.warn('No longer support "start_index" in "SampleFrames", ' + 'it should be set in dataset class, see this pr: ' + 'https://github.com/open-mmlab/mmaction2/pull/89') + + def __call__(self, results): + """Perform the SampleFrames loading. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. 
+ """ + total_frames = results['total_frames'] + start_index = results['start_index'] + + clip_centers = np.arange(self.frame_interval // 2, total_frames, + self.frame_interval) + num_clips = clip_centers.shape[0] + frame_inds = clip_centers[:, None] + np.arange( + -(self.clip_len // 2), self.clip_len - + (self.clip_len // 2))[None, :] + # clip frame_inds to legal range + frame_inds = np.clip(frame_inds, 0, total_frames - 1) + + frame_inds = np.concatenate(frame_inds) + start_index + results['frame_inds'] = frame_inds.astype(np.int) + results['clip_len'] = self.clip_len + results['frame_interval'] = self.frame_interval + results['num_clips'] = num_clips + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'clip_len={self.clip_len}, ' + f'frame_interval={self.frame_interval})') + return repr_str + + +@PIPELINES.register_module() +class DenseSampleFrames(SampleFrames): + """Select frames from the video by dense sample strategy. + + Required keys are "filename", added or modified keys are "total_frames", + "frame_inds", "frame_interval" and "num_clips". + + Args: + clip_len (int): Frames of each sampled output clip. + frame_interval (int): Temporal interval of adjacent sampled frames. + Default: 1. + num_clips (int): Number of clips to be sampled. Default: 1. + sample_range (int): Total sample range for dense sample. + Default: 64. + num_sample_positions (int): Number of sample start positions, Which is + only used in test mode. Default: 10. That is to say, by default, + there are at least 10 clips for one input sample in test mode. + temporal_jitter (bool): Whether to apply temporal jittering. + Default: False. + test_mode (bool): Store True when building test or validation dataset. + Default: False. + """ + + def __init__(self, + clip_len, + frame_interval=1, + num_clips=1, + sample_range=64, + num_sample_positions=10, + temporal_jitter=False, + out_of_bound_opt='loop', + test_mode=False): + super().__init__( + clip_len, + frame_interval, + num_clips, + temporal_jitter, + out_of_bound_opt=out_of_bound_opt, + test_mode=test_mode) + self.sample_range = sample_range + self.num_sample_positions = num_sample_positions + + def _get_train_clips(self, num_frames): + """Get clip offsets by dense sample strategy in train mode. + + It will calculate a sample position and sample interval and set + start index 0 when sample_pos == 1 or randomly choose from + [0, sample_pos - 1]. Then it will shift the start index by each + base offset. + + Args: + num_frames (int): Total number of frame in the video. + + Returns: + np.ndarray: Sampled frame indices in train mode. + """ + sample_position = max(1, 1 + num_frames - self.sample_range) + interval = self.sample_range // self.num_clips + start_idx = 0 if sample_position == 1 else np.random.randint( + 0, sample_position - 1) + base_offsets = np.arange(self.num_clips) * interval + clip_offsets = (base_offsets + start_idx) % num_frames + return clip_offsets + + def _get_test_clips(self, num_frames): + """Get clip offsets by dense sample strategy in test mode. + + It will calculate a sample position and sample interval and evenly + sample several start indexes as start positions between + [0, sample_position-1]. Then it will shift each start index by the + base offsets. + + Args: + num_frames (int): Total number of frame in the video. + + Returns: + np.ndarray: Sampled frame indices in train mode. 
+ """ + sample_position = max(1, 1 + num_frames - self.sample_range) + interval = self.sample_range // self.num_clips + start_list = np.linspace( + 0, sample_position - 1, num=self.num_sample_positions, dtype=int) + base_offsets = np.arange(self.num_clips) * interval + clip_offsets = list() + for start_idx in start_list: + clip_offsets.extend((base_offsets + start_idx) % num_frames) + clip_offsets = np.array(clip_offsets) + return clip_offsets + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'clip_len={self.clip_len}, ' + f'frame_interval={self.frame_interval}, ' + f'num_clips={self.num_clips}, ' + f'sample_range={self.sample_range}, ' + f'num_sample_positions={self.num_sample_positions}, ' + f'temporal_jitter={self.temporal_jitter}, ' + f'out_of_bound_opt={self.out_of_bound_opt}, ' + f'test_mode={self.test_mode})') + return repr_str + + +@PIPELINES.register_module() +class SampleAVAFrames(SampleFrames): + + def __init__(self, clip_len, frame_interval=2, test_mode=False): + + super().__init__(clip_len, frame_interval, test_mode=test_mode) + + def _get_clips(self, center_index, skip_offsets, shot_info): + start = center_index - (self.clip_len // 2) * self.frame_interval + end = center_index + ((self.clip_len + 1) // 2) * self.frame_interval + frame_inds = list(range(start, end, self.frame_interval)) + frame_inds = frame_inds + skip_offsets + frame_inds = np.clip(frame_inds, shot_info[0], shot_info[1] - 1) + + return frame_inds + + def __call__(self, results): + fps = results['fps'] + timestamp = results['timestamp'] + timestamp_start = results['timestamp_start'] + shot_info = results['shot_info'] + + center_index = fps * (timestamp - timestamp_start) + 1 + + skip_offsets = np.random.randint( + -self.frame_interval // 2, (self.frame_interval + 1) // 2, + size=self.clip_len) + frame_inds = self._get_clips(center_index, skip_offsets, shot_info) + + results['frame_inds'] = np.array(frame_inds, dtype=np.int) + results['clip_len'] = self.clip_len + results['frame_interval'] = self.frame_interval + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'clip_len={self.clip_len}, ' + f'frame_interval={self.frame_interval}, ' + f'test_mode={self.test_mode})') + return repr_str + + +@PIPELINES.register_module() +class SampleProposalFrames(SampleFrames): + """Sample frames from proposals in the video. + + Required keys are "total_frames" and "out_proposals", added or + modified keys are "frame_inds", "frame_interval", "num_clips", + 'clip_len' and 'num_proposals'. + + Args: + clip_len (int): Frames of each sampled output clip. + body_segments (int): Number of segments in course period. + aug_segments (list[int]): Number of segments in starting and + ending period. + aug_ratio (int | float | tuple[int | float]): The ratio + of the length of augmentation to that of the proposal. + frame_interval (int): Temporal interval of adjacent sampled frames. + Default: 1. + test_interval (int): Temporal interval of adjacent sampled frames + in test mode. Default: 6. + temporal_jitter (bool): Whether to apply temporal jittering. + Default: False. + mode (str): Choose 'train', 'val' or 'test' mode. + Default: 'train'. 
+ """ + + def __init__(self, + clip_len, + body_segments, + aug_segments, + aug_ratio, + frame_interval=1, + test_interval=6, + temporal_jitter=False, + mode='train'): + super().__init__( + clip_len, + frame_interval=frame_interval, + temporal_jitter=temporal_jitter) + self.body_segments = body_segments + self.aug_segments = aug_segments + self.aug_ratio = _pair(aug_ratio) + if not mmcv.is_tuple_of(self.aug_ratio, (int, float)): + raise TypeError(f'aug_ratio should be int, float' + f'or tuple of int and float, ' + f'but got {type(aug_ratio)}') + assert len(self.aug_ratio) == 2 + assert mode in ['train', 'val', 'test'] + self.mode = mode + self.test_interval = test_interval + + @staticmethod + def _get_train_indices(valid_length, num_segments): + """Get indices of different stages of proposals in train mode. + + It will calculate the average interval for each segment, + and randomly shift them within offsets between [0, average_duration]. + If the total number of frames is smaller than num segments, it will + return all zero indices. + + Args: + valid_length (int): The length of the starting point's + valid interval. + num_segments (int): Total number of segments. + + Returns: + np.ndarray: Sampled frame indices in train mode. + """ + avg_interval = (valid_length + 1) // num_segments + if avg_interval > 0: + base_offsets = np.arange(num_segments) * avg_interval + offsets = base_offsets + np.random.randint( + avg_interval, size=num_segments) + else: + offsets = np.zeros((num_segments, ), dtype=np.int) + + return offsets + + @staticmethod + def _get_val_indices(valid_length, num_segments): + """Get indices of different stages of proposals in validation mode. + + It will calculate the average interval for each segment. + If the total number of valid length is smaller than num segments, + it will return all zero indices. + + Args: + valid_length (int): The length of the starting point's + valid interval. + num_segments (int): Total number of segments. + + Returns: + np.ndarray: Sampled frame indices in validation mode. + """ + if valid_length >= num_segments: + avg_interval = valid_length / float(num_segments) + base_offsets = np.arange(num_segments) * avg_interval + offsets = (base_offsets + avg_interval / 2.0).astype(np.int) + else: + offsets = np.zeros((num_segments, ), dtype=np.int) + + return offsets + + def _get_proposal_clips(self, proposal, num_frames): + """Get clip offsets in train mode. + + It will calculate sampled frame indices in the proposal's three + stages: starting, course and ending stage. + + Args: + proposal (obj): The proposal object. + num_frames (int): Total number of frame in the video. + + Returns: + np.ndarray: Sampled frame indices in train mode. 
+ """ + # proposal interval: [start_frame, end_frame) + start_frame = proposal.start_frame + end_frame = proposal.end_frame + ori_clip_len = self.clip_len * self.frame_interval + + duration = end_frame - start_frame + assert duration != 0 + valid_length = duration - ori_clip_len + + valid_starting = max(0, + start_frame - int(duration * self.aug_ratio[0])) + valid_ending = min(num_frames - ori_clip_len + 1, + end_frame - 1 + int(duration * self.aug_ratio[1])) + + valid_starting_length = start_frame - valid_starting - ori_clip_len + valid_ending_length = (valid_ending - end_frame + 1) - ori_clip_len + + if self.mode == 'train': + starting_offsets = self._get_train_indices(valid_starting_length, + self.aug_segments[0]) + course_offsets = self._get_train_indices(valid_length, + self.body_segments) + ending_offsets = self._get_train_indices(valid_ending_length, + self.aug_segments[1]) + elif self.mode == 'val': + starting_offsets = self._get_val_indices(valid_starting_length, + self.aug_segments[0]) + course_offsets = self._get_val_indices(valid_length, + self.body_segments) + ending_offsets = self._get_val_indices(valid_ending_length, + self.aug_segments[1]) + starting_offsets += valid_starting + course_offsets += start_frame + ending_offsets += end_frame + + offsets = np.concatenate( + (starting_offsets, course_offsets, ending_offsets)) + return offsets + + def _get_train_clips(self, num_frames, proposals): + """Get clip offsets in train mode. + + It will calculate sampled frame indices of each proposal, and then + assemble them. + + Args: + num_frames (int): Total number of frame in the video. + proposals (list): Proposals fetched. + + Returns: + np.ndarray: Sampled frame indices in train mode. + """ + clip_offsets = [] + for proposal in proposals: + proposal_clip_offsets = self._get_proposal_clips( + proposal[0][1], num_frames) + clip_offsets = np.concatenate( + [clip_offsets, proposal_clip_offsets]) + + return clip_offsets + + def _get_test_clips(self, num_frames): + """Get clip offsets in test mode. + + It will calculate sampled frame indices based on test interval. + + Args: + num_frames (int): Total number of frame in the video. + + Returns: + np.ndarray: Sampled frame indices in test mode. + """ + ori_clip_len = self.clip_len * self.frame_interval + return np.arange( + 0, num_frames - ori_clip_len, self.test_interval, dtype=np.int) + + def _sample_clips(self, num_frames, proposals): + """Choose clip offsets for the video in a given mode. + + Args: + num_frames (int): Total number of frame in the video. + proposals (list | None): Proposals fetched. + It is set to None in test mode. + + Returns: + np.ndarray: Sampled frame indices. + """ + if self.mode == 'test': + clip_offsets = self._get_test_clips(num_frames) + else: + assert proposals is not None + clip_offsets = self._get_train_clips(num_frames, proposals) + + return clip_offsets + + def __call__(self, results): + """Perform the SampleFrames loading. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. 
+ """ + total_frames = results['total_frames'] + + out_proposals = results.get('out_proposals', None) + clip_offsets = self._sample_clips(total_frames, out_proposals) + frame_inds = clip_offsets[:, None] + np.arange( + self.clip_len)[None, :] * self.frame_interval + frame_inds = np.concatenate(frame_inds) + + if self.temporal_jitter: + perframe_offsets = np.random.randint( + self.frame_interval, size=len(frame_inds)) + frame_inds += perframe_offsets + + start_index = results['start_index'] + frame_inds = np.mod(frame_inds, total_frames) + start_index + + results['frame_inds'] = np.array(frame_inds).astype(np.int) + results['clip_len'] = self.clip_len + results['frame_interval'] = self.frame_interval + results['num_clips'] = ( + self.body_segments + self.aug_segments[0] + self.aug_segments[1]) + if self.mode in ['train', 'val']: + results['num_proposals'] = len(results['out_proposals']) + + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'clip_len={self.clip_len}, ' + f'body_segments={self.body_segments}, ' + f'aug_segments={self.aug_segments}, ' + f'aug_ratio={self.aug_ratio}, ' + f'frame_interval={self.frame_interval}, ' + f'test_interval={self.test_interval}, ' + f'temporal_jitter={self.temporal_jitter}, ' + f'mode={self.mode})') + return repr_str + + +@PIPELINES.register_module() +class PyAVInit: + """Using pyav to initialize the video. + + PyAV: https://github.com/mikeboers/PyAV + + Required keys are "filename", + added or modified keys are "video_reader", and "total_frames". + + Args: + io_backend (str): io backend where frames are store. + Default: 'disk'. + kwargs (dict): Args for file client. + """ + + def __init__(self, io_backend='disk', **kwargs): + self.io_backend = io_backend + self.kwargs = kwargs + self.file_client = None + + def __call__(self, results): + """Perform the PyAV initialization. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + try: + import av + except ImportError: + raise ImportError('Please run "conda install av -c conda-forge" ' + 'or "pip install av" to install PyAV first.') + + if self.file_client is None: + self.file_client = FileClient(self.io_backend, **self.kwargs) + + file_obj = io.BytesIO(self.file_client.get(results['filename'])) + container = av.open(file_obj) + + results['video_reader'] = container + results['total_frames'] = container.streams.video[0].frames + + return results + + def __repr__(self): + repr_str = f'{self.__class__.__name__}(io_backend=disk)' + return repr_str + + +@PIPELINES.register_module() +class PyAVDecode: + """Using pyav to decode the video. + + PyAV: https://github.com/mikeboers/PyAV + + Required keys are "video_reader" and "frame_inds", + added or modified keys are "imgs", "img_shape" and "original_shape". + + Args: + multi_thread (bool): If set to True, it will apply multi + thread processing. Default: False. + """ + + def __init__(self, multi_thread=False): + self.multi_thread = multi_thread + + def __call__(self, results): + """Perform the PyAV decoding. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. 
+ """ + container = results['video_reader'] + imgs = list() + + if self.multi_thread: + container.streams.video[0].thread_type = 'AUTO' + if results['frame_inds'].ndim != 1: + results['frame_inds'] = np.squeeze(results['frame_inds']) + + # set max indice to make early stop + max_inds = max(results['frame_inds']) + i = 0 + for frame in container.decode(video=0): + if i > max_inds + 1: + break + imgs.append(frame.to_rgb().to_ndarray()) + i += 1 + + results['video_reader'] = None + del container + + # the available frame in pyav may be less than its length, + # which may raise error + results['imgs'] = [imgs[i % len(imgs)] for i in results['frame_inds']] + + results['original_shape'] = imgs[0].shape[:2] + results['img_shape'] = imgs[0].shape[:2] + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(multi_thread={self.multi_thread})' + return repr_str + + +@PIPELINES.register_module() +class PyAVDecodeMotionVector(PyAVDecode): + """Using pyav to decode the motion vectors from video. + + Reference: https://github.com/PyAV-Org/PyAV/ + blob/main/tests/test_decode.py + + Required keys are "video_reader" and "frame_inds", + added or modified keys are "motion_vectors", "frame_inds". + + Args: + multi_thread (bool): If set to True, it will apply multi + thread processing. Default: False. + """ + + @staticmethod + def _parse_vectors(mv, vectors, height, width): + """Parse the returned vectors.""" + (w, h, src_x, src_y, dst_x, + dst_y) = (vectors['w'], vectors['h'], vectors['src_x'], + vectors['src_y'], vectors['dst_x'], vectors['dst_y']) + val_x = dst_x - src_x + val_y = dst_y - src_y + start_x = dst_x - w // 2 + start_y = dst_y - h // 2 + end_x = start_x + w + end_y = start_y + h + for sx, ex, sy, ey, vx, vy in zip(start_x, end_x, start_y, end_y, + val_x, val_y): + if (sx >= 0 and ex < width and sy >= 0 and ey < height): + mv[sy:ey, sx:ex] = (vx, vy) + + return mv + + def __call__(self, results): + """Perform the PyAV motion vector decoding. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + container = results['video_reader'] + imgs = list() + + if self.multi_thread: + container.streams.video[0].thread_type = 'AUTO' + if results['frame_inds'].ndim != 1: + results['frame_inds'] = np.squeeze(results['frame_inds']) + + # set max index to make early stop + max_idx = max(results['frame_inds']) + i = 0 + stream = container.streams.video[0] + codec_context = stream.codec_context + codec_context.options = {'flags2': '+export_mvs'} + for packet in container.demux(stream): + for frame in packet.decode(): + if i > max_idx + 1: + break + i += 1 + height = frame.height + width = frame.width + mv = np.zeros((height, width, 2), dtype=np.int8) + vectors = frame.side_data.get('MOTION_VECTORS') + if frame.key_frame: + # Key frame don't have motion vectors + assert vectors is None + if vectors is not None and len(vectors) > 0: + mv = self._parse_vectors(mv, vectors.to_ndarray(), height, + width) + imgs.append(mv) + + results['video_reader'] = None + del container + + # the available frame in pyav may be less than its length, + # which may raise error + results['motion_vectors'] = np.array( + [imgs[i % len(imgs)] for i in results['frame_inds']]) + return results + + +@PIPELINES.register_module() +class DecordInit: + """Using decord to initialize the video_reader. + + Decord: https://github.com/dmlc/decord + + Required keys are "filename", + added or modified keys are "video_reader" and "total_frames". 
+ """ + + def __init__(self, io_backend='disk', num_threads=1, **kwargs): + self.io_backend = io_backend + self.num_threads = num_threads + self.kwargs = kwargs + self.file_client = None + + def __call__(self, results): + """Perform the Decord initialization. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + try: + import decord + except ImportError: + raise ImportError( + 'Please run "pip install decord" to install Decord first.') + + if self.file_client is None: + self.file_client = FileClient(self.io_backend, **self.kwargs) + + file_obj = io.BytesIO(self.file_client.get(results['filename'])) + container = decord.VideoReader(file_obj, num_threads=self.num_threads) + results['video_reader'] = container + results['total_frames'] = len(container) + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'io_backend={self.io_backend}, ' + f'num_threads={self.num_threads})') + return repr_str + + +@PIPELINES.register_module() +class DecordDecode: + """Using decord to decode the video. + + Decord: https://github.com/dmlc/decord + + Required keys are "video_reader", "filename" and "frame_inds", + added or modified keys are "imgs" and "original_shape". + """ + + def __call__(self, results): + """Perform the Decord decoding. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + container = results['video_reader'] + + if results['frame_inds'].ndim != 1: + results['frame_inds'] = np.squeeze(results['frame_inds']) + + frame_inds = results['frame_inds'] + # Generate frame index mapping in order + frame_dict = { + idx: container[idx].asnumpy() + for idx in np.unique(frame_inds) + } + + imgs = [frame_dict[idx] for idx in frame_inds] + + results['video_reader'] = None + del container + + results['imgs'] = imgs + results['original_shape'] = imgs[0].shape[:2] + results['img_shape'] = imgs[0].shape[:2] + + return results + + +@PIPELINES.register_module() +class OpenCVInit: + """Using OpenCV to initialize the video_reader. + + Required keys are "filename", added or modified keys are "new_path", + "video_reader" and "total_frames". + """ + + def __init__(self, io_backend='disk', **kwargs): + self.io_backend = io_backend + self.kwargs = kwargs + self.file_client = None + self.tmp_folder = None + if self.io_backend != 'disk': + random_string = get_random_string() + thread_id = get_thread_id() + self.tmp_folder = osp.join(get_shm_dir(), + f'{random_string}_{thread_id}') + os.mkdir(self.tmp_folder) + + def __call__(self, results): + """Perform the OpenCV initialization. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. 
+ """ + if self.io_backend == 'disk': + new_path = results['filename'] + else: + if self.file_client is None: + self.file_client = FileClient(self.io_backend, **self.kwargs) + + thread_id = get_thread_id() + # save the file of same thread at the same place + new_path = osp.join(self.tmp_folder, f'tmp_{thread_id}.mp4') + with open(new_path, 'wb') as f: + f.write(self.file_client.get(results['filename'])) + + container = mmcv.VideoReader(new_path) + results['new_path'] = new_path + results['video_reader'] = container + results['total_frames'] = len(container) + + return results + + def __del__(self): + if self.tmp_folder and osp.exists(self.tmp_folder): + shutil.rmtree(self.tmp_folder) + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'io_backend={self.io_backend})') + return repr_str + + +@PIPELINES.register_module() +class OpenCVDecode: + """Using OpenCV to decode the video. + + Required keys are "video_reader", "filename" and "frame_inds", added or + modified keys are "imgs", "img_shape" and "original_shape". + """ + + def __call__(self, results): + """Perform the OpenCV decoding. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + container = results['video_reader'] + imgs = list() + + if results['frame_inds'].ndim != 1: + results['frame_inds'] = np.squeeze(results['frame_inds']) + + for frame_ind in results['frame_inds']: + cur_frame = container[frame_ind] + # last frame may be None in OpenCV + while isinstance(cur_frame, type(None)): + frame_ind -= 1 + cur_frame = container[frame_ind] + imgs.append(cur_frame) + + results['video_reader'] = None + del container + + imgs = np.array(imgs) + # The default channel order of OpenCV is BGR, thus we change it to RGB + imgs = imgs[:, :, :, ::-1] + results['imgs'] = list(imgs) + results['original_shape'] = imgs[0].shape[:2] + results['img_shape'] = imgs[0].shape[:2] + + return results + + +@PIPELINES.register_module() +class RawFrameDecode: + """Load and decode frames with given indices. + + Required keys are "frame_dir", "filename_tmpl" and "frame_inds", + added or modified keys are "imgs", "img_shape" and "original_shape". + + Args: + io_backend (str): IO backend where frames are stored. Default: 'disk'. + decoding_backend (str): Backend used for image decoding. + Default: 'cv2'. + kwargs (dict, optional): Arguments for FileClient. + """ + + def __init__(self, io_backend='disk', decoding_backend='cv2', **kwargs): + self.io_backend = io_backend + self.decoding_backend = decoding_backend + self.kwargs = kwargs + self.file_client = None + + def __call__(self, results): + """Perform the ``RawFrameDecode`` to pick frames given indices. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + mmcv.use_backend(self.decoding_backend) + + directory = results['frame_dir'] + filename_tmpl = results['filename_tmpl'] + modality = results['modality'] + + if self.file_client is None: + self.file_client = FileClient(self.io_backend, **self.kwargs) + + imgs = list() + + if results['frame_inds'].ndim != 1: + results['frame_inds'] = np.squeeze(results['frame_inds']) + + offset = results.get('offset', 0) + + for frame_idx in results['frame_inds']: + frame_idx += offset + if modality == 'RGB': + filepath = osp.join(directory, filename_tmpl.format(frame_idx)) + img_bytes = self.file_client.get(filepath) + # Get frame with channel order RGB directly. 
+ cur_frame = mmcv.imfrombytes(img_bytes, channel_order='rgb') + imgs.append(cur_frame) + elif modality == 'Flow': + x_filepath = osp.join(directory, + filename_tmpl.format('x', frame_idx)) + y_filepath = osp.join(directory, + filename_tmpl.format('y', frame_idx)) + x_img_bytes = self.file_client.get(x_filepath) + x_frame = mmcv.imfrombytes(x_img_bytes, flag='grayscale') + y_img_bytes = self.file_client.get(y_filepath) + y_frame = mmcv.imfrombytes(y_img_bytes, flag='grayscale') + imgs.extend([x_frame, y_frame]) + else: + raise NotImplementedError + + results['imgs'] = imgs + results['original_shape'] = imgs[0].shape[:2] + results['img_shape'] = imgs[0].shape[:2] + + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'io_backend={self.io_backend}, ' + f'decoding_backend={self.decoding_backend})') + return repr_str + + +@PIPELINES.register_module() +class ImageDecode: + """Load and decode images. + + Required key is "filename", added or modified keys are "imgs", "img_shape" + and "original_shape". + + Args: + io_backend (str): IO backend where frames are stored. Default: 'disk'. + decoding_backend (str): Backend used for image decoding. + Default: 'cv2'. + kwargs (dict, optional): Arguments for FileClient. + """ + + def __init__(self, io_backend='disk', decoding_backend='cv2', **kwargs): + self.io_backend = io_backend + self.decoding_backend = decoding_backend + self.kwargs = kwargs + self.file_client = None + + def __call__(self, results): + """Perform the ``ImageDecode`` to load image given the file path. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + mmcv.use_backend(self.decoding_backend) + + filename = results['filename'] + + if self.file_client is None: + self.file_client = FileClient(self.io_backend, **self.kwargs) + + imgs = list() + img_bytes = self.file_client.get(filename) + + img = mmcv.imfrombytes(img_bytes, channel_order='rgb') + imgs.append(img) + + results['imgs'] = imgs + results['original_shape'] = imgs[0].shape[:2] + results['img_shape'] = imgs[0].shape[:2] + return results + + +@PIPELINES.register_module() +class AudioDecodeInit: + """Using librosa to initialize the audio reader. + + Required keys are "audio_path", added or modified keys are "length", + "sample_rate", "audios". + + Args: + io_backend (str): io backend where frames are store. + Default: 'disk'. + sample_rate (int): Audio sampling times per second. Default: 16000. + """ + + def __init__(self, + io_backend='disk', + sample_rate=16000, + pad_method='zero', + **kwargs): + self.io_backend = io_backend + self.sample_rate = sample_rate + if pad_method in ['random', 'zero']: + self.pad_method = pad_method + else: + raise NotImplementedError + self.kwargs = kwargs + self.file_client = None + + @staticmethod + def _zero_pad(shape): + return np.zeros(shape, dtype=np.float32) + + @staticmethod + def _random_pad(shape): + # librosa load raw audio file into a distribution of -1~+1 + return np.random.rand(shape).astype(np.float32) * 2 - 1 + + def __call__(self, results): + """Perform the librosa initialization. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. 
+ """ + try: + import librosa + except ImportError: + raise ImportError('Please install librosa first.') + + if self.file_client is None: + self.file_client = FileClient(self.io_backend, **self.kwargs) + if osp.exists(results['audio_path']): + file_obj = io.BytesIO(self.file_client.get(results['audio_path'])) + y, sr = librosa.load(file_obj, sr=self.sample_rate) + else: + # Generate a random dummy 10s input + pad_func = getattr(self, f'_{self.pad_method}_pad') + y = pad_func(int(round(10.0 * self.sample_rate))) + sr = self.sample_rate + + results['length'] = y.shape[0] + results['sample_rate'] = sr + results['audios'] = y + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'io_backend={self.io_backend}, ' + f'sample_rate={self.sample_rate}, ' + f'pad_method={self.pad_method})') + return repr_str + + +@PIPELINES.register_module() +class LoadAudioFeature: + """Load offline extracted audio features. + + Required keys are "audio_path", added or modified keys are "length", + audios". + """ + + def __init__(self, pad_method='zero'): + if pad_method not in ['zero', 'random']: + raise NotImplementedError + self.pad_method = pad_method + + @staticmethod + def _zero_pad(shape): + return np.zeros(shape, dtype=np.float32) + + @staticmethod + def _random_pad(shape): + # spectrogram is normalized into a distribution of 0~1 + return np.random.rand(shape).astype(np.float32) + + def __call__(self, results): + """Perform the numpy loading. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + if osp.exists(results['audio_path']): + feature_map = np.load(results['audio_path']) + else: + # Generate a random dummy 10s input + # Some videos do not have audio stream + pad_func = getattr(self, f'_{self.pad_method}_pad') + feature_map = pad_func((640, 80)) + + results['length'] = feature_map.shape[0] + results['audios'] = feature_map + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'pad_method={self.pad_method})') + return repr_str + + +@PIPELINES.register_module() +class AudioDecode: + """Sample the audio w.r.t. the frames selected. + + Args: + fixed_length (int): As the audio clip selected by frames sampled may + not be exactly the same, `fixed_length` will truncate or pad them + into the same size. Default: 32000. + + Required keys are "frame_inds", "num_clips", "total_frames", "length", + added or modified keys are "audios", "audios_shape". 
+ """ + + def __init__(self, fixed_length=32000): + self.fixed_length = fixed_length + + def __call__(self, results): + """Perform the ``AudioDecode`` to pick audio clips.""" + audio = results['audios'] + frame_inds = results['frame_inds'] + num_clips = results['num_clips'] + resampled_clips = list() + frame_inds = frame_inds.reshape(num_clips, -1) + for clip_idx in range(num_clips): + clip_frame_inds = frame_inds[clip_idx] + start_idx = max( + 0, + int( + round((clip_frame_inds[0] + 1) / results['total_frames'] * + results['length']))) + end_idx = min( + results['length'], + int( + round((clip_frame_inds[-1] + 1) / results['total_frames'] * + results['length']))) + cropped_audio = audio[start_idx:end_idx] + if cropped_audio.shape[0] >= self.fixed_length: + truncated_audio = cropped_audio[:self.fixed_length] + else: + truncated_audio = np.pad( + cropped_audio, + ((0, self.fixed_length - cropped_audio.shape[0])), + mode='constant') + + resampled_clips.append(truncated_audio) + + results['audios'] = np.array(resampled_clips) + results['audios_shape'] = results['audios'].shape + return results + + +@PIPELINES.register_module() +class BuildPseudoClip: + """Build pseudo clips with one single image by repeating it n times. + + Required key is "imgs", added or modified key is "imgs", "num_clips", + "clip_len". + + Args: + clip_len (int): Frames of the generated pseudo clips. + """ + + def __init__(self, clip_len): + self.clip_len = clip_len + + def __call__(self, results): + # the input should be one single image + assert len(results['imgs']) == 1 + im = results['imgs'][0] + for _ in range(1, self.clip_len): + results['imgs'].append(np.copy(im)) + results['clip_len'] = self.clip_len + results['num_clips'] = 1 + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'fix_length={self.fixed_length})') + return repr_str + + +@PIPELINES.register_module() +class FrameSelector(RawFrameDecode): + """Deprecated class for ``RawFrameDecode``.""" + + def __init__(self, *args, **kwargs): + warnings.warn('"FrameSelector" is deprecated, please switch to' + '"RawFrameDecode"') + super().__init__(*args, **kwargs) + + +@PIPELINES.register_module() +class AudioFeatureSelector: + """Sample the audio feature w.r.t. the frames selected. + + Required keys are "audios", "frame_inds", "num_clips", "length", + "total_frames", added or modified keys are "audios", "audios_shape". + + Args: + fixed_length (int): As the features selected by frames sampled may + not be extactly the same, `fixed_length` will truncate or pad them + into the same size. Default: 128. + """ + + def __init__(self, fixed_length=128): + self.fixed_length = fixed_length + + def __call__(self, results): + """Perform the ``AudioFeatureSelector`` to pick audio feature clips. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. 
+ """ + audio = results['audios'] + frame_inds = results['frame_inds'] + num_clips = results['num_clips'] + resampled_clips = list() + + frame_inds = frame_inds.reshape(num_clips, -1) + for clip_idx in range(num_clips): + clip_frame_inds = frame_inds[clip_idx] + start_idx = max( + 0, + int( + round((clip_frame_inds[0] + 1) / results['total_frames'] * + results['length']))) + end_idx = min( + results['length'], + int( + round((clip_frame_inds[-1] + 1) / results['total_frames'] * + results['length']))) + cropped_audio = audio[start_idx:end_idx, :] + if cropped_audio.shape[0] >= self.fixed_length: + truncated_audio = cropped_audio[:self.fixed_length, :] + else: + truncated_audio = np.pad( + cropped_audio, + ((0, self.fixed_length - cropped_audio.shape[0]), (0, 0)), + mode='constant') + + resampled_clips.append(truncated_audio) + results['audios'] = np.array(resampled_clips) + results['audios_shape'] = results['audios'].shape + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'fix_length={self.fixed_length})') + return repr_str + + +@PIPELINES.register_module() +class LoadLocalizationFeature: + """Load Video features for localizer with given video_name list. + + Required keys are "video_name" and "data_prefix", added or modified keys + are "raw_feature". + + Args: + raw_feature_ext (str): Raw feature file extension. Default: '.csv'. + """ + + def __init__(self, raw_feature_ext='.csv'): + valid_raw_feature_ext = ('.csv', ) + if raw_feature_ext not in valid_raw_feature_ext: + raise NotImplementedError + self.raw_feature_ext = raw_feature_ext + + def __call__(self, results): + """Perform the LoadLocalizationFeature loading. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + video_name = results['video_name'] + data_prefix = results['data_prefix'] + + data_path = osp.join(data_prefix, video_name + self.raw_feature_ext) + raw_feature = np.loadtxt( + data_path, dtype=np.float32, delimiter=',', skiprows=1) + + results['raw_feature'] = np.transpose(raw_feature, (1, 0)) + + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'raw_feature_ext={self.raw_feature_ext})') + return repr_str + + +@PIPELINES.register_module() +class GenerateLocalizationLabels: + """Load video label for localizer with given video_name list. + + Required keys are "duration_frame", "duration_second", "feature_frame", + "annotations", added or modified keys are "gt_bbox". + """ + + def __call__(self, results): + """Perform the GenerateLocalizationLabels loading. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + video_frame = results['duration_frame'] + video_second = results['duration_second'] + feature_frame = results['feature_frame'] + corrected_second = float(feature_frame) / video_frame * video_second + annotations = results['annotations'] + + gt_bbox = [] + + for annotation in annotations: + current_start = max( + min(1, annotation['segment'][0] / corrected_second), 0) + current_end = max( + min(1, annotation['segment'][1] / corrected_second), 0) + gt_bbox.append([current_start, current_end]) + + gt_bbox = np.array(gt_bbox) + results['gt_bbox'] = gt_bbox + return results + + +@PIPELINES.register_module() +class LoadProposals: + """Loading proposals with given proposal results. 
+ + Required keys are "video_name", added or modified keys are 'bsp_feature', + 'tmin', 'tmax', 'tmin_score', 'tmax_score' and 'reference_temporal_iou'. + + Args: + top_k (int): The top k proposals to be loaded. + pgm_proposals_dir (str): Directory to load proposals. + pgm_features_dir (str): Directory to load proposal features. + proposal_ext (str): Proposal file extension. Default: '.csv'. + feature_ext (str): Feature file extension. Default: '.npy'. + """ + + def __init__(self, + top_k, + pgm_proposals_dir, + pgm_features_dir, + proposal_ext='.csv', + feature_ext='.npy'): + self.top_k = top_k + self.pgm_proposals_dir = pgm_proposals_dir + self.pgm_features_dir = pgm_features_dir + valid_proposal_ext = ('.csv', ) + if proposal_ext not in valid_proposal_ext: + raise NotImplementedError + self.proposal_ext = proposal_ext + valid_feature_ext = ('.npy', ) + if feature_ext not in valid_feature_ext: + raise NotImplementedError + self.feature_ext = feature_ext + + def __call__(self, results): + """Perform the LoadProposals loading. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + video_name = results['video_name'] + proposal_path = osp.join(self.pgm_proposals_dir, + video_name + self.proposal_ext) + if self.proposal_ext == '.csv': + pgm_proposals = np.loadtxt( + proposal_path, dtype=np.float32, delimiter=',', skiprows=1) + + pgm_proposals = np.array(pgm_proposals[:self.top_k]) + tmin = pgm_proposals[:, 0] + tmax = pgm_proposals[:, 1] + tmin_score = pgm_proposals[:, 2] + tmax_score = pgm_proposals[:, 3] + reference_temporal_iou = pgm_proposals[:, 5] + + feature_path = osp.join(self.pgm_features_dir, + video_name + self.feature_ext) + if self.feature_ext == '.npy': + bsp_feature = np.load(feature_path).astype(np.float32) + + bsp_feature = bsp_feature[:self.top_k, :] + + results['bsp_feature'] = bsp_feature + results['tmin'] = tmin + results['tmax'] = tmax + results['tmin_score'] = tmin_score + results['tmax_score'] = tmax_score + results['reference_temporal_iou'] = reference_temporal_iou + + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'top_k={self.top_k}, ' + f'pgm_proposals_dir={self.pgm_proposals_dir}, ' + f'pgm_features_dir={self.pgm_features_dir}, ' + f'proposal_ext={self.proposal_ext}, ' + f'feature_ext={self.feature_ext})') + return repr_str diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/datasets/rawframe_dataset.py b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/rawframe_dataset.py new file mode 100644 index 0000000..6c63285 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/rawframe_dataset.py @@ -0,0 +1,190 @@ +import copy +import os.path as osp + +import numpy as np +import torch + +from .base import BaseDataset +from .registry import DATASETS + + +@DATASETS.register_module() +class RawframeDataset(BaseDataset): + """Rawframe dataset for action recognition. + + The dataset loads raw frames and apply specified transforms to return a + dict containing the frame tensors and other information. + + The ann_file is a text file with multiple lines, and each line indicates + the directory to frames of a video, total frames of the video and + the label of a video, which are split with a whitespace. + Example of a annotation file: + + .. 
code-block:: txt + + some/directory-1 163 1 + some/directory-2 122 1 + some/directory-3 258 2 + some/directory-4 234 2 + some/directory-5 295 3 + some/directory-6 121 3 + + Example of a multi-class annotation file: + + + .. code-block:: txt + + some/directory-1 163 1 3 5 + some/directory-2 122 1 2 + some/directory-3 258 2 + some/directory-4 234 2 4 6 8 + some/directory-5 295 3 + some/directory-6 121 3 + + Example of a with_offset annotation file (clips from long videos), each + line indicates the directory to frames of a video, the index of the start + frame, total frames of the video clip and the label of a video clip, which + are split with a whitespace. + + + .. code-block:: txt + + some/directory-1 12 163 3 + some/directory-2 213 122 4 + some/directory-3 100 258 5 + some/directory-4 98 234 2 + some/directory-5 0 295 3 + some/directory-6 50 121 3 + + + Args: + ann_file (str): Path to the annotation file. + pipeline (list[dict | callable]): A sequence of data transforms. + data_prefix (str | None): Path to a directory where videos are held. + Default: None. + test_mode (bool): Store True when building test or validation dataset. + Default: False. + filename_tmpl (str): Template for each filename. + Default: 'img_{:05}.jpg'. + with_offset (bool): Determines whether the offset information is in + ann_file. Default: False. + multi_class (bool): Determines whether it is a multi-class + recognition dataset. Default: False. + num_classes (int | None): Number of classes in the dataset. + Default: None. + modality (str): Modality of data. Support 'RGB', 'Flow'. + Default: 'RGB'. + sample_by_class (bool): Sampling by class, should be set `True` when + performing inter-class data balancing. Only compatible with + `multi_class == False`. Only applies for training. Default: False. + power (float | None): We support sampling data with the probability + proportional to the power of its label frequency (freq ^ power) + when sampling data. `power == 1` indicates uniformly sampling all + data; `power == 0` indicates uniformly sampling all classes. + Default: None. 
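To make the annotation formats described above concrete, here is a minimal standalone parser for one line of such a file (it mirrors the splitting logic of `load_annotations` below, but is only an illustration, not the dataset class itself):

```python
def parse_rawframe_line(line, with_offset=False):
    """Parse one rawframe annotation line into a dict, following the formats above."""
    parts = line.strip().split()
    info = {}
    idx = 0
    info['frame_dir'] = parts[idx]
    idx += 1
    if with_offset:
        info['offset'] = int(parts[idx])
        idx += 1
    info['total_frames'] = int(parts[idx])
    idx += 1
    info['label'] = [int(x) for x in parts[idx:]]  # one label, or several for multi-class
    return info

print(parse_rawframe_line('some/directory-1 163 1 3 5'))
print(parse_rawframe_line('some/directory-1 12 163 3', with_offset=True))
```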
+ """ + + def __init__(self, + ann_file, + pipeline, + data_prefix=None, + test_mode=False, + filename_tmpl='img_{:05}.jpg', + with_offset=False, + multi_class=False, + num_classes=None, + start_index=1, + modality='RGB', + sample_by_class=False, + power=None): + self.filename_tmpl = filename_tmpl + self.with_offset = with_offset + super().__init__( + ann_file, + pipeline, + data_prefix, + test_mode, + multi_class, + num_classes, + start_index, + modality, + sample_by_class=sample_by_class, + power=power) + + def load_annotations(self): + """Load annotation file to get video information.""" + if self.ann_file.endswith('.json'): + return self.load_json_annotations() + video_infos = [] + with open(self.ann_file, 'r') as fin: + for line in fin: + line_split = line.strip().split() + video_info = {} + idx = 0 + # idx for frame_dir + frame_dir = line_split[idx] + if self.data_prefix is not None: + frame_dir = osp.join(self.data_prefix, frame_dir) + video_info['frame_dir'] = frame_dir + idx += 1 + if self.with_offset: + # idx for offset and total_frames + video_info['offset'] = int(line_split[idx]) + video_info['total_frames'] = int(line_split[idx + 1]) + idx += 2 + else: + # idx for total_frames + video_info['total_frames'] = int(line_split[idx]) + idx += 1 + # idx for label[s] + label = [int(x) for x in line_split[idx:]] + assert label, f'missing label in line: {line}' + if self.multi_class: + assert self.num_classes is not None + video_info['label'] = label + else: + assert len(label) == 1 + video_info['label'] = label[0] + video_infos.append(video_info) + + return video_infos + + def prepare_train_frames(self, idx): + """Prepare the frames for training given the index.""" + if self.sample_by_class: + # Then, the idx is the class index + samples = self.video_infos_by_class[idx] + results = copy.deepcopy(np.random.choice(samples)) + else: + results = copy.deepcopy(self.video_infos[idx]) + results['filename_tmpl'] = self.filename_tmpl + results['modality'] = self.modality + results['start_index'] = self.start_index + + # prepare tensor in getitem + if self.multi_class: + onehot = torch.zeros(self.num_classes) + onehot[results['label']] = 1. + results['label'] = onehot + + return self.pipeline(results) + + def prepare_test_frames(self, idx): + """Prepare the frames for testing given the index.""" + if self.sample_by_class: + # Then, the idx is the class index + samples = self.video_infos_by_class[idx] + results = copy.deepcopy(np.random.choice(samples)) + else: + results = copy.deepcopy(self.video_infos[idx]) + results['filename_tmpl'] = self.filename_tmpl + results['modality'] = self.modality + results['start_index'] = self.start_index + + # prepare tensor in getitem + if self.multi_class: + onehot = torch.zeros(self.num_classes) + onehot[results['label']] = 1. + results['label'] = onehot + + return self.pipeline(results) diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/datasets/rawvideo_dataset.py b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/rawvideo_dataset.py new file mode 100644 index 0000000..5ba5e61 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/rawvideo_dataset.py @@ -0,0 +1,146 @@ +import copy +import os.path as osp +import random + +import mmcv + +from .base import BaseDataset +from .registry import DATASETS + + +@DATASETS.register_module() +class RawVideoDataset(BaseDataset): + """RawVideo dataset for action recognition, used in the Project OmniSource. 
+ + The dataset loads clips of raw videos and apply specified transforms to + return a dict containing the frame tensors and other information. Not that + for this dataset, `multi_class` should be False. + + The ann_file is a text file with multiple lines, and each line indicates + a sample video with the filepath (without suffix), label, number of clips + and index of positive clips (starting from 0), which are split with a + whitespace. Raw videos should be first trimmed into 10 second clips, + organized in the following format: + + .. code-block:: txt + + some/path/D32_1gwq35E/part_0.mp4 + some/path/D32_1gwq35E/part_1.mp4 + ...... + some/path/D32_1gwq35E/part_n.mp4 + + Example of a annotation file: + + .. code-block:: txt + + some/path/D32_1gwq35E 66 10 0 1 2 + some/path/-G-5CJ0JkKY 254 5 3 4 + some/path/T4h1bvOd9DA 33 1 0 + some/path/4uZ27ivBl00 341 2 0 1 + some/path/0LfESFkfBSw 186 234 7 9 11 + some/path/-YIsNpBEx6c 169 100 9 10 11 + + The first line indicates that the raw video `some/path/D32_1gwq35E` has + action label `66`, consists of 10 clips (from `part_0.mp4` to + `part_9.mp4`). The 1st, 2nd and 3rd clips are positive clips. + + + Args: + ann_file (str): Path to the annotation file. + pipeline (list[dict | callable]): A sequence of data transforms. + sampling_strategy (str): The strategy to sample clips from raw videos. + Choices are 'random' or 'positive'. Default: 'positive'. + clipname_tmpl (str): The template of clip name in the raw video. + Default: 'part_{}.mp4'. + **kwargs: Keyword arguments for ``BaseDataset``. + """ + + def __init__(self, + ann_file, + pipeline, + clipname_tmpl='part_{}.mp4', + sampling_strategy='positive', + **kwargs): + super().__init__(ann_file, pipeline, start_index=0, **kwargs) + assert self.multi_class is False + self.sampling_strategy = sampling_strategy + self.clipname_tmpl = clipname_tmpl + # If positive, we should only keep those raw videos with positive + # clips + if self.sampling_strategy == 'positive': + self.video_infos = [ + x for x in self.video_infos if len(x['positive_clip_inds']) + ] + + # do not support multi_class + def load_annotations(self): + """Load annotation file to get video information.""" + if self.ann_file.endswith('.json'): + return self.load_json_annotations() + + video_infos = [] + with open(self.ann_file, 'r') as fin: + for line in fin: + line_split = line.strip().split() + video_dir = line_split[0] + label = int(line_split[1]) + num_clips = int(line_split[2]) + positive_clip_inds = [int(ind) for ind in line_split[3:]] + + if self.data_prefix is not None: + video_dir = osp.join(self.data_prefix, video_dir) + video_infos.append( + dict( + video_dir=video_dir, + label=label, + num_clips=num_clips, + positive_clip_inds=positive_clip_inds)) + return video_infos + + # do not support multi_class + def load_json_annotations(self): + """Load json annotation file to get video information.""" + video_infos = mmcv.load(self.ann_file) + num_videos = len(video_infos) + path_key = 'video_dir' + for i in range(num_videos): + if self.data_prefix is not None: + path_value = video_infos[i][path_key] + path_value = osp.join(self.data_prefix, path_value) + video_infos[i][path_key] = path_value + return video_infos + + def sample_clip(self, results): + """Sample a clip from the raw video given the sampling strategy.""" + assert self.sampling_strategy in ['positive', 'random'] + if self.sampling_strategy == 'positive': + assert results['positive_clip_inds'] + ind = random.choice(results['positive_clip_inds']) + else: + ind = 
random.randint(0, results['num_clips'] - 1) + clipname = self.clipname_tmpl.format(ind) + + # if the first char of self.clipname_tmpl is a letter, use osp.join; + # otherwise, directly concat them + if self.clipname_tmpl[0].isalpha(): + filename = osp.join(results['video_dir'], clipname) + else: + filename = results['video_dir'] + clipname + results['filename'] = filename + return results + + def prepare_train_frames(self, idx): + """Prepare the frames for training given the index.""" + results = copy.deepcopy(self.video_infos[idx]) + results = self.sample_clip(results) + results['modality'] = self.modality + results['start_index'] = self.start_index + return self.pipeline(results) + + def prepare_test_frames(self, idx): + """Prepare the frames for testing given the index.""" + results = copy.deepcopy(self.video_infos[idx]) + results = self.sample_clip(results) + results['modality'] = self.modality + results['start_index'] = self.start_index + return self.pipeline(results) diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/datasets/registry.py b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/registry.py new file mode 100644 index 0000000..2ebc753 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/registry.py @@ -0,0 +1,4 @@ +from mmcv.utils import Registry + +DATASETS = Registry('dataset') +PIPELINES = Registry('pipeline') diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/datasets/samplers/__init__.py b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/samplers/__init__.py new file mode 100644 index 0000000..1299b6b --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/samplers/__init__.py @@ -0,0 +1,3 @@ +from .distributed_sampler import DistributedPowerSampler, DistributedSampler + +__all__ = ['DistributedSampler', 'DistributedPowerSampler'] diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/datasets/samplers/distributed_sampler.py b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/samplers/distributed_sampler.py new file mode 100644 index 0000000..59fee0c --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/samplers/distributed_sampler.py @@ -0,0 +1,74 @@ +import torch +from torch.utils.data import DistributedSampler as _DistributedSampler + + +class DistributedSampler(_DistributedSampler): + """DistributedSampler inheriting from + ``torch.utils.data.DistributedSampler``. + + In pytorch of lower versions, there is no ``shuffle`` argument. This child + class will port one to DistributedSampler. + """ + + def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True): + super().__init__(dataset, num_replicas=num_replicas, rank=rank) + self.shuffle = shuffle + + def __iter__(self): + # deterministically shuffle based on epoch + if self.shuffle: + g = torch.Generator() + g.manual_seed(self.epoch) + indices = torch.randperm(len(self.dataset), generator=g).tolist() + else: + indices = torch.arange(len(self.dataset)).tolist() + + # add extra samples to make it evenly divisible + indices += indices[:(self.total_size - len(indices))] + assert len(indices) == self.total_size + + # subsample + indices = indices[self.rank:self.total_size:self.num_replicas] + assert len(indices) == self.num_samples + return iter(indices) + + +class DistributedPowerSampler(_DistributedSampler): + """DistributedPowerSampler inheriting from + ``torch.utils.data.DistributedSampler``. 
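The `DistributedSampler.__iter__` above shuffles with an epoch-seeded generator, pads the index list so it divides evenly across replicas, and then takes a rank-strided slice. The following sketch reproduces that partitioning on made-up sizes; `num_samples` and `total_size` are recomputed here with ceil division, which is how the parent PyTorch sampler derives them.

```python
import torch

def partition_indices(num_items, num_replicas, rank, epoch, shuffle=True):
    """Pad and stride-subsample indices the way DistributedSampler.__iter__ does."""
    g = torch.Generator()
    g.manual_seed(epoch)  # deterministic shuffle per epoch
    if shuffle:
        indices = torch.randperm(num_items, generator=g).tolist()
    else:
        indices = list(range(num_items))
    num_samples = -(-num_items // num_replicas)      # ceil division
    total_size = num_samples * num_replicas
    indices += indices[:total_size - len(indices)]   # pad so every rank gets num_samples
    return indices[rank:total_size:num_replicas]     # rank-strided subsample

# 10 items over 3 replicas: each rank receives 4 indices, with 2 duplicates overall.
for r in range(3):
    print(r, partition_indices(10, num_replicas=3, rank=r, epoch=0))
```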
+ + Samples are sampled with the probability that is proportional to the power + of label frequency (freq ^ power). The sampler only applies to single class + recognition dataset. + + The default value of power is 1, which is equivalent to bootstrap sampling + from the entire dataset. + """ + + def __init__(self, dataset, num_replicas=None, rank=None, power=1): + super().__init__(dataset, num_replicas=num_replicas, rank=rank) + self.power = power + + def __iter__(self): + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch) + video_infos_by_class = self.dataset.video_infos_by_class + num_classes = self.dataset.num_classes + # For simplicity, discontinuous labels are not permitted + assert set(video_infos_by_class) == set(range(num_classes)) + counts = [len(video_infos_by_class[i]) for i in range(num_classes)] + counts = [cnt**self.power for cnt in counts] + + indices = torch.multinomial( + torch.Tensor(counts), + self.total_size, + replacement=True, + generator=g) + indices = indices.data.numpy().tolist() + assert len(indices) == self.total_size + + indices = indices[self.rank:self.total_size:self.num_replicas] + assert len(indices) == self.num_samples + + return iter(indices) diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/datasets/ssn_dataset.py b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/ssn_dataset.py new file mode 100644 index 0000000..26cb7de --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/ssn_dataset.py @@ -0,0 +1,880 @@ +import copy +import os.path as osp +import warnings + +import mmcv +import numpy as np +from torch.nn.modules.utils import _pair + +from ..core import softmax +from ..localization import (eval_ap, load_localize_proposal_file, + perform_regression, temporal_iou, temporal_nms) +from ..utils import get_root_logger +from .base import BaseDataset +from .registry import DATASETS + + +class SSNInstance: + """Proposal instance of SSN. + + Args: + start_frame (int): Index of the proposal's start frame. + end_frame (int): Index of the proposal's end frame. + num_video_frames (int): Total frames of the video. + label (int | None): The category label of the proposal. Default: None. + best_iou (float): The highest IOU with the groundtruth instance. + Default: 0. + overlap_self (float): Percent of the proposal's own span contained + in a groundtruth instance. Default: 0. + """ + + def __init__(self, + start_frame, + end_frame, + num_video_frames, + label=None, + best_iou=0, + overlap_self=0): + self.start_frame = start_frame + self.end_frame = min(end_frame, num_video_frames) + self.num_video_frames = num_video_frames + self.label = label if label is not None else -1 + self.coverage = (end_frame - start_frame) / num_video_frames + self.best_iou = best_iou + self.overlap_self = overlap_self + self.loc_reg = None + self.size_reg = None + self.regression_targets = [0., 0.] + + def compute_regression_targets(self, gt_list): + """Compute regression targets of positive proposals. + + Args: + gt_list (list): The list of groundtruth instances. + """ + # Find the groundtruth instance with the highest IOU. 
+ ious = [ + temporal_iou(self.start_frame, self.end_frame, gt.start_frame, + gt.end_frame) for gt in gt_list + ] + best_gt = gt_list[np.argmax(ious)] + + # interval: [start_frame, end_frame) + proposal_center = (self.start_frame + self.end_frame - 1) / 2 + gt_center = (best_gt.start_frame + best_gt.end_frame - 1) / 2 + proposal_size = self.end_frame - self.start_frame + gt_size = best_gt.end_frame - best_gt.start_frame + + # Get regression targets: + # (1). Localization regression target: + # center shift proportional to the proposal duration + # (2). Duration/Size regression target: + # logarithm of the groundtruth duration over proposal duration + + self.loc_reg = (gt_center - proposal_center) / proposal_size + self.size_reg = np.log(gt_size / proposal_size) + self.regression_targets = ([self.loc_reg, self.size_reg] + if self.loc_reg is not None else [0., 0.]) + + +@DATASETS.register_module() +class SSNDataset(BaseDataset): + """Proposal frame dataset for Structured Segment Networks. + + Based on proposal information, the dataset loads raw frames and applies + specified transforms to return a dict containing the frame tensors and + other information. + + The ann_file is a text file with multiple lines and each + video's information takes up several lines. This file can be a normalized + file with percent or standard file with specific frame indexes. If the file + is a normalized file, it will be converted into a standard file first. + + Template information of a video in a standard file: + .. code-block:: txt + # index + video_id + num_frames + fps + num_gts + label, start_frame, end_frame + label, start_frame, end_frame + ... + num_proposals + label, best_iou, overlap_self, start_frame, end_frame + label, best_iou, overlap_self, start_frame, end_frame + ... + + Example of a standard annotation file: + .. code-block:: txt + # 0 + video_validation_0000202 + 5666 + 1 + 3 + 8 130 185 + 8 832 1136 + 8 1303 1381 + 5 + 8 0.0620 0.0620 790 5671 + 8 0.1656 0.1656 790 2619 + 8 0.0833 0.0833 3945 5671 + 8 0.0960 0.0960 4173 5671 + 8 0.0614 0.0614 3327 5671 + + Args: + ann_file (str): Path to the annotation file. + pipeline (list[dict | callable]): A sequence of data transforms. + train_cfg (dict): Config for training. + test_cfg (dict): Config for testing. + data_prefix (str): Path to a directory where videos are held. + test_mode (bool): Store True when building test or validation dataset. + Default: False. + filename_tmpl (str): Template for each filename. + Default: 'img_{:05}.jpg'. + start_index (int): Specify a start index for frames in consideration of + different filename format. Default: 1. + modality (str): Modality of data. Support 'RGB', 'Flow'. + Default: 'RGB'. + video_centric (bool): Whether to sample proposals just from + this video or sample proposals randomly from the entire dataset. + Default: True. + reg_normalize_constants (list): Regression target normalized constants, + including mean and standard deviation of location and duration. + body_segments (int): Number of segments in course period. + Default: 5. + aug_segments (list[int]): Number of segments in starting and + ending period. Default: (2, 2). + aug_ratio (int | float | tuple[int | float]): The ratio of the length + of augmentation to that of the proposal. Defualt: (0.5, 0.5). + clip_len (int): Frames of each sampled output clip. + Default: 1. + frame_interval (int): Temporal interval of adjacent sampled frames. + Default: 1. + filter_gt (bool): Whether to filter videos with no annotation + during training. 
Default: True. + use_regression (bool): Whether to perform regression. Default: True. + verbose (bool): Whether to print full information or not. + Default: False. + """ + + def __init__(self, + ann_file, + pipeline, + train_cfg, + test_cfg, + data_prefix, + test_mode=False, + filename_tmpl='img_{:05d}.jpg', + start_index=1, + modality='RGB', + video_centric=True, + reg_normalize_constants=None, + body_segments=5, + aug_segments=(2, 2), + aug_ratio=(0.5, 0.5), + clip_len=1, + frame_interval=1, + filter_gt=True, + use_regression=True, + verbose=False): + self.logger = get_root_logger() + super().__init__( + ann_file, + pipeline, + data_prefix=data_prefix, + test_mode=test_mode, + start_index=start_index, + modality=modality) + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self.assigner = train_cfg.ssn.assigner + self.sampler = train_cfg.ssn.sampler + self.evaluater = test_cfg.ssn.evaluater + self.verbose = verbose + self.filename_tmpl = filename_tmpl + + if filter_gt or not test_mode: + valid_inds = [ + i for i, video_info in enumerate(self.video_infos) + if len(video_info['gts']) > 0 + ] + self.logger.info(f'{len(valid_inds)} out of {len(self.video_infos)} ' + f'videos are valid.') + self.video_infos = [self.video_infos[i] for i in valid_inds] + + # construct three pools: + # 1. Positive(Foreground) + # 2. Background + # 3. Incomplete + self.positive_pool = [] + self.background_pool = [] + self.incomplete_pool = [] + self.construct_proposal_pools() + + if reg_normalize_constants is None: + self.reg_norm_consts = self._compute_reg_normalize_constants() + else: + self.reg_norm_consts = reg_normalize_constants + self.video_centric = video_centric + self.body_segments = body_segments + self.aug_segments = aug_segments + self.aug_ratio = _pair(aug_ratio) + if not mmcv.is_tuple_of(self.aug_ratio, (int, float)): + raise TypeError(f'aug_ratio should be int, float' + f'or tuple of int and float, ' + f'but got {type(aug_ratio)}') + assert len(self.aug_ratio) == 2 + + total_ratio = ( + self.sampler.positive_ratio + self.sampler.background_ratio + + self.sampler.incomplete_ratio) + self.positive_per_video = int( + self.sampler.num_per_video * + (self.sampler.positive_ratio / total_ratio)) + self.background_per_video = int( + self.sampler.num_per_video * + (self.sampler.background_ratio / total_ratio)) + self.incomplete_per_video = ( + self.sampler.num_per_video - self.positive_per_video - + self.background_per_video) + + self.test_interval = self.test_cfg.ssn.sampler.test_interval + # number of consecutive frames + self.clip_len = clip_len + # number of steps (sparse sampling for efficiency of io) + self.frame_interval = frame_interval + + # test mode or not + self.filter_gt = filter_gt + self.use_regression = use_regression + self.test_mode = test_mode + + # yapf: disable + if self.verbose: + self.logger.info(f""" + SSNDataset: proposal file {self.proposal_file} parsed. + + There are {len(self.positive_pool) + len(self.background_pool) + + len(self.incomplete_pool)} usable proposals from {len(self.video_infos)} videos. 
+ {len(self.positive_pool)} positive proposals + {len(self.incomplete_pool)} incomplete proposals + {len(self.background_pool)} background proposals + + Sample config: + FG/BG/INCOMP: {self.positive_per_video}/{self.background_per_video}/{self.incomplete_per_video} # noqa:E501 + Video Centric: {self.video_centric} + + Regression Normalization Constants: + Location: mean {self.reg_norm_consts[0][0]:.05f} std {self.reg_norm_consts[1][0]:.05f} # noqa: E501 + Duration: mean {self.reg_norm_consts[0][1]:.05f} std {self.reg_norm_consts[1][1]:.05f} # noqa: E501 + """) + # yapf: enable + else: + self.logger.info( + f'SSNDataset: proposal file {self.proposal_file} parsed.') + + def load_annotations(self): + """Load annotation file to get video information.""" + video_infos = [] + if 'normalized_' in self.ann_file: + self.proposal_file = self.ann_file.replace('normalized_', '') + if not osp.exists(self.proposal_file): + raise Exception(f'Please refer to `$MMACTION2/tools/data` to' + f'denormalize {self.ann_file}.') + else: + self.proposal_file = self.ann_file + proposal_infos = load_localize_proposal_file(self.proposal_file) + # proposal_info:[video_id, num_frames, gt_list, proposal_list] + # gt_list member: [label, start_frame, end_frame] + # proposal_list member: [label, best_iou, overlap_self, + # start_frame, end_frame] + for proposal_info in proposal_infos: + if self.data_prefix is not None: + frame_dir = osp.join(self.data_prefix, proposal_info[0]) + num_frames = int(proposal_info[1]) + # gts:start, end, num_frames, class_label, tIoU=1 + gts = [] + for x in proposal_info[2]: + if int(x[2]) > int(x[1]) and int(x[1]) < num_frames: + ssn_instance = SSNInstance( + int(x[1]), + int(x[2]), + num_frames, + label=int(x[0]), + best_iou=1.0) + gts.append(ssn_instance) + # proposals:start, end, num_frames, class_label + # tIoU=best_iou, overlap_self + proposals = [] + for x in proposal_info[3]: + if int(x[4]) > int(x[3]) and int(x[3]) < num_frames: + ssn_instance = SSNInstance( + int(x[3]), + int(x[4]), + num_frames, + label=int(x[0]), + best_iou=float(x[1]), + overlap_self=float(x[2])) + proposals.append(ssn_instance) + video_infos.append( + dict( + frame_dir=frame_dir, + video_id=proposal_info[0], + total_frames=num_frames, + gts=gts, + proposals=proposals)) + return video_infos + + def results_to_detections(self, results, top_k=2000, **kwargs): + """Convert prediction results into detections. + + Args: + results (list): Prediction results. + top_k (int): Number of top results. Default: 2000. + + Returns: + list: Detection results. 
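The per-video FG/BG/INCOMP quotas reported above come from the ratio arithmetic in `SSNDataset.__init__`. A small numeric check of that split, with hypothetical ratio values rather than any shipped config:

```python
def per_video_quotas(num_per_video, positive_ratio, background_ratio, incomplete_ratio):
    """Split a per-video proposal budget by the sampler ratios, as in SSNDataset.__init__."""
    total = positive_ratio + background_ratio + incomplete_ratio
    positive = int(num_per_video * (positive_ratio / total))
    background = int(num_per_video * (background_ratio / total))
    incomplete = num_per_video - positive - background  # remainder absorbs rounding
    return positive, background, incomplete

# Hypothetical sampler config: 8 proposals per video with ratios 1 : 1 : 6.
print(per_video_quotas(8, positive_ratio=1, background_ratio=1, incomplete_ratio=6))  # (1, 1, 6)
```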
+ """ + num_classes = results[0]['activity_scores'].shape[1] - 1 + detections = [dict() for _ in range(num_classes)] + + for idx in range(len(self)): + video_id = self.video_infos[idx]['video_id'] + relative_proposals = results[idx]['relative_proposal_list'] + if len(relative_proposals[0].shape) == 3: + relative_proposals = np.squeeze(relative_proposals, 0) + + activity_scores = results[idx]['activity_scores'] + completeness_scores = results[idx]['completeness_scores'] + regression_scores = results[idx]['bbox_preds'] + if regression_scores is None: + regression_scores = np.zeros( + (len(relative_proposals), num_classes, 2), + dtype=np.float32) + regression_scores = regression_scores.reshape((-1, num_classes, 2)) + + if top_k <= 0: + combined_scores = ( + softmax(activity_scores[:, 1:], dim=1) * + np.exp(completeness_scores)) + for i in range(num_classes): + center_scores = regression_scores[:, i, 0][:, None] + duration_scores = regression_scores[:, i, 1][:, None] + detections[i][video_id] = np.concatenate( + (relative_proposals, combined_scores[:, i][:, None], + center_scores, duration_scores), + axis=1) + else: + combined_scores = ( + softmax(activity_scores[:, 1:], dim=1) * + np.exp(completeness_scores)) + keep_idx = np.argsort(combined_scores.ravel())[-top_k:] + for k in keep_idx: + class_idx = k % num_classes + proposal_idx = k // num_classes + new_item = [ + relative_proposals[proposal_idx, 0], + relative_proposals[proposal_idx, + 1], combined_scores[proposal_idx, + class_idx], + regression_scores[proposal_idx, class_idx, + 0], regression_scores[proposal_idx, + class_idx, 1] + ] + if video_id not in detections[class_idx]: + detections[class_idx][video_id] = np.array([new_item]) + else: + detections[class_idx][video_id] = np.vstack( + [detections[class_idx][video_id], new_item]) + + return detections + + def evaluate(self, + results, + metrics='mAP', + metric_options=dict(mAP=dict(eval_dataset='thumos14')), + logger=None, + **deprecated_kwargs): + """Evaluation in SSN proposal dataset. + + Args: + results (list[dict]): Output results. + metrics (str | sequence[str]): Metrics to be performed. + Defaults: 'mAP'. + metric_options (dict): Dict for metric options. Options are + ``eval_dataset`` for ``mAP``. + Default: ``dict(mAP=dict(eval_dataset='thumos14'))``. + logger (logging.Logger | None): Logger for recording. + Default: None. + deprecated_kwargs (dict): Used for containing deprecated arguments. + See 'https://github.com/open-mmlab/mmaction2/pull/286'. + + Returns: + dict: Evaluation results for evaluation metrics. 
+ """ + # Protect ``metric_options`` since it uses mutable value as default + metric_options = copy.deepcopy(metric_options) + + if deprecated_kwargs != {}: + warnings.warn( + 'Option arguments for metrics has been changed to ' + "`metric_options`, See 'https://github.com/open-mmlab/mmaction2/pull/286' " # noqa: E501 + 'for more details') + metric_options['mAP'] = dict(metric_options['mAP'], + **deprecated_kwargs) + + if not isinstance(results, list): + raise TypeError(f'results must be a list, but got {type(results)}') + assert len(results) == len(self), ( + f'The length of results is not equal to the dataset len: ' + f'{len(results)} != {len(self)}') + + metrics = metrics if isinstance(metrics, (list, tuple)) else [metrics] + allowed_metrics = ['mAP'] + for metric in metrics: + if metric not in allowed_metrics: + raise KeyError(f'metric {metric} is not supported') + + detections = self.results_to_detections(results, **self.evaluater) + + if self.use_regression: + self.logger.info('Performing location regression') + for class_idx, _ in enumerate(detections): + detections[class_idx] = { + k: perform_regression(v) + for k, v in detections[class_idx].items() + } + self.logger.info('Regression finished') + + self.logger.info('Performing NMS') + for class_idx, _ in enumerate(detections): + detections[class_idx] = { + k: temporal_nms(v, self.evaluater.nms) + for k, v in detections[class_idx].items() + } + self.logger.info('NMS finished') + + # get gts + all_gts = self.get_all_gts() + for class_idx, _ in enumerate(detections): + if class_idx not in all_gts: + all_gts[class_idx] = dict() + + # get predictions + plain_detections = {} + for class_idx, _ in enumerate(detections): + detection_list = [] + for video, dets in detections[class_idx].items(): + detection_list.extend([[video, class_idx] + x[:3] + for x in dets.tolist()]) + plain_detections[class_idx] = detection_list + + eval_results = {} + for metric in metrics: + if metric == 'mAP': + eval_dataset = metric_options.setdefault('mAP', {}).setdefault( + 'eval_dataset', 'thumos14') + if eval_dataset == 'thumos14': + iou_range = np.arange(0.1, 1.0, .1) + ap_values = eval_ap(plain_detections, all_gts, iou_range) + map_ious = ap_values.mean(axis=0) + self.logger.info('Evaluation finished') + + for iou, map_iou in zip(iou_range, map_ious): + eval_results[f'mAP@{iou:.02f}'] = map_iou + + return eval_results + + def construct_proposal_pools(self): + """Construct positve proposal pool, incomplete proposal pool and + background proposal pool of the entire dataset.""" + for video_info in self.video_infos: + positives = self.get_positives( + video_info['gts'], video_info['proposals'], + self.assigner.positive_iou_threshold, + self.sampler.add_gt_as_proposals) + self.positive_pool.extend([(video_info['video_id'], proposal) + for proposal in positives]) + + incompletes, backgrounds = self.get_negatives( + video_info['proposals'], + self.assigner.incomplete_iou_threshold, + self.assigner.background_iou_threshold, + self.assigner.background_coverage_threshold, + self.assigner.incomplete_overlap_threshold) + self.incomplete_pool.extend([(video_info['video_id'], proposal) + for proposal in incompletes]) + self.background_pool.extend([video_info['video_id'], proposal] + for proposal in backgrounds) + + def get_all_gts(self): + """Fetch groundtruth instances of the entire dataset.""" + gts = {} + for video_info in self.video_infos: + video = video_info['video_id'] + for gt in video_info['gts']: + class_idx = gt.label - 1 + # gt_info: [relative_start, 
relative_end] + gt_info = [ + gt.start_frame / video_info['total_frames'], + gt.end_frame / video_info['total_frames'] + ] + gts.setdefault(class_idx, {}).setdefault(video, + []).append(gt_info) + + return gts + + @staticmethod + def get_positives(gts, proposals, positive_threshold, with_gt=True): + """Get positive/foreground proposals. + + Args: + gts (list): List of groundtruth instances(:obj:`SSNInstance`). + proposals (list): List of proposal instances(:obj:`SSNInstance`). + positive_threshold (float): Minimum threshold of overlap of + positive/foreground proposals and groundtruths. + with_gt (bool): Whether to include groundtruth instances in + positive proposals. Default: True. + + Returns: + list[:obj:`SSNInstance`]: (positives), positives is a list + comprised of positive proposal instances. + """ + positives = [ + proposal for proposal in proposals + if proposal.best_iou > positive_threshold + ] + + if with_gt: + positives.extend(gts) + + for proposal in positives: + proposal.compute_regression_targets(gts) + + return positives + + @staticmethod + def get_negatives(proposals, + incomplete_iou_threshold, + background_iou_threshold, + background_coverage_threshold=0.01, + incomplete_overlap_threshold=0.7): + """Get negative proposals, including incomplete proposals and + background proposals. + + Args: + proposals (list): List of proposal instances(:obj:`SSNInstance`). + incomplete_iou_threshold (float): Maximum threshold of overlap + of incomplete proposals and groundtruths. + background_iou_threshold (float): Maximum threshold of overlap + of background proposals and groundtruths. + background_coverage_threshold (float): Minimum coverage + of background proposals in video duration. Default: 0.01. + incomplete_overlap_threshold (float): Minimum percent of incomplete + proposals' own span contained in a groundtruth instance. + Default: 0.7. + + Returns: + list[:obj:`SSNInstance`]: (incompletes, backgrounds), incompletes + and backgrounds are lists comprised of incomplete + proposal instances and background proposal instances. + """ + incompletes = [] + backgrounds = [] + + for proposal in proposals: + if (proposal.best_iou < incomplete_iou_threshold + and proposal.overlap_self > incomplete_overlap_threshold): + incompletes.append(proposal) + elif (proposal.best_iou < background_iou_threshold + and proposal.coverage > background_coverage_threshold): + backgrounds.append(proposal) + + return incompletes, backgrounds + + def _video_centric_sampling(self, record): + """Sample proposals from the this video instance. + + Args: + record (dict): Information of the video instance(video_info[idx]). + key: frame_dir, video_id, total_frames, + gts: List of groundtruth instances(:obj:`SSNInstance`). + proposals: List of proposal instances(:obj:`SSNInstance`). + """ + positives = self.get_positives(record['gts'], record['proposals'], + self.assigner.positive_iou_threshold, + self.sampler.add_gt_as_proposals) + incompletes, backgrounds = self.get_negatives( + record['proposals'], self.assigner.incomplete_iou_threshold, + self.assigner.background_iou_threshold, + self.assigner.background_coverage_threshold, + self.assigner.incomplete_overlap_threshold) + + def sample_video_proposals(proposal_type, video_id, video_pool, + num_requested_proposals, dataset_pool): + """This method will sample proposals from the this video pool. If + the video pool is empty, it will fetch from the dataset pool + (collect proposal of the entire dataset). + + Args: + proposal_type (int): Type id of proposal. 
+ Positive/Foreground: 0 + Negative: + Incomplete: 1 + Background: 2 + video_id (str): Name of the video. + video_pool (list): Pool comprised of proposals in this video. + num_requested_proposals (int): Number of proposals + to be sampled. + dataset_pool (list): Proposals of the entire dataset. + + Returns: + list[(str, :obj:`SSNInstance`), int]: + video_id (str): Name of the video. + :obj:`SSNInstance`: Instance of class SSNInstance. + proposal_type (int): Type of proposal. + """ + + if len(video_pool) == 0: + idx = np.random.choice( + len(dataset_pool), num_requested_proposals, replace=False) + return [(dataset_pool[x], proposal_type) for x in idx] + + replicate = len(video_pool) < num_requested_proposals + idx = np.random.choice( + len(video_pool), num_requested_proposals, replace=replicate) + return [((video_id, video_pool[x]), proposal_type) for x in idx] + + out_proposals = [] + out_proposals.extend( + sample_video_proposals(0, record['video_id'], positives, + self.positive_per_video, + self.positive_pool)) + out_proposals.extend( + sample_video_proposals(1, record['video_id'], incompletes, + self.incomplete_per_video, + self.incomplete_pool)) + out_proposals.extend( + sample_video_proposals(2, record['video_id'], backgrounds, + self.background_per_video, + self.background_pool)) + + return out_proposals + + def _random_sampling(self): + """Randomly sample proposals from the entire dataset.""" + out_proposals = [] + + positive_idx = np.random.choice( + len(self.positive_pool), + self.positive_per_video, + replace=len(self.positive_pool) < self.positive_per_video) + out_proposals.extend([(self.positive_pool[x], 0) + for x in positive_idx]) + incomplete_idx = np.random.choice( + len(self.incomplete_pool), + self.incomplete_per_video, + replace=len(self.incomplete_pool) < self.incomplete_per_video) + out_proposals.extend([(self.incomplete_pool[x], 1) + for x in incomplete_idx]) + background_idx = np.random.choice( + len(self.background_pool), + self.background_per_video, + replace=len(self.background_pool) < self.background_per_video) + out_proposals.extend([(self.background_pool[x], 2) + for x in background_idx]) + + return out_proposals + + def _get_stage(self, proposal, num_frames): + """Fetch the scale factor of starting and ending stage and get the + stage split. + + Args: + proposal (:obj:`SSNInstance`): Proposal instance. + num_frames (int): Total frames of the video. + + Returns: + tuple[float, float, list]: (starting_scale_factor, + ending_scale_factor, stage_split), starting_scale_factor is + the ratio of the effective sampling length to augment length + in starting stage, ending_scale_factor is the ratio of the + effective sampling length to augment length in ending stage, + stage_split is ending segment id of starting, course and + ending stage. 
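The fallback in `sample_video_proposals` is the key idea of video-centric sampling: draw from the per-video pool when it has entries (with replacement only if it is too small), and borrow from the dataset-wide pool otherwise. A simplified sketch of that decision, which returns bare pool entries and drops the proposal-type tag kept by the real method:

```python
import numpy as np

def sample_from_pool(video_pool, num_requested, dataset_pool):
    """Mirror the fallback in sample_video_proposals (simplified)."""
    if len(video_pool) == 0:
        idx = np.random.choice(len(dataset_pool), num_requested, replace=False)
        return [dataset_pool[i] for i in idx]
    replicate = len(video_pool) < num_requested   # allow repeats only when the pool is short
    idx = np.random.choice(len(video_pool), num_requested, replace=replicate)
    return [video_pool[i] for i in idx]

# Toy pools of proposal ids.
print(sample_from_pool(['a', 'b'], 4, dataset_pool=list('uvwxyz')))  # repeats from the video pool
print(sample_from_pool([], 3, dataset_pool=list('uvwxyz')))          # falls back to the dataset pool
```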
+ """ + # proposal interval: [start_frame, end_frame) + start_frame = proposal.start_frame + end_frame = proposal.end_frame + ori_clip_len = self.clip_len * self.frame_interval + + duration = end_frame - start_frame + assert duration != 0 + + valid_starting = max(0, + start_frame - int(duration * self.aug_ratio[0])) + valid_ending = min(num_frames - ori_clip_len + 1, + end_frame - 1 + int(duration * self.aug_ratio[1])) + + valid_starting_length = start_frame - valid_starting - ori_clip_len + valid_ending_length = (valid_ending - end_frame + 1) - ori_clip_len + + starting_scale_factor = ((valid_starting_length + ori_clip_len + 1) / + (duration * self.aug_ratio[0])) + ending_scale_factor = (valid_ending_length + ori_clip_len + 1) / ( + duration * self.aug_ratio[1]) + + aug_start, aug_end = self.aug_segments + stage_split = [ + aug_start, aug_start + self.body_segments, + aug_start + self.body_segments + aug_end + ] + + return starting_scale_factor, ending_scale_factor, stage_split + + def _compute_reg_normalize_constants(self): + """Compute regression target normalized constants.""" + if self.verbose: + self.logger.info('Compute regression target normalized constants') + targets = [] + for video_info in self.video_infos: + positives = self.get_positives( + video_info['gts'], video_info['proposals'], + self.assigner.positive_iou_threshold, False) + for positive in positives: + targets.append(list(positive.regression_targets)) + + return np.array((np.mean(targets, axis=0), np.std(targets, axis=0))) + + def prepare_train_frames(self, idx): + """Prepare the frames for training given the index.""" + results = copy.deepcopy(self.video_infos[idx]) + results['filename_tmpl'] = self.filename_tmpl + results['modality'] = self.modality + results['start_index'] = self.start_index + + if self.video_centric: + # yapf: disable + results['out_proposals'] = self._video_centric_sampling(self.video_infos[idx]) # noqa: E501 + # yapf: enable + else: + results['out_proposals'] = self._random_sampling() + + out_proposal_scale_factor = [] + out_proposal_type = [] + out_proposal_labels = [] + out_proposal_reg_targets = [] + + for idx, proposal in enumerate(results['out_proposals']): + # proposal: [(video_id, SSNInstance), proposal_type] + num_frames = proposal[0][1].num_video_frames + + (starting_scale_factor, ending_scale_factor, + _) = self._get_stage(proposal[0][1], num_frames) + + # proposal[1]: Type id of proposal. + # Positive/Foreground: 0 + # Negative: + # Incomplete: 1 + # Background: 2 + + # Positivte/Foreground proposal + if proposal[1] == 0: + label = proposal[0][1].label + # Incomplete proposal + elif proposal[1] == 1: + label = proposal[0][1].label + # Background proposal + elif proposal[1] == 2: + label = 0 + else: + raise ValueError(f'Proposal type should be 0, 1, or 2,' + f'but got {proposal[1]}') + out_proposal_scale_factor.append( + [starting_scale_factor, ending_scale_factor]) + if not isinstance(label, int): + raise TypeError(f'proposal_label must be an int,' + f'but got {type(label)}') + out_proposal_labels.append(label) + out_proposal_type.append(proposal[1]) + + reg_targets = proposal[0][1].regression_targets + if proposal[1] == 0: + # Normalize regression targets of positive proposals. 
+ reg_targets = ((reg_targets[0] - self.reg_norm_consts[0][0]) / + self.reg_norm_consts[1][0], + (reg_targets[1] - self.reg_norm_consts[0][1]) / + self.reg_norm_consts[1][1]) + out_proposal_reg_targets.append(reg_targets) + + results['reg_targets'] = np.array( + out_proposal_reg_targets, dtype=np.float32) + results['proposal_scale_factor'] = np.array( + out_proposal_scale_factor, dtype=np.float32) + results['proposal_labels'] = np.array(out_proposal_labels) + results['proposal_type'] = np.array(out_proposal_type) + + return self.pipeline(results) + + def prepare_test_frames(self, idx): + """Prepare the frames for testing given the index.""" + results = copy.deepcopy(self.video_infos[idx]) + results['filename_tmpl'] = self.filename_tmpl + results['modality'] = self.modality + results['start_index'] = self.start_index + + proposals = results['proposals'] + num_frames = results['total_frames'] + ori_clip_len = self.clip_len * self.frame_interval + frame_ticks = np.arange( + 0, num_frames - ori_clip_len, self.test_interval, dtype=int) + 1 + + num_sampled_frames = len(frame_ticks) + + if len(proposals) == 0: + proposals.append(SSNInstance(0, num_frames - 1, num_frames)) + + relative_proposal_list = [] + proposal_tick_list = [] + scale_factor_list = [] + + for proposal in proposals: + relative_proposal = (proposal.start_frame / num_frames, + proposal.end_frame / num_frames) + relative_duration = relative_proposal[1] - relative_proposal[0] + relative_starting_duration = relative_duration * self.aug_ratio[0] + relative_ending_duration = relative_duration * self.aug_ratio[1] + relative_starting = ( + relative_proposal[0] - relative_starting_duration) + relative_ending = relative_proposal[1] + relative_ending_duration + + real_relative_starting = max(0.0, relative_starting) + real_relative_ending = min(1.0, relative_ending) + + starting_scale_factor = ( + (relative_proposal[0] - real_relative_starting) / + relative_starting_duration) + ending_scale_factor = ( + (real_relative_ending - relative_proposal[1]) / + relative_ending_duration) + + proposal_ranges = (real_relative_starting, *relative_proposal, + real_relative_ending) + proposal_ticks = (np.array(proposal_ranges) * + num_sampled_frames).astype(np.int32) + + relative_proposal_list.append(relative_proposal) + proposal_tick_list.append(proposal_ticks) + scale_factor_list.append( + (starting_scale_factor, ending_scale_factor)) + + results['relative_proposal_list'] = np.array( + relative_proposal_list, dtype=np.float32) + results['scale_factor_list'] = np.array( + scale_factor_list, dtype=np.float32) + results['proposal_tick_list'] = np.array( + proposal_tick_list, dtype=np.int32) + results['reg_norm_consts'] = self.reg_norm_consts + + return self.pipeline(results) diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/datasets/video_dataset.py b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/video_dataset.py new file mode 100644 index 0000000..bf9a30a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/datasets/video_dataset.py @@ -0,0 +1,67 @@ +import os.path as osp + +import torch + +from .base import BaseDataset +from .registry import DATASETS + + +@DATASETS.register_module() +class VideoDataset(BaseDataset): + """Video dataset for action recognition. + + The dataset loads raw videos and apply specified transforms to return a + dict containing the frame tensors and other information. 
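The regression targets being normalized above are the ones computed by `SSNInstance.compute_regression_targets`: a center shift relative to the proposal length and a log duration ratio, later standardized with dataset-level mean/std constants. The sketch below restates that arithmetic on a hypothetical proposal; the normalization constants are made up, whereas the real ones come from `_compute_reg_normalize_constants`.

```python
import numpy as np

def regression_targets(prop_start, prop_end, gt_start, gt_end):
    """Location/size targets as in SSNInstance.compute_regression_targets
    (frame intervals treated as [start, end))."""
    prop_center = (prop_start + prop_end - 1) / 2
    gt_center = (gt_start + gt_end - 1) / 2
    prop_size = prop_end - prop_start
    loc_reg = (gt_center - prop_center) / prop_size          # center shift / proposal length
    size_reg = np.log((gt_end - gt_start) / prop_size)       # log duration ratio
    return loc_reg, size_reg

# Hypothetical proposal [100, 200) against groundtruth [120, 240).
loc, size = regression_targets(100, 200, 120, 240)
# Training-time standardization with made-up (mean, std) constants per target.
reg_norm_consts = np.array([[0.0, 0.0], [0.2, 0.4]])
normalized = ((loc - reg_norm_consts[0][0]) / reg_norm_consts[1][0],
              (size - reg_norm_consts[0][1]) / reg_norm_consts[1][1])
print(loc, size, normalized)
```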
+ + The ann_file is a text file with multiple lines, and each line indicates + a sample video with the filepath and label, which are split with a + whitespace. Example of a annotation file: + + .. code-block:: txt + + some/path/000.mp4 1 + some/path/001.mp4 1 + some/path/002.mp4 2 + some/path/003.mp4 2 + some/path/004.mp4 3 + some/path/005.mp4 3 + + + Args: + ann_file (str): Path to the annotation file. + pipeline (list[dict | callable]): A sequence of data transforms. + start_index (int): Specify a start index for frames in consideration of + different filename format. However, when taking videos as input, + it should be set to 0, since frames loaded from videos count + from 0. Default: 0. + **kwargs: Keyword arguments for ``BaseDataset``. + """ + + def __init__(self, ann_file, pipeline, start_index=0, **kwargs): + super().__init__(ann_file, pipeline, start_index=start_index, **kwargs) + + def load_annotations(self): + """Load annotation file to get video information.""" + if self.ann_file.endswith('.json'): + return self.load_json_annotations() + + video_infos = [] + with open(self.ann_file, 'r') as fin: + for line in fin: + line_split = line.strip().split() + if self.multi_class: + assert self.num_classes is not None + filename, label = line_split[0], line_split[1:] + label = list(map(int, label)) + onehot = torch.zeros(self.num_classes) + onehot[label] = 1.0 + else: + filename, label = line_split + label = int(label) + if self.data_prefix is not None: + filename = osp.join(self.data_prefix, filename) + video_infos.append( + dict( + filename=filename, + label=onehot if self.multi_class else label)) + return video_infos diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/localization/__init__.py b/Downstream/Open-Set-Action-Recognition/mmaction/localization/__init__.py new file mode 100644 index 0000000..2a0fd9e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/localization/__init__.py @@ -0,0 +1,10 @@ +from .bsn_utils import generate_bsp_feature, generate_candidate_proposals +from .proposal_utils import soft_nms, temporal_iop, temporal_iou +from .ssn_utils import (eval_ap, load_localize_proposal_file, + perform_regression, temporal_nms) + +__all__ = [ + 'generate_candidate_proposals', 'generate_bsp_feature', 'temporal_iop', + 'temporal_iou', 'soft_nms', 'load_localize_proposal_file', + 'perform_regression', 'temporal_nms', 'eval_ap' +] diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/localization/bsn_utils.py b/Downstream/Open-Set-Action-Recognition/mmaction/localization/bsn_utils.py new file mode 100644 index 0000000..d748227 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/localization/bsn_utils.py @@ -0,0 +1,267 @@ +import os.path as osp + +import numpy as np + +from .proposal_utils import temporal_iop, temporal_iou + + +def generate_candidate_proposals(video_list, + video_infos, + tem_results_dir, + temporal_scale, + peak_threshold, + tem_results_ext='.csv', + result_dict=None): + """Generate Candidate Proposals with given temporal evalutation results. + Each proposal file will contain: + 'tmin,tmax,tmin_score,tmax_score,score,match_iou,match_ioa'. + + Args: + video_list (list[int]): List of video indexs to generate proposals. + video_infos (list[dict]): List of video_info dict that contains + 'video_name', 'duration_frame', 'duration_second', + 'feature_frame', and 'annotations'. + tem_results_dir (str): Directory to load temporal evaluation + results. + temporal_scale (int): The number (scale) on temporal axis. 
+ peak_threshold (float): The threshold for proposal generation. + tem_results_ext (str): File extension for temporal evaluation + model output. Default: '.csv'. + result_dict (dict | None): The dict to save the results. Default: None. + + Returns: + dict: A dict contains video_name as keys and proposal list as value. + If result_dict is not None, save the results to it. + """ + if tem_results_ext != '.csv': + raise NotImplementedError('Only support csv format now.') + + tscale = temporal_scale + tgap = 1. / tscale + proposal_dict = {} + for video_index in video_list: + video_name = video_infos[video_index]['video_name'] + tem_path = osp.join(tem_results_dir, video_name + tem_results_ext) + tem_results = np.loadtxt( + tem_path, dtype=np.float32, delimiter=',', skiprows=1) + start_scores = tem_results[:, 1] + end_scores = tem_results[:, 2] + + max_start = max(start_scores) + max_end = max(end_scores) + + start_bins = np.zeros(len(start_scores)) + start_bins[[0, -1]] = 1 + end_bins = np.zeros(len(end_scores)) + end_bins[[0, -1]] = 1 + for idx in range(1, tscale - 1): + if start_scores[idx] > start_scores[ + idx + 1] and start_scores[idx] > start_scores[idx - 1]: + start_bins[idx] = 1 + elif start_scores[idx] > (peak_threshold * max_start): + start_bins[idx] = 1 + if end_scores[idx] > end_scores[ + idx + 1] and end_scores[idx] > end_scores[idx - 1]: + end_bins[idx] = 1 + elif end_scores[idx] > (peak_threshold * max_end): + end_bins[idx] = 1 + + tmin_list = [] + tmin_score_list = [] + tmax_list = [] + tmax_score_list = [] + for idx in range(tscale): + if start_bins[idx] == 1: + tmin_list.append(tgap / 2 + tgap * idx) + tmin_score_list.append(start_scores[idx]) + if end_bins[idx] == 1: + tmax_list.append(tgap / 2 + tgap * idx) + tmax_score_list.append(end_scores[idx]) + + new_props = [] + for tmax, tmax_score in zip(tmax_list, tmax_score_list): + for tmin, tmin_score in zip(tmin_list, tmin_score_list): + if tmin >= tmax: + break + new_props.append([tmin, tmax, tmin_score, tmax_score]) + + new_props = np.stack(new_props) + + score = (new_props[:, 2] * new_props[:, 3]).reshape(-1, 1) + new_props = np.concatenate((new_props, score), axis=1) + + new_props = new_props[new_props[:, -1].argsort()[::-1]] + video_info = video_infos[video_index] + video_frame = video_info['duration_frame'] + video_second = video_info['duration_second'] + feature_frame = video_info['feature_frame'] + corrected_second = float(feature_frame) / video_frame * video_second + + gt_tmins = [] + gt_tmaxs = [] + for annotations in video_info['annotations']: + gt_tmins.append(annotations['segment'][0] / corrected_second) + gt_tmaxs.append(annotations['segment'][1] / corrected_second) + + new_iou_list = [] + new_ioa_list = [] + for new_prop in new_props: + new_iou = max( + temporal_iou(new_prop[0], new_prop[1], gt_tmins, gt_tmaxs)) + new_ioa = max( + temporal_iop(new_prop[0], new_prop[1], gt_tmins, gt_tmaxs)) + new_iou_list.append(new_iou) + new_ioa_list.append(new_ioa) + + new_iou_list = np.array(new_iou_list).reshape(-1, 1) + new_ioa_list = np.array(new_ioa_list).reshape(-1, 1) + new_props = np.concatenate((new_props, new_iou_list), axis=1) + new_props = np.concatenate((new_props, new_ioa_list), axis=1) + proposal_dict[video_name] = new_props + if result_dict is not None: + result_dict[video_name] = new_props + return proposal_dict + + +def generate_bsp_feature(video_list, + video_infos, + tem_results_dir, + pgm_proposals_dir, + top_k=1000, + bsp_boundary_ratio=0.2, + num_sample_start=8, + num_sample_end=8, + num_sample_action=16, 
+ num_sample_interp=3, + tem_results_ext='.csv', + pgm_proposal_ext='.csv', + result_dict=None): + """Generate Boundary-Sensitive Proposal Feature with given proposals. + + Args: + video_list (list[int]): List of video indexs to generate bsp_feature. + video_infos (list[dict]): List of video_info dict that contains + 'video_name'. + tem_results_dir (str): Directory to load temporal evaluation + results. + pgm_proposals_dir (str): Directory to load proposals. + top_k (int): Number of proposals to be considered. Default: 1000 + bsp_boundary_ratio (float): Ratio for proposal boundary + (start/end). Default: 0.2. + num_sample_start (int): Num of samples for actionness in + start region. Default: 8. + num_sample_end (int): Num of samples for actionness in end region. + Default: 8. + num_sample_action (int): Num of samples for actionness in center + region. Default: 16. + num_sample_interp (int): Num of samples for interpolation for + each sample point. Default: 3. + tem_results_ext (str): File extension for temporal evaluation + model output. Default: '.csv'. + pgm_proposal_ext (str): File extension for proposals. Default: '.csv'. + result_dict (dict | None): The dict to save the results. Default: None. + + Returns: + bsp_feature_dict (dict): A dict contains video_name as keys and + bsp_feature as value. If result_dict is not None, save the + results to it. + """ + if tem_results_ext != '.csv' or pgm_proposal_ext != '.csv': + raise NotImplementedError('Only support csv format now.') + + bsp_feature_dict = {} + for video_index in video_list: + video_name = video_infos[video_index]['video_name'] + + # Load temporal evaluation results + tem_path = osp.join(tem_results_dir, video_name + tem_results_ext) + tem_results = np.loadtxt( + tem_path, dtype=np.float32, delimiter=',', skiprows=1) + score_action = tem_results[:, 0] + seg_tmins = tem_results[:, 3] + seg_tmaxs = tem_results[:, 4] + video_scale = len(tem_results) + video_gap = seg_tmaxs[0] - seg_tmins[0] + video_extend = int(video_scale / 4 + 10) + + # Load proposals results + proposal_path = osp.join(pgm_proposals_dir, + video_name + pgm_proposal_ext) + pgm_proposals = np.loadtxt( + proposal_path, dtype=np.float32, delimiter=',', skiprows=1) + pgm_proposals = pgm_proposals[:top_k] + + # Generate temporal sample points + boundary_zeros = np.zeros([video_extend]) + score_action = np.concatenate( + (boundary_zeros, score_action, boundary_zeros)) + begin_tp = [] + middle_tp = [] + end_tp = [] + for i in range(video_extend): + begin_tp.append(-video_gap / 2 - + (video_extend - 1 - i) * video_gap) + end_tp.append(video_gap / 2 + seg_tmaxs[-1] + i * video_gap) + for i in range(video_scale): + middle_tp.append(video_gap / 2 + i * video_gap) + t_points = begin_tp + middle_tp + end_tp + + bsp_feature = [] + for pgm_proposal in pgm_proposals: + tmin = pgm_proposal[0] + tmax = pgm_proposal[1] + + tlen = tmax - tmin + # Temporal range for start + tmin_0 = tmin - tlen * bsp_boundary_ratio + tmin_1 = tmin + tlen * bsp_boundary_ratio + # Temporal range for end + tmax_0 = tmax - tlen * bsp_boundary_ratio + tmax_1 = tmax + tlen * bsp_boundary_ratio + + # Generate features at start boundary + tlen_start = (tmin_1 - tmin_0) / (num_sample_start - 1) + tlen_start_sample = tlen_start / num_sample_interp + t_new = [ + tmin_0 - tlen_start / 2 + tlen_start_sample * i + for i in range(num_sample_start * num_sample_interp + 1) + ] + y_new_start_action = np.interp(t_new, t_points, score_action) + y_new_start = [ + np.mean(y_new_start_action[i * num_sample_interp:(i + 1) * 
+ num_sample_interp + 1]) + for i in range(num_sample_start) + ] + # Generate features at end boundary + tlen_end = (tmax_1 - tmax_0) / (num_sample_end - 1) + tlen_end_sample = tlen_end / num_sample_interp + t_new = [ + tmax_0 - tlen_end / 2 + tlen_end_sample * i + for i in range(num_sample_end * num_sample_interp + 1) + ] + y_new_end_action = np.interp(t_new, t_points, score_action) + y_new_end = [ + np.mean(y_new_end_action[i * num_sample_interp:(i + 1) * + num_sample_interp + 1]) + for i in range(num_sample_end) + ] + # Generate features for action + tlen_action = (tmax - tmin) / (num_sample_action - 1) + tlen_action_sample = tlen_action / num_sample_interp + t_new = [ + tmin - tlen_action / 2 + tlen_action_sample * i + for i in range(num_sample_action * num_sample_interp + 1) + ] + y_new_action = np.interp(t_new, t_points, score_action) + y_new_action = [ + np.mean(y_new_action[i * num_sample_interp:(i + 1) * + num_sample_interp + 1]) + for i in range(num_sample_action) + ] + feature = np.concatenate([y_new_action, y_new_start, y_new_end]) + bsp_feature.append(feature) + bsp_feature = np.array(bsp_feature) + bsp_feature_dict[video_name] = bsp_feature + if result_dict is not None: + result_dict[video_name] = bsp_feature + return bsp_feature_dict diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/localization/proposal_utils.py b/Downstream/Open-Set-Action-Recognition/mmaction/localization/proposal_utils.py new file mode 100644 index 0000000..0cf0111 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/localization/proposal_utils.py @@ -0,0 +1,94 @@ +import numpy as np + + +def temporal_iou(proposal_min, proposal_max, gt_min, gt_max): + """Compute IoU score between a groundtruth bbox and the proposals. + + Args: + proposal_min (list[float]): List of temporal anchor min. + proposal_max (list[float]): List of temporal anchor max. + gt_min (float): Groundtruth temporal box min. + gt_max (float): Groundtruth temporal box max. + + Returns: + list[float]: List of iou scores. + """ + len_anchors = proposal_max - proposal_min + int_tmin = np.maximum(proposal_min, gt_min) + int_tmax = np.minimum(proposal_max, gt_max) + inter_len = np.maximum(int_tmax - int_tmin, 0.) + union_len = len_anchors - inter_len + gt_max - gt_min + jaccard = np.divide(inter_len, union_len) + return jaccard + + +def temporal_iop(proposal_min, proposal_max, gt_min, gt_max): + """Compute IoP score between a groundtruth bbox and the proposals. + + Compute the IoP which is defined as the overlap ratio with + groundtruth proportional to the duration of this proposal. + + Args: + proposal_min (list[float]): List of temporal anchor min. + proposal_max (list[float]): List of temporal anchor max. + gt_min (float): Groundtruth temporal box min. + gt_max (float): Groundtruth temporal box max. + + Returns: + list[float]: List of intersection over anchor scores. + """ + len_anchors = np.array(proposal_max - proposal_min) + int_tmin = np.maximum(proposal_min, gt_min) + int_tmax = np.minimum(proposal_max, gt_max) + inter_len = np.maximum(int_tmax - int_tmin, 0.) + scores = np.divide(inter_len, len_anchors) + return scores + + +def soft_nms(proposals, alpha, low_threshold, high_threshold, top_k): + """Soft NMS for temporal proposals. + + Args: + proposals (np.ndarray): Proposals generated by network. + alpha (float): Alpha value of Gaussian decaying function. + low_threshold (float): Low threshold for soft nms. + high_threshold (float): High threshold for soft nms. + top_k (int): Top k values to be considered. 
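As a quick numeric check of `temporal_iou` and `temporal_iop` defined above, the snippet below re-implements the two overlap measures inline (so it runs standalone) and evaluates them for two anchors against one groundtruth segment.

```python
import numpy as np

# Inline copies of the two overlap measures above, for a standalone check.
def temporal_iou(pmin, pmax, gmin, gmax):
    inter = np.maximum(np.minimum(pmax, gmax) - np.maximum(pmin, gmin), 0.)
    union = (pmax - pmin) - inter + gmax - gmin
    return inter / union

def temporal_iop(pmin, pmax, gmin, gmax):
    inter = np.maximum(np.minimum(pmax, gmax) - np.maximum(pmin, gmin), 0.)
    return inter / np.array(pmax - pmin)

anchors_min = np.array([0.10, 0.40])
anchors_max = np.array([0.30, 0.80])
# Against a groundtruth segment [0.20, 0.60]:
print(temporal_iou(anchors_min, anchors_max, 0.20, 0.60))  # [0.2, 0.3333...]
print(temporal_iop(anchors_min, anchors_max, 0.20, 0.60))  # [0.5, 0.5]
```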
+ + Returns: + np.ndarray: The updated proposals. + """ + proposals = proposals[proposals[:, -1].argsort()[::-1]] + tstart = list(proposals[:, 0]) + tend = list(proposals[:, 1]) + tscore = list(proposals[:, -1]) + rstart = [] + rend = [] + rscore = [] + + while len(tscore) > 0 and len(rscore) <= top_k: + max_index = np.argmax(tscore) + max_width = tend[max_index] - tstart[max_index] + iou_list = temporal_iou(tstart[max_index], tend[max_index], + np.array(tstart), np.array(tend)) + iou_exp_list = np.exp(-np.square(iou_list) / alpha) + + for idx, _ in enumerate(tscore): + if idx != max_index: + current_iou = iou_list[idx] + if current_iou > low_threshold + (high_threshold - + low_threshold) * max_width: + tscore[idx] = tscore[idx] * iou_exp_list[idx] + + rstart.append(tstart[max_index]) + rend.append(tend[max_index]) + rscore.append(tscore[max_index]) + tstart.pop(max_index) + tend.pop(max_index) + tscore.pop(max_index) + + rstart = np.array(rstart).reshape(-1, 1) + rend = np.array(rend).reshape(-1, 1) + rscore = np.array(rscore).reshape(-1, 1) + new_proposals = np.concatenate((rstart, rend, rscore), axis=1) + return new_proposals diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/localization/ssn_utils.py b/Downstream/Open-Set-Action-Recognition/mmaction/localization/ssn_utils.py new file mode 100644 index 0000000..0c1f528 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/localization/ssn_utils.py @@ -0,0 +1,168 @@ +from itertools import groupby + +import numpy as np + +from ..core import average_precision_at_temporal_iou +from . import temporal_iou + + +def load_localize_proposal_file(filename): + """Load the proposal file and split it into many parts which contain one + video's information separately. + + Args: + filename(str): Path to the proposal file. + + Returns: + list: List of all videos' information. + """ + lines = list(open(filename)) + + # Split the proposal file into many parts which contain one video's + # information separately. + groups = groupby(lines, lambda x: x.startswith('#')) + + video_infos = [[x.strip() for x in list(g)] for k, g in groups if not k] + + def parse_group(video_info): + """Parse the video's information. + + Template information of a video in a standard file: + # index + video_id + num_frames + fps + num_gts + label, start_frame, end_frame + label, start_frame, end_frame + ... + num_proposals + label, best_iou, overlap_self, start_frame, end_frame + label, best_iou, overlap_self, start_frame, end_frame + ... + + Example of a standard annotation file: + + .. code-block:: txt + + # 0 + video_validation_0000202 + 5666 + 1 + 3 + 8 130 185 + 8 832 1136 + 8 1303 1381 + 5 + 8 0.0620 0.0620 790 5671 + 8 0.1656 0.1656 790 2619 + 8 0.0833 0.0833 3945 5671 + 8 0.0960 0.0960 4173 5671 + 8 0.0614 0.0614 3327 5671 + + Args: + video_info (list): Information of the video. + + Returns: + tuple[str, int, list, list]: + video_id (str): Name of the video. + num_frames (int): Number of frames in the video. + gt_boxes (list): List of the information of gt boxes. + proposal_boxes (list): List of the information of + proposal boxes. 
+ """ + offset = 0 + video_id = video_info[offset] + offset += 1 + + num_frames = int(float(video_info[1]) * float(video_info[2])) + num_gts = int(video_info[3]) + offset = 4 + + gt_boxes = [x.split() for x in video_info[offset:offset + num_gts]] + offset += num_gts + num_proposals = int(video_info[offset]) + offset += 1 + proposal_boxes = [ + x.split() for x in video_info[offset:offset + num_proposals] + ] + + return video_id, num_frames, gt_boxes, proposal_boxes + + return [parse_group(video_info) for video_info in video_infos] + + +def perform_regression(detections): + """Perform regression on detection results. + + Args: + detections (list): Detection results before regression. + + Returns: + list: Detection results after regression. + """ + starts = detections[:, 0] + ends = detections[:, 1] + centers = (starts + ends) / 2 + durations = ends - starts + + new_centers = centers + durations * detections[:, 3] + new_durations = durations * np.exp(detections[:, 4]) + + new_detections = np.concatenate( + (np.clip(new_centers - new_durations / 2, 0, + 1)[:, None], np.clip(new_centers + new_durations / 2, 0, + 1)[:, None], detections[:, 2:]), + axis=1) + return new_detections + + +def temporal_nms(detections, threshold): + """Parse the video's information. + + Args: + detections (list): Detection results before NMS. + threshold (float): Threshold of NMS. + + Returns: + list: Detection results after NMS. + """ + starts = detections[:, 0] + ends = detections[:, 1] + scores = detections[:, 2] + + order = scores.argsort()[::-1] + + keep = [] + while order.size > 0: + i = order[0] + keep.append(i) + ious = temporal_iou(starts[order[1:]], ends[order[1:]], starts[i], + ends[i]) + idxs = np.where(ious <= threshold)[0] + order = order[idxs + 1] + + return detections[keep, :] + + +def eval_ap(detections, gt_by_cls, iou_range): + """Evaluate average precisions. + + Args: + detections (dict): Results of detections. + gt_by_cls (dict): Information of groudtruth. + iou_range (list): Ranges of iou. + + Returns: + list: Average precision values of classes at ious. 
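+            In practice the value is an ``np.ndarray`` of shape
+            ``(len(detections), len(iou_range))``, i.e. one AP value per
+            (class, IoU threshold) pair.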
+    """
+    ap_values = np.zeros((len(detections), len(iou_range)))
+
+    for iou_idx, min_overlap in enumerate(iou_range):
+        for class_idx, _ in enumerate(detections):
+            ap = average_precision_at_temporal_iou(gt_by_cls[class_idx],
+                                                   detections[class_idx],
+                                                   [min_overlap])
+            ap_values[class_idx, iou_idx] = ap
+
+    return ap_values
diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/__init__.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/__init__.py
new file mode 100644
index 0000000..a2ef283
--- /dev/null
+++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/__init__.py
@@ -0,0 +1,29 @@
+from .backbones import (C3D, X3D, ResNet, ResNet2Plus1d, ResNet3d, ResNet3dCSN,
+                        ResNet3dSlowFast, ResNet3dSlowOnly, ResNetAudio,
+                        ResNetTIN, ResNetTSM)
+from .builder import (build_backbone, build_head, build_localizer, build_loss,
+                      build_model, build_neck, build_recognizer)
+from .common import Conv2plus1d, ConvAudio
+from .heads import (AudioTSNHead, BaseHead, I3DHead, SlowFastHead, TPNHead,
+                    TSMHead, TSNHead, X3DHead)
+from .localizers import BMN, PEM, TEM
+from .losses import (BCELossWithLogits, BinaryLogisticRegressionLoss, BMNLoss,
+                     CrossEntropyLoss, HVULoss, NLLLoss, OHEMHingeLoss,
+                     SSNLoss)
+from .necks import TPN
+from .recognizers import (AudioRecognizer, BaseRecognizer, recognizer2d,
+                          recognizer3d)
+from .registry import BACKBONES, HEADS, LOCALIZERS, LOSSES, RECOGNIZERS
+
+__all__ = [
+    'BACKBONES', 'HEADS', 'RECOGNIZERS', 'build_recognizer', 'build_head',
+    'build_backbone', 'recognizer2d', 'recognizer3d', 'C3D', 'ResNet',
+    'ResNet3d', 'ResNet2Plus1d', 'I3DHead', 'TSNHead', 'TSMHead', 'BaseHead',
+    'BaseRecognizer', 'LOSSES', 'CrossEntropyLoss', 'NLLLoss', 'HVULoss',
+    'ResNetTSM', 'ResNet3dSlowFast', 'SlowFastHead', 'Conv2plus1d',
+    'ResNet3dSlowOnly', 'BCELossWithLogits', 'LOCALIZERS', 'build_localizer',
+    'PEM', 'TEM', 'BinaryLogisticRegressionLoss', 'BMN', 'BMNLoss',
+    'build_model', 'OHEMHingeLoss', 'SSNLoss', 'ResNet3dCSN', 'ResNetTIN',
+    'TPN', 'TPNHead', 'build_loss', 'build_neck', 'AudioRecognizer',
+    'AudioTSNHead', 'X3D', 'X3DHead', 'ResNetAudio', 'ConvAudio'
+]
diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/ViT3D.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/ViT3D.py
new file mode 100644
index 0000000..14eb6fa
--- /dev/null
+++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/ViT3D.py
@@ -0,0 +1,356 @@
+from functools import partial
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from timm.models.layers import drop_path, to_2tuple, trunc_normal_
+from timm.models.registry import register_model
+from mmcv.runner import load_checkpoint, _load_checkpoint, load_state_dict
+from ..builder import BACKBONES
+from ...utils import get_root_logger
+
+try:
+    from mmdet.models import BACKBONES as MMDET_BACKBONES
+    mmdet_imported = True
+except (ImportError, ModuleNotFoundError):
+    mmdet_imported = False
+
+def _cfg(url='', **kwargs):
+    return {
+        'url': url,
+        'num_classes': 400, 'input_size': (3, 224, 224), 'pool_size': None,
+        'crop_pct': .9, 'interpolation': 'bicubic',
+        'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
+        **kwargs
+    }
+
+
+class DropPath(nn.Module):
+    """Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks).
+    """
+    def __init__(self, drop_prob=None):
+        super(DropPath, self).__init__()
+        self.drop_prob = drop_prob
+
+    def forward(self, x):
+        return drop_path(x, self.drop_prob, self.training)
+
+    def extra_repr(self) -> str:
+        return 'p={}'.format(self.drop_prob)
+
+
+class Mlp(nn.Module):
+    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
+        super().__init__()
+        out_features = out_features or in_features
+        hidden_features = hidden_features or in_features
+        self.fc1 = nn.Linear(in_features, hidden_features)
+        self.act = act_layer()
+        self.fc2 = nn.Linear(hidden_features, out_features)
+        self.drop = nn.Dropout(drop)
+
+    def forward(self, x):
+        x = self.fc1(x)
+        x = self.act(x)
+        # x = self.drop(x)
+        # The first dropout is skipped here to match the original BERT implementation.
+        x = self.fc2(x)
+        x = self.drop(x)
+        return x
+
+
+class Attention(nn.Module):
+    def __init__(
+            self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
+            proj_drop=0., attn_head_dim=None):
+        super().__init__()
+        self.num_heads = num_heads
+        head_dim = dim // num_heads
+        if attn_head_dim is not None:
+            head_dim = attn_head_dim
+        all_head_dim = head_dim * self.num_heads
+        self.scale = qk_scale or head_dim ** -0.5
+
+        self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
+        if qkv_bias:
+            self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
+            self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
+        else:
+            self.q_bias = None
+            self.v_bias = None
+
+        self.attn_drop = nn.Dropout(attn_drop)
+        self.proj = nn.Linear(all_head_dim, dim)
+        self.proj_drop = nn.Dropout(proj_drop)
+
+    def forward(self, x):
+        B, N, C = x.shape
+        qkv_bias = None
+        if self.q_bias is not None:
+            qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
+        # qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
+        qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
+        qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
+        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)
+
+        q = q * self.scale
+        attn = (q @ k.transpose(-2, -1))
+
+        attn = attn.softmax(dim=-1)
+        attn = self.attn_drop(attn)
+
+        x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
+        x = self.proj(x)
+        x = self.proj_drop(x)
+        return x
+
+# Basic transformer block shared by the encoder and the decoder.
+class Block(nn.Module):
+
+    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
+                 drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm,
+                 attn_head_dim=None):
+        super().__init__()
+        self.norm1 = norm_layer(dim)
+        self.attn = Attention(
+            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
+            attn_drop=attn_drop, proj_drop=drop, attn_head_dim=attn_head_dim)
+        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
+        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+        self.norm2 = norm_layer(dim)
+        mlp_hidden_dim = int(dim * mlp_ratio)
+        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
+
+        if init_values is not None and init_values > 0:
+            self.gamma_1 = nn.Parameter(init_values * torch.ones(dim), requires_grad=True)
+            self.gamma_2 = nn.Parameter(init_values * torch.ones(dim), requires_grad=True)
+        else:
+            self.gamma_1, self.gamma_2 = None, None
+
+    def forward(self, x):
+        if self.gamma_1 is None:
+            x = x + self.drop_path(self.attn(self.norm1(x)))
+            x = x + self.drop_path(self.mlp(self.norm2(x)))
+        else:
+            x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x)))
+            x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
+        return x
+
+# Split the input video into 3D patches (tubelets).
+class PatchEmbed(nn.Module):
+    """Video to Patch (tubelet) Embedding.
+    """
+    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, num_frames=16, tubelet_size=2):
+        super().__init__()
+        img_size = to_2tuple(img_size)
+        patch_size = to_2tuple(patch_size)
+        self.tubelet_size = int(tubelet_size)
+        num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) * (num_frames // self.tubelet_size)
+        self.img_size = img_size
+        self.patch_size = patch_size
+        self.num_patches = num_patches
+        self.proj = nn.Conv3d(in_channels=in_chans, out_channels=embed_dim,
+                              kernel_size=(self.tubelet_size, patch_size[0], patch_size[1]),
+                              stride=(self.tubelet_size, patch_size[0], patch_size[1]))

+    def forward(self, x, **kwargs):
+        B, C, T, H, W = x.shape
+        # FIXME look at relaxing size constraints
+        assert H == self.img_size[0] and W == self.img_size[1], \
+            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
+        x = self.proj(x).flatten(2).transpose(1, 2)
+        return x
+
+# sin-cos position encoding
+# https://github.com/jadore801120/attention-is-all-you-need-pytorch/blob/master/transformer/Models.py#L31
+def get_sinusoid_encoding_table(n_position, d_hid):
+    ''' Sinusoid position encoding table '''
+    # TODO: make it with torch instead of numpy
+    def get_position_angle_vec(position):
+        return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]
+
+    sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])
+    sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
+    sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1
+
+    return torch.FloatTensor(sinusoid_table).unsqueeze(0)
+
+@BACKBONES.register_module()
+class VisionTransformer3D(nn.Module):
+    """Vision Transformer with support for patch or hybrid CNN input stage.
+    """
+    def __init__(self,
+                 img_size=224,
+                 patch_size=16,
+                 in_chans=3,
+                 num_classes=1000,
+                 embed_dim=768,
+                 depth=12,
+                 num_heads=12,
+                 mlp_ratio=4.,
+                 qkv_bias=False,
+                 qk_scale=None,
+                 drop_rate=0.,
+                 attn_drop_rate=0.,
+                 drop_path_rate=0.,
+                 norm_layer=partial(nn.LayerNorm, eps=1e-6),
+                 init_values=0.,
+                 use_learnable_pos_emb=False,
+                 init_scale=0.,
+                 all_frames=32,
+                 tubelet_size=2,
+                 use_mean_pooling=True,
+                 feature_reshape=False,
+                 pretrained=None):
+        super().__init__()
+        self.pretrained = pretrained
+        self.num_classes = num_classes
+        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
+        self.tubelet_size = tubelet_size
+        self.patch_embed = PatchEmbed(
+            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, num_frames=all_frames, tubelet_size=self.tubelet_size)
+        num_patches = self.patch_embed.num_patches
+
+        if 
use_learnable_pos_emb: + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + else: + # sine-cosine positional embeddings is on the way + self.pos_embed = get_sinusoid_encoding_table(num_patches, embed_dim) + + self.pos_drop = nn.Dropout(p=drop_rate) + + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + init_values=init_values) + for i in range(depth)]) + self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim) + self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None + self.feature_reshape = feature_reshape + if use_learnable_pos_emb: + trunc_normal_(self.pos_embed, std=.02) + self.init_weights() + + def init_weights(self,pretrained=None): + print("Steped into init_weights") + if pretrained: + self.pretrained = pretrained + if isinstance(self.pretrained,str): + self.apply(self._init_weights) + logger = get_root_logger() + logger.info(f'load model from: {self.pretrained}') + state_dict = _load_checkpoint(self.pretrained) + state_dict = state_dict['module'] + load_state_dict(self, state_dict, strict=False, logger=logger) + elif self.pretrained is None: + self.apply(self._init_weights) + else: + raise TypeError('pretrained must be a str or None') + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def get_num_layers(self): + return len(self.blocks) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + B, _, _ = x.size() + + if self.pos_embed is not None: + #pdb.set_trace() + x = x + self.pos_embed.expand(B, -1, -1).type_as(x).to(x.device).clone().detach() + x = self.pos_drop(x) + for blk in self.blocks: + x = blk(x) + x = self.norm(x) + #pdb.set_trace() + if self.feature_reshape: + x = x.reshape(-1,8,14,14,self.embed_dim) + x = x.permute(0,4,1,2,3) + return x + if self.fc_norm is not None: + return self.fc_norm(x.mean(1)) + else: + return x[:, 0] + + def forward(self, x): + x = self.forward_features(x) + return x + + +@register_model +def vit_small_patch16_224(pretrained=False, **kwargs): + model = VisionTransformer3D( + patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model.default_cfg = _cfg() + return model + +@register_model +def vit_base_patch16_224(pretrained=False, **kwargs): + model = VisionTransformer3D( + patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_base_patch16_384(pretrained=False, **kwargs): + model = VisionTransformer3D( + img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model.default_cfg = _cfg() + return model + + 
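+# Illustrative usage sketch (an assumption for documentation, not part of the
+# original configs): since ``VisionTransformer3D`` is registered in
+# ``BACKBONES``, a recognizer config may refer to it by name, e.g.
+#
+#     backbone=dict(
+#         type='VisionTransformer3D',
+#         img_size=224,
+#         patch_size=16,
+#         embed_dim=768,
+#         depth=12,
+#         num_heads=12,
+#         all_frames=16,
+#         use_mean_pooling=True,
+#         pretrained=None)
+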
+@register_model +def vit_large_patch16_224(pretrained=False, **kwargs): + model = VisionTransformer3D( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_large_patch16_384(pretrained=False, **kwargs): + model = VisionTransformer3D( + img_size=384, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_large_patch16_512(pretrained=False, **kwargs): + model = VisionTransformer3D( + img_size=512, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model.default_cfg = _cfg() + return model +if mmdet_imported: + MMDET_BACKBONES.register_module()(VisionTransformer3D) diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/__init__.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/__init__.py new file mode 100644 index 0000000..bab01cb --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/__init__.py @@ -0,0 +1,18 @@ +from .c3d import C3D +from .resnet import ResNet +from .resnet2plus1d import ResNet2Plus1d +from .resnet3d import ResNet3d +from .resnet3d_csn import ResNet3dCSN +from .resnet3d_slowfast import ResNet3dSlowFast +from .resnet3d_slowonly import ResNet3dSlowOnly +from .resnet_audio import ResNetAudio +from .resnet_tin import ResNetTIN +from .resnet_tsm import ResNetTSM +from .x3d import X3D +from .ViT3D import VisionTransformer3D + +__all__ = [ + 'C3D', 'ResNet', 'ResNet3d', 'ResNetTSM', 'ResNet2Plus1d', + 'ResNet3dSlowFast', 'ResNet3dSlowOnly', 'ResNet3dCSN', 'ResNetTIN', 'X3D', + 'ResNetAudio', 'VisionTransformer3D' +] diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/c3d.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/c3d.py new file mode 100644 index 0000000..847ff57 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/c3d.py @@ -0,0 +1,139 @@ +import torch.nn as nn +from mmcv.cnn import ConvModule, constant_init, kaiming_init, normal_init +from mmcv.runner import load_checkpoint +from mmcv.utils import _BatchNorm + +from ...utils import get_root_logger +from ..registry import BACKBONES + + +@BACKBONES.register_module() +class C3D(nn.Module): + """C3D backbone. + + Args: + pretrained (str | None): Name of pretrained model. + style (str): ``pytorch`` or ``caffe``. If set to "pytorch", the + stride-two layer is the 3x3 conv layer, otherwise the stride-two + layer is the first 1x1 conv layer. Default: 'pytorch'. + conv_cfg (dict | None): Config dict for convolution layer. + If set to None, it uses ``dict(type='Conv3d')`` to construct + layers. Default: None. + norm_cfg (dict | None): Config for norm layers. required keys are + ``type``, Default: None. + act_cfg (dict | None): Config dict for activation layer. If set to + None, it uses ``dict(type='ReLU')`` to construct layers. + Default: None. + dropout_ratio (float): Probability of dropout layer. Default: 0.5. + init_std (float): Std value for Initiation of fc layers. Default: 0.01. 
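+
+    Note:
+        The fully connected layers assume 16-frame clips at a spatial
+        resolution of 112x112, which yield an 8192-dim feature after
+        ``pool5``; the backbone therefore outputs a 4096-dim vector per clip.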
+ """ + + def __init__(self, + pretrained=None, + style='pytorch', + conv_cfg=None, + norm_cfg=None, + act_cfg=None, + dropout_ratio=0.5, + init_std=0.005): + super().__init__() + if conv_cfg is None: + conv_cfg = dict(type='Conv3d') + if act_cfg is None: + act_cfg = dict(type='ReLU') + self.pretrained = pretrained + self.style = style + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.dropout_ratio = dropout_ratio + self.init_std = init_std + + c3d_conv_param = dict( + kernel_size=(3, 3, 3), + padding=(1, 1, 1), + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.conv1a = ConvModule(3, 64, **c3d_conv_param) + self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2)) + + self.conv2a = ConvModule(64, 128, **c3d_conv_param) + self.pool2 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)) + + self.conv3a = ConvModule(128, 256, **c3d_conv_param) + self.conv3b = ConvModule(256, 256, **c3d_conv_param) + self.pool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)) + + self.conv4a = ConvModule(256, 512, **c3d_conv_param) + self.conv4b = ConvModule(512, 512, **c3d_conv_param) + self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)) + + self.conv5a = ConvModule(512, 512, **c3d_conv_param) + self.conv5b = ConvModule(512, 512, **c3d_conv_param) + self.pool5 = nn.MaxPool3d( + kernel_size=(2, 2, 2), stride=(2, 2, 2), padding=(0, 1, 1)) + + self.fc6 = nn.Linear(8192, 4096) + self.fc7 = nn.Linear(4096, 4096) + + self.relu = nn.ReLU() + self.dropout = nn.Dropout(p=self.dropout_ratio) + + def init_weights(self): + """Initiate the parameters either from existing checkpoint or from + scratch.""" + if isinstance(self.pretrained, str): + logger = get_root_logger() + logger.info(f'load model from: {self.pretrained}') + + load_checkpoint(self, self.pretrained, strict=False, logger=logger) + + elif self.pretrained is None: + for m in self.modules(): + if isinstance(m, nn.Conv3d): + kaiming_init(m) + elif isinstance(m, nn.Linear): + normal_init(m, std=self.init_std) + elif isinstance(m, _BatchNorm): + constant_init(m, 1) + + else: + raise TypeError('pretrained must be a str or None') + + def forward(self, x): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + the size of x is (num_batches, 3, 16, 112, 112). + + Returns: + torch.Tensor: The feature of the input + samples extracted by the backbone. + """ + x = self.conv1a(x) + x = self.pool1(x) + + x = self.conv2a(x) + x = self.pool2(x) + + x = self.conv3a(x) + x = self.conv3b(x) + x = self.pool3(x) + + x = self.conv4a(x) + x = self.conv4b(x) + x = self.pool4(x) + + x = self.conv5a(x) + x = self.conv5b(x) + x = self.pool5(x) + + x = x.flatten(start_dim=1) + x = self.relu(self.fc6(x)) + x = self.dropout(x) + x = self.relu(self.fc7(x)) + + return x diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet.py new file mode 100644 index 0000000..ca0a205 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet.py @@ -0,0 +1,586 @@ +import torch.nn as nn +from mmcv.cnn import ConvModule, constant_init, kaiming_init +from mmcv.runner import _load_checkpoint, load_checkpoint +from mmcv.utils import _BatchNorm +from torch.utils import checkpoint as cp + +from ...utils import get_root_logger +from ..registry import BACKBONES + + +class BasicBlock(nn.Module): + """Basic block for ResNet. 
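+
+    The block stacks two 3x3 ``ConvModule`` layers and adds the (optionally
+    downsampled) input back as a residual connection before the final ReLU.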
+ + Args: + inplanes (int): Number of channels for the input in first conv2d layer. + planes (int): Number of channels produced by some norm/conv2d layers. + stride (int): Stride in the conv layer. Default: 1. + dilation (int): Spacing between kernel elements. Default: 1. + downsample (nn.Module | None): Downsample layer. Default: None. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. Default: 'pytorch'. + conv_cfg (dict): Config for norm layers. Default: dict(type='Conv'). + norm_cfg (dict): + Config for norm layers. required keys are `type` and + `requires_grad`. Default: dict(type='BN2d', requires_grad=True). + act_cfg (dict): Config for activate layers. + Default: dict(type='ReLU', inplace=True). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + """ + expansion = 1 + + def __init__(self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + conv_cfg=dict(type='Conv'), + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='ReLU', inplace=True), + with_cp=False): + super().__init__() + assert style in ['pytorch', 'caffe'] + self.conv1 = ConvModule( + inplanes, + planes, + kernel_size=3, + stride=stride, + padding=dilation, + dilation=dilation, + bias=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.conv2 = ConvModule( + planes, + planes, + kernel_size=3, + stride=1, + padding=1, + dilation=1, + bias=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.style = style + self.stride = stride + self.dilation = dilation + self.norm_cfg = norm_cfg + assert not with_cp + + def forward(self, x): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + torch.Tensor: The output of the module. + """ + identity = x + + out = self.conv1(x) + out = self.conv2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out = out + identity + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + """Bottleneck block for ResNet. + + Args: + inplanes (int): + Number of channels for the input feature in first conv layer. + planes (int): + Number of channels produced by some norm layes and conv layers + stride (int): Spatial stride in the conv layer. Default: 1. + dilation (int): Spacing between kernel elements. Default: 1. + downsample (nn.Module | None): Downsample layer. Default: None. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. Default: 'pytorch'. + conv_cfg (dict): Config for norm layers. Default: dict(type='Conv'). + norm_cfg (dict): + Config for norm layers. required keys are `type` and + `requires_grad`. Default: dict(type='BN2d', requires_grad=True). + act_cfg (dict): Config for activate layers. + Default: dict(type='ReLU', inplace=True). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. 
+ """ + + expansion = 4 + + def __init__(self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + conv_cfg=dict(type='Conv'), + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='ReLU', inplace=True), + with_cp=False): + super().__init__() + assert style in ['pytorch', 'caffe'] + self.inplanes = inplanes + self.planes = planes + if style == 'pytorch': + self.conv1_stride = 1 + self.conv2_stride = stride + else: + self.conv1_stride = stride + self.conv2_stride = 1 + self.conv1 = ConvModule( + inplanes, + planes, + kernel_size=1, + stride=self.conv1_stride, + bias=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.conv2 = ConvModule( + planes, + planes, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation, + bias=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.conv3 = ConvModule( + planes, + planes * self.expansion, + kernel_size=1, + bias=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.norm_cfg = norm_cfg + self.with_cp = with_cp + + def forward(self, x): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + torch.Tensor: The output of the module. + """ + + def _inner_forward(x): + """Forward wrapper for utilizing checkpoint.""" + identity = x + + out = self.conv1(x) + out = self.conv2(out) + out = self.conv3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out = out + identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +def make_res_layer(block, + inplanes, + planes, + blocks, + stride=1, + dilation=1, + style='pytorch', + conv_cfg=None, + norm_cfg=None, + act_cfg=None, + with_cp=False): + """Build residual layer for ResNet. + + Args: + block: (nn.Module): Residual module to be built. + inplanes (int): Number of channels for the input feature in each block. + planes (int): Number of channels for the output feature in each block. + blocks (int): Number of residual blocks. + stride (int): Stride in the conv layer. Default: 1. + dilation (int): Spacing between kernel elements. Default: 1. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. Default: 'pytorch'. + conv_cfg (dict | None): Config for norm layers. Default: None. + norm_cfg (dict | None): Config for norm layers. Default: None. + act_cfg (dict | None): Config for activate layers. Default: None. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + + Returns: + nn.Module: A residual layer for the given config. 
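+
+    Example (illustrative):
+        >>> # three Bottleneck blocks, structured like ``layer1`` of ResNet-50
+        >>> layer1 = make_res_layer(Bottleneck, 64, 64, 3)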
+ """ + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = ConvModule( + inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + layers = [] + layers.append( + block( + inplanes, + planes, + stride, + dilation, + downsample, + style=style, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + with_cp=with_cp)) + inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append( + block( + inplanes, + planes, + 1, + dilation, + style=style, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + with_cp=with_cp)) + + return nn.Sequential(*layers) + + +@BACKBONES.register_module() +class ResNet(nn.Module): + """ResNet backbone. + + Args: + depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. + pretrained (str | None): Name of pretrained model. Default: None. + in_channels (int): Channel num of input features. Default: 3. + num_stages (int): Resnet stages. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + out_indices (Sequence[int]): Indices of output feature. Default: (3, ). + dilations (Sequence[int]): Dilation of each stage. + style (str): ``pytorch`` or ``caffe``. If set to "pytorch", the + stride-two layer is the 3x3 conv layer, otherwise the stride-two + layer is the first 1x1 conv layer. Default: ``pytorch``. + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. Default: -1. + conv_cfg (dict): Config for norm layers. Default: dict(type='Conv'). + norm_cfg (dict): + Config for norm layers. required keys are `type` and + `requires_grad`. Default: dict(type='BN2d', requires_grad=True). + act_cfg (dict): Config for activate layers. + Default: dict(type='ReLU', inplace=True). + norm_eval (bool): Whether to set BN layers to eval mode, namely, freeze + running stats (mean and var). Default: False. + partial_bn (bool): Whether to use partial bn. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. 
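+
+    Example (illustrative):
+        >>> import torch
+        >>> model = ResNet(depth=50, pretrained=None)
+        >>> model.init_weights()
+        >>> inputs = torch.rand(1, 3, 224, 224)
+        >>> feat = model(inputs)
+        >>> feat.shape
+        torch.Size([1, 2048, 7, 7])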
+ """ + + arch_settings = { + 18: (BasicBlock, (2, 2, 2, 2)), + 34: (BasicBlock, (3, 4, 6, 3)), + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, + depth, + pretrained=None, + torchvision_pretrain=True, + in_channels=3, + num_stages=4, + out_indices=(3, ), + strides=(1, 2, 2, 2), + dilations=(1, 1, 1, 1), + style='pytorch', + frozen_stages=-1, + conv_cfg=dict(type='Conv'), + norm_cfg=dict(type='BN2d', requires_grad=True), + act_cfg=dict(type='ReLU', inplace=True), + norm_eval=False, + partial_bn=False, + with_cp=False): + super().__init__() + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for resnet') + self.depth = depth + self.in_channels = in_channels + self.pretrained = pretrained + self.torchvision_pretrain = torchvision_pretrain + self.num_stages = num_stages + assert 1 <= num_stages <= 4 + self.out_indices = out_indices + assert max(out_indices) < num_stages + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == num_stages + self.style = style + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + self.partial_bn = partial_bn + self.with_cp = with_cp + + self.block, stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + self.inplanes = 64 + + self._make_stem_layer() + + self.res_layers = [] + for i, num_blocks in enumerate(self.stage_blocks): + stride = strides[i] + dilation = dilations[i] + planes = 64 * 2**i + res_layer = make_res_layer( + self.block, + self.inplanes, + planes, + num_blocks, + stride=stride, + dilation=dilation, + style=self.style, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + with_cp=with_cp) + self.inplanes = planes * self.block.expansion + layer_name = f'layer{i + 1}' + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self.feat_dim = self.block.expansion * 64 * 2**( + len(self.stage_blocks) - 1) + + def _make_stem_layer(self): + """Construct the stem layers consists of a conv+norm+act module and a + pooling layer.""" + self.conv1 = ConvModule( + self.in_channels, + 64, + kernel_size=7, + stride=2, + padding=3, + bias=False, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + @staticmethod + def _load_conv_params(conv, state_dict_tv, module_name_tv, + loaded_param_names): + """Load the conv parameters of resnet from torchvision. + + Args: + conv (nn.Module): The destination conv module. + state_dict_tv (OrderedDict): The state dict of pretrained + torchvision model. + module_name_tv (str): The name of corresponding conv module in the + torchvision model. + loaded_param_names (list[str]): List of parameters that have been + loaded. + """ + + weight_tv_name = module_name_tv + '.weight' + conv.weight.data.copy_(state_dict_tv[weight_tv_name]) + loaded_param_names.append(weight_tv_name) + + if getattr(conv, 'bias') is not None: + bias_tv_name = module_name_tv + '.bias' + conv.bias.data.copy_(state_dict_tv[bias_tv_name]) + loaded_param_names.append(bias_tv_name) + + @staticmethod + def _load_bn_params(bn, state_dict_tv, module_name_tv, loaded_param_names): + """Load the bn parameters of resnet from torchvision. + + Args: + bn (nn.Module): The destination bn module. + state_dict_tv (OrderedDict): The state dict of pretrained + torchvision model. 
+ module_name_tv (str): The name of corresponding bn module in the + torchvision model. + loaded_param_names (list[str]): List of parameters that have been + loaded. + """ + + for param_name, param in bn.named_parameters(): + param_tv_name = f'{module_name_tv}.{param_name}' + param_tv = state_dict_tv[param_tv_name] + param.data.copy_(param_tv) + loaded_param_names.append(param_tv_name) + + for param_name, param in bn.named_buffers(): + param_tv_name = f'{module_name_tv}.{param_name}' + # some buffers like num_batches_tracked may not exist + if param_tv_name in state_dict_tv: + param_tv = state_dict_tv[param_tv_name] + param.data.copy_(param_tv) + loaded_param_names.append(param_tv_name) + + def _load_torchvision_checkpoint(self, logger=None): + """Initiate the parameters from torchvision pretrained checkpoint.""" + state_dict_torchvision = _load_checkpoint(self.pretrained) + if 'state_dict' in state_dict_torchvision: + state_dict_torchvision = state_dict_torchvision['state_dict'] + + loaded_param_names = [] + for name, module in self.named_modules(): + if isinstance(module, ConvModule): + # we use a ConvModule to wrap conv+bn+relu layers, thus the + # name mapping is needed + if 'downsample' in name: + # layer{X}.{Y}.downsample.conv->layer{X}.{Y}.downsample.0 + original_conv_name = name + '.0' + # layer{X}.{Y}.downsample.bn->layer{X}.{Y}.downsample.1 + original_bn_name = name + '.1' + else: + # layer{X}.{Y}.conv{n}.conv->layer{X}.{Y}.conv{n} + original_conv_name = name + # layer{X}.{Y}.conv{n}.bn->layer{X}.{Y}.bn{n} + original_bn_name = name.replace('conv', 'bn') + self._load_conv_params(module.conv, state_dict_torchvision, + original_conv_name, loaded_param_names) + self._load_bn_params(module.bn, state_dict_torchvision, + original_bn_name, loaded_param_names) + + # check if any parameters in the 2d checkpoint are not loaded + remaining_names = set( + state_dict_torchvision.keys()) - set(loaded_param_names) + if remaining_names: + logger.info( + f'These parameters in pretrained checkpoint are not loaded' + f': {remaining_names}') + + def init_weights(self): + """Initiate the parameters either from existing checkpoint or from + scratch.""" + if isinstance(self.pretrained, str): + logger = get_root_logger() + if self.torchvision_pretrain: + # torchvision's + self._load_torchvision_checkpoint(logger) + else: + # ours + load_checkpoint( + self, self.pretrained, strict=False, logger=logger) + elif self.pretrained is None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m) + elif isinstance(m, nn.BatchNorm2d): + constant_init(m, 1) + else: + raise TypeError('pretrained must be a str or None') + + def forward(self, x): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + torch.Tensor: The feature of the input samples extracted + by the backbone. 
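+                A single tensor is returned when ``out_indices`` contains
+                only one index; otherwise a tuple with one feature map per
+                requested index.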
+ """ + x = self.conv1(x) + x = self.maxpool(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + if len(outs) == 1: + return outs[0] + + return tuple(outs) + + def _freeze_stages(self): + """Prevent all the parameters from being optimized before + ``self.frozen_stages``.""" + if self.frozen_stages >= 0: + self.conv1.bn.eval() + for m in self.conv1.modules(): + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, f'layer{i}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def _partial_bn(self): + logger = get_root_logger() + logger.info('Freezing BatchNorm2D except the first one.') + count_bn = 0 + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + count_bn += 1 + if count_bn >= 2: + m.eval() + # shutdown update in frozen mode + m.weight.requires_grad = False + m.bias.requires_grad = False + + def train(self, mode=True): + """Set the optimization status when training.""" + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() + if mode and self.partial_bn: + self._partial_bn() diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet2plus1d.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet2plus1d.py new file mode 100644 index 0000000..ec40802 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet2plus1d.py @@ -0,0 +1,49 @@ +from ..registry import BACKBONES +from .resnet3d import ResNet3d + + +@BACKBONES.register_module() +class ResNet2Plus1d(ResNet3d): + """ResNet (2+1)d backbone. + + This model is proposed in `A Closer Look at Spatiotemporal Convolutions for + Action Recognition `_ + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + assert self.pretrained2d is False + assert self.conv_cfg['type'] == 'Conv2plus1d' + + def _freeze_stages(self): + """Prevent all the parameters from being optimized before + ``self.frozen_stages``.""" + if self.frozen_stages >= 0: + self.conv1.eval() + for param in self.conv1.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, f'layer{i}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def forward(self, x): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + torch.Tensor: The feature of the input + samples extracted by the backbone. 
+ """ + x = self.conv1(x) + x = self.maxpool(x) + for layer_name in self.res_layers: + res_layer = getattr(self, layer_name) + # no pool2 in R(2+1)d + x = res_layer(x) + + return x diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet3d.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet3d.py new file mode 100644 index 0000000..4fd78bf --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet3d.py @@ -0,0 +1,817 @@ +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import (ConvModule, NonLocal3d, build_activation_layer, + constant_init, kaiming_init) +from mmcv.runner import _load_checkpoint, load_checkpoint +from mmcv.utils import _BatchNorm +from torch.nn.modules.utils import _ntuple, _triple + +from ...utils import get_root_logger +from ..registry import BACKBONES + + +class BasicBlock3d(nn.Module): + """BasicBlock 3d block for ResNet3D. + + Args: + inplanes (int): Number of channels for the input in first conv3d layer. + planes (int): Number of channels produced by some norm/conv3d layers. + spatial_stride (int): Spatial stride in the conv3d layer. Default: 1. + temporal_stride (int): Temporal stride in the conv3d layer. Default: 1. + dilation (int): Spacing between kernel elements. Default: 1. + downsample (nn.Module | None): Downsample layer. Default: None. + style (str): ``pytorch`` or ``caffe``. If set to "pytorch", the + stride-two layer is the 3x3 conv layer, otherwise the stride-two + layer is the first 1x1 conv layer. Default: 'pytorch'. + inflate (bool): Whether to inflate kernel. Default: True. + non_local (bool): Determine whether to apply non-local module in this + block. Default: False. + non_local_cfg (dict): Config for non-local module. Default: ``dict()``. + conv_cfg (dict): Config dict for convolution layer. + Default: ``dict(type='Conv3d')``. + norm_cfg (dict): Config for norm layers. required keys are ``type``, + Default: ``dict(type='BN3d')``. + act_cfg (dict): Config dict for activation layer. + Default: ``dict(type='ReLU')``. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. 
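+
+    Note:
+        When ``inflate`` is False, both convolutions use 1x3x3 kernels, so
+        the block reduces to a purely spatial (frame-wise) residual block.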
+ """ + expansion = 1 + + def __init__(self, + inplanes, + planes, + spatial_stride=1, + temporal_stride=1, + dilation=1, + downsample=None, + style='pytorch', + inflate=True, + non_local=False, + non_local_cfg=dict(), + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d'), + act_cfg=dict(type='ReLU'), + with_cp=False, + **kwargs): + super().__init__() + assert style in ['pytorch', 'caffe'] + # make sure that only ``inflate_style`` is passed into kwargs + assert set(kwargs.keys()).issubset(['inflate_style']) + + self.inplanes = inplanes + self.planes = planes + self.spatial_stride = spatial_stride + self.temporal_stride = temporal_stride + self.dilation = dilation + self.style = style + self.inflate = inflate + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.with_cp = with_cp + self.non_local = non_local + self.non_local_cfg = non_local_cfg + + self.conv1_stride_s = spatial_stride + self.conv2_stride_s = 1 + self.conv1_stride_t = temporal_stride + self.conv2_stride_t = 1 + + if self.inflate: + conv1_kernel_size = (3, 3, 3) + conv1_padding = (1, dilation, dilation) + conv2_kernel_size = (3, 3, 3) + conv2_padding = (1, 1, 1) + else: + conv1_kernel_size = (1, 3, 3) + conv1_padding = (0, dilation, dilation) + conv2_kernel_size = (1, 3, 3) + conv2_padding = (0, 1, 1) + + self.conv1 = ConvModule( + inplanes, + planes, + conv1_kernel_size, + stride=(self.conv1_stride_t, self.conv1_stride_s, + self.conv1_stride_s), + padding=conv1_padding, + dilation=(1, dilation, dilation), + bias=False, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.conv2 = ConvModule( + planes, + planes * self.expansion, + conv2_kernel_size, + stride=(self.conv2_stride_t, self.conv2_stride_s, + self.conv2_stride_s), + padding=conv2_padding, + bias=False, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=None) + + self.downsample = downsample + self.relu = build_activation_layer(self.act_cfg) + + if self.non_local: + self.non_local_block = NonLocal3d(self.conv2.norm.num_features, + **self.non_local_cfg) + + def forward(self, x): + """Defines the computation performed at every call.""" + + def _inner_forward(x): + """Forward wrapper for utilizing checkpoint.""" + identity = x + + out = self.conv1(x) + out = self.conv2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out = out + identity + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + out = self.relu(out) + + if self.non_local: + out = self.non_local_block(out) + + return out + + +class Bottleneck3d(nn.Module): + """Bottleneck 3d block for ResNet3D. + + Args: + inplanes (int): Number of channels for the input in first conv3d layer. + planes (int): Number of channels produced by some norm/conv3d layers. + spatial_stride (int): Spatial stride in the conv3d layer. Default: 1. + temporal_stride (int): Temporal stride in the conv3d layer. Default: 1. + dilation (int): Spacing between kernel elements. Default: 1. + downsample (nn.Module | None): Downsample layer. Default: None. + style (str): ``pytorch`` or ``caffe``. If set to "pytorch", the + stride-two layer is the 3x3 conv layer, otherwise the stride-two + layer is the first 1x1 conv layer. Default: 'pytorch'. + inflate (bool): Whether to inflate kernel. Default: True. + inflate_style (str): ``3x1x1`` or ``3x3x3``. which determines the + kernel sizes and padding strides for conv1 and conv2 in each block. + Default: '3x1x1'. 
+ non_local (bool): Determine whether to apply non-local module in this + block. Default: False. + non_local_cfg (dict): Config for non-local module. Default: ``dict()``. + conv_cfg (dict): Config dict for convolution layer. + Default: ``dict(type='Conv3d')``. + norm_cfg (dict): Config for norm layers. required keys are ``type``, + Default: ``dict(type='BN3d')``. + act_cfg (dict): Config dict for activation layer. + Default: ``dict(type='ReLU')``. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + """ + expansion = 4 + + def __init__(self, + inplanes, + planes, + spatial_stride=1, + temporal_stride=1, + dilation=1, + downsample=None, + style='pytorch', + inflate=True, + inflate_style='3x1x1', + non_local=False, + non_local_cfg=dict(), + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d'), + act_cfg=dict(type='ReLU'), + with_cp=False): + super().__init__() + assert style in ['pytorch', 'caffe'] + assert inflate_style in ['3x1x1', '3x3x3'] + + self.inplanes = inplanes + self.planes = planes + self.spatial_stride = spatial_stride + self.temporal_stride = temporal_stride + self.dilation = dilation + self.style = style + self.inflate = inflate + self.inflate_style = inflate_style + self.norm_cfg = norm_cfg + self.conv_cfg = conv_cfg + self.act_cfg = act_cfg + self.with_cp = with_cp + self.non_local = non_local + self.non_local_cfg = non_local_cfg + + if self.style == 'pytorch': + self.conv1_stride_s = 1 + self.conv2_stride_s = spatial_stride + self.conv1_stride_t = 1 + self.conv2_stride_t = temporal_stride + else: + self.conv1_stride_s = spatial_stride + self.conv2_stride_s = 1 + self.conv1_stride_t = temporal_stride + self.conv2_stride_t = 1 + + if self.inflate: + if inflate_style == '3x1x1': + conv1_kernel_size = (3, 1, 1) + conv1_padding = (1, 0, 0) + conv2_kernel_size = (1, 3, 3) + conv2_padding = (0, dilation, dilation) + else: + conv1_kernel_size = (1, 1, 1) + conv1_padding = (0, 0, 0) + conv2_kernel_size = (3, 3, 3) + conv2_padding = (1, dilation, dilation) + else: + conv1_kernel_size = (1, 1, 1) + conv1_padding = (0, 0, 0) + conv2_kernel_size = (1, 3, 3) + conv2_padding = (0, dilation, dilation) + + self.conv1 = ConvModule( + inplanes, + planes, + conv1_kernel_size, + stride=(self.conv1_stride_t, self.conv1_stride_s, + self.conv1_stride_s), + padding=conv1_padding, + bias=False, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.conv2 = ConvModule( + planes, + planes, + conv2_kernel_size, + stride=(self.conv2_stride_t, self.conv2_stride_s, + self.conv2_stride_s), + padding=conv2_padding, + dilation=(1, dilation, dilation), + bias=False, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.conv3 = ConvModule( + planes, + planes * self.expansion, + 1, + bias=False, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + # No activation in the third ConvModule for bottleneck + act_cfg=None) + + self.downsample = downsample + self.relu = build_activation_layer(self.act_cfg) + + if self.non_local: + self.non_local_block = NonLocal3d(self.conv3.norm.num_features, + **self.non_local_cfg) + + def forward(self, x): + """Defines the computation performed at every call.""" + + def _inner_forward(x): + """Forward wrapper for utilizing checkpoint.""" + identity = x + + out = self.conv1(x) + out = self.conv2(out) + out = self.conv3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out = out + identity + return out + + 
if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + out = self.relu(out) + + if self.non_local: + out = self.non_local_block(out) + + return out + + +@BACKBONES.register_module() +class ResNet3d(nn.Module): + """ResNet 3d backbone. + + Args: + depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. + pretrained (str | None): Name of pretrained model. + pretrained2d (bool): Whether to load pretrained 2D model. + Default: True. + in_channels (int): Channel num of input features. Default: 3. + base_channels (int): Channel num of stem output features. Default: 64. + out_indices (Sequence[int]): Indices of output feature. Default: (3, ). + num_stages (int): Resnet stages. Default: 4. + spatial_strides (Sequence[int]): + Spatial strides of residual blocks of each stage. + Default: ``(1, 2, 2, 2)``. + temporal_strides (Sequence[int]): + Temporal strides of residual blocks of each stage. + Default: ``(1, 1, 1, 1)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + conv1_kernel (Sequence[int]): Kernel size of the first conv layer. + Default: ``(5, 7, 7)``. + conv1_stride_t (int): Temporal stride of the first conv layer. + Default: 2. + pool1_stride_t (int): Temporal stride of the first pooling layer. + Default: 2. + with_pool2 (bool): Whether to use pool2. Default: True. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. Default: 'pytorch'. + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. Default: -1. + inflate (Sequence[int]): Inflate Dims of each block. + Default: (1, 1, 1, 1). + inflate_style (str): ``3x1x1`` or ``1x1x1``. which determines the + kernel sizes and padding strides for conv1 and conv2 in each block. + Default: '3x1x1'. + conv_cfg (dict): Config for conv layers. required keys are ``type`` + Default: ``dict(type='Conv3d')``. + norm_cfg (dict): Config for norm layers. required keys are ``type`` and + ``requires_grad``. + Default: ``dict(type='BN3d', requires_grad=True)``. + act_cfg (dict): Config dict for activation layer. + Default: ``dict(type='ReLU', inplace=True)``. + norm_eval (bool): Whether to set BN layers to eval mode, namely, freeze + running stats (mean and var). Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + non_local (Sequence[int]): Determine whether to apply non-local module + in the corresponding block of each stages. Default: (0, 0, 0, 0). + non_local_cfg (dict): Config for non-local module. Default: ``dict()``. + zero_init_residual (bool): + Whether to use zero initialization for residual block, + Default: True. + kwargs (dict, optional): Key arguments for "make_res_layer". 
+ """ + + arch_settings = { + 18: (BasicBlock3d, (2, 2, 2, 2)), + 34: (BasicBlock3d, (3, 4, 6, 3)), + 50: (Bottleneck3d, (3, 4, 6, 3)), + 101: (Bottleneck3d, (3, 4, 23, 3)), + 152: (Bottleneck3d, (3, 8, 36, 3)) + } + + def __init__(self, + depth, + pretrained, + pretrained2d=True, + in_channels=3, + num_stages=4, + base_channels=64, + out_indices=(3, ), + spatial_strides=(1, 2, 2, 2), + temporal_strides=(1, 1, 1, 1), + dilations=(1, 1, 1, 1), + conv1_kernel=(5, 7, 7), + conv1_stride_t=2, + pool1_stride_t=2, + with_pool2=True, + style='pytorch', + frozen_stages=-1, + inflate=(1, 1, 1, 1), + inflate_style='3x1x1', + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d', requires_grad=True), + act_cfg=dict(type='ReLU', inplace=True), + norm_eval=False, + with_cp=False, + non_local=(0, 0, 0, 0), + non_local_cfg=dict(), + zero_init_residual=True, + **kwargs): + super().__init__() + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for resnet') + self.depth = depth + self.pretrained = pretrained + self.pretrained2d = pretrained2d + self.in_channels = in_channels + self.base_channels = base_channels + self.num_stages = num_stages + assert 1 <= num_stages <= 4 + self.out_indices = out_indices + assert max(out_indices) < num_stages + self.spatial_strides = spatial_strides + self.temporal_strides = temporal_strides + self.dilations = dilations + assert len(spatial_strides) == len(temporal_strides) == len( + dilations) == num_stages + self.conv1_kernel = conv1_kernel + self.conv1_stride_t = conv1_stride_t + self.pool1_stride_t = pool1_stride_t + self.with_pool2 = with_pool2 + self.style = style + self.frozen_stages = frozen_stages + self.stage_inflations = _ntuple(num_stages)(inflate) + self.non_local_stages = _ntuple(num_stages)(non_local) + self.inflate_style = inflate_style + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + self.zero_init_residual = zero_init_residual + + self.block, stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + self.inplanes = self.base_channels + + self.non_local_cfg = non_local_cfg + + self._make_stem_layer() + + self.res_layers = [] + for i, num_blocks in enumerate(self.stage_blocks): + spatial_stride = spatial_strides[i] + temporal_stride = temporal_strides[i] + dilation = dilations[i] + planes = self.base_channels * 2**i + res_layer = self.make_res_layer( + self.block, + self.inplanes, + planes, + num_blocks, + spatial_stride=spatial_stride, + temporal_stride=temporal_stride, + dilation=dilation, + style=self.style, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg, + act_cfg=self.act_cfg, + non_local=self.non_local_stages[i], + non_local_cfg=self.non_local_cfg, + inflate=self.stage_inflations[i], + inflate_style=self.inflate_style, + with_cp=with_cp, + **kwargs) + self.inplanes = planes * self.block.expansion + layer_name = f'layer{i + 1}' + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self.feat_dim = self.block.expansion * self.base_channels * 2**( + len(self.stage_blocks) - 1) + + @staticmethod + def make_res_layer(block, + inplanes, + planes, + blocks, + spatial_stride=1, + temporal_stride=1, + dilation=1, + style='pytorch', + inflate=1, + inflate_style='3x1x1', + non_local=0, + non_local_cfg=dict(), + norm_cfg=None, + act_cfg=None, + conv_cfg=None, + with_cp=False, + **kwargs): + """Build residual layer for ResNet3D. + + Args: + block (nn.Module): Residual module to be built. 
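+
+    Example (illustrative; assumes the usual ``init_weights``/``forward``
+    interface defined later in this file):
+        >>> import torch
+        >>> model = ResNet3d(depth=50, pretrained=None, pretrained2d=False)
+        >>> model.init_weights()
+        >>> clip = torch.rand(1, 3, 8, 64, 64)
+        >>> feat = model(clip)  # 5-D feature map of shape (N, 2048, T', H', W')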
+ inplanes (int): Number of channels for the input feature + in each block. + planes (int): Number of channels for the output feature + in each block. + blocks (int): Number of residual blocks. + spatial_stride (int | Sequence[int]): Spatial strides in + residual and conv layers. Default: 1. + temporal_stride (int | Sequence[int]): Temporal strides in + residual and conv layers. Default: 1. + dilation (int): Spacing between kernel elements. Default: 1. + style (str): ``pytorch`` or ``caffe``. If set to ``pytorch``, + the stride-two layer is the 3x3 conv layer, otherwise + the stride-two layer is the first 1x1 conv layer. + Default: ``pytorch``. + inflate (int | Sequence[int]): Determine whether to inflate + for each block. Default: 1. + inflate_style (str): ``3x1x1`` or ``1x1x1``. which determines + the kernel sizes and padding strides for conv1 and conv2 + in each block. Default: '3x1x1'. + non_local (int | Sequence[int]): Determine whether to apply + non-local module in the corresponding block of each stages. + Default: 0. + non_local_cfg (dict): Config for non-local module. + Default: ``dict()``. + conv_cfg (dict | None): Config for norm layers. Default: None. + norm_cfg (dict | None): Config for norm layers. Default: None. + act_cfg (dict | None): Config for activate layers. Default: None. + with_cp (bool | None): Use checkpoint or not. Using checkpoint + will save some memory while slowing down the training speed. + Default: False. + + Returns: + nn.Module: A residual layer for the given config. + """ + inflate = inflate if not isinstance(inflate, + int) else (inflate, ) * blocks + non_local = non_local if not isinstance( + non_local, int) else (non_local, ) * blocks + assert len(inflate) == blocks and len(non_local) == blocks + downsample = None + if spatial_stride != 1 or inplanes != planes * block.expansion: + downsample = ConvModule( + inplanes, + planes * block.expansion, + kernel_size=1, + stride=(temporal_stride, spatial_stride, spatial_stride), + bias=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + layers = [] + layers.append( + block( + inplanes, + planes, + spatial_stride=spatial_stride, + temporal_stride=temporal_stride, + dilation=dilation, + downsample=downsample, + style=style, + inflate=(inflate[0] == 1), + inflate_style=inflate_style, + non_local=(non_local[0] == 1), + non_local_cfg=non_local_cfg, + norm_cfg=norm_cfg, + conv_cfg=conv_cfg, + act_cfg=act_cfg, + with_cp=with_cp, + **kwargs)) + inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append( + block( + inplanes, + planes, + spatial_stride=1, + temporal_stride=1, + dilation=dilation, + style=style, + inflate=(inflate[i] == 1), + inflate_style=inflate_style, + non_local=(non_local[i] == 1), + non_local_cfg=non_local_cfg, + norm_cfg=norm_cfg, + conv_cfg=conv_cfg, + act_cfg=act_cfg, + with_cp=with_cp, + **kwargs)) + + return nn.Sequential(*layers) + + @staticmethod + def _inflate_conv_params(conv3d, state_dict_2d, module_name_2d, + inflated_param_names): + """Inflate a conv module from 2d to 3d. + + Args: + conv3d (nn.Module): The destination conv3d module. + state_dict_2d (OrderedDict): The state dict of pretrained 2d model. + module_name_2d (str): The name of corresponding conv module in the + 2d model. + inflated_param_names (list[str]): List of parameters that have been + inflated. 
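+
+            Example:
+                A sketch of the inflation rule only; the tensors below are
+                local to the example, not taken from a real checkpoint:
+
+                >>> import torch
+                >>> w2d = torch.randn(64, 3, 7, 7)        # [C_out, C_in, H, W]
+                >>> kernel_t = 5
+                >>> w3d = w2d.unsqueeze(2).expand(-1, -1, kernel_t, -1, -1) / kernel_t
+                >>> tuple(w3d.shape)                       # [C_out, C_in, T, H, W]
+                (64, 3, 5, 7, 7)
+                >>> # dividing by kernel_t keeps the response unchanged on
+                >>> # temporally constant inputs
+                >>> torch.allclose(w3d.sum(dim=2), w2d)
+                True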
+ """ + weight_2d_name = module_name_2d + '.weight' + + conv2d_weight = state_dict_2d[weight_2d_name] + kernel_t = conv3d.weight.data.shape[2] + + new_weight = conv2d_weight.data.unsqueeze(2).expand_as( + conv3d.weight) / kernel_t + conv3d.weight.data.copy_(new_weight) + inflated_param_names.append(weight_2d_name) + + if getattr(conv3d, 'bias') is not None: + bias_2d_name = module_name_2d + '.bias' + conv3d.bias.data.copy_(state_dict_2d[bias_2d_name]) + inflated_param_names.append(bias_2d_name) + + @staticmethod + def _inflate_bn_params(bn3d, state_dict_2d, module_name_2d, + inflated_param_names): + """Inflate a norm module from 2d to 3d. + + Args: + bn3d (nn.Module): The destination bn3d module. + state_dict_2d (OrderedDict): The state dict of pretrained 2d model. + module_name_2d (str): The name of corresponding bn module in the + 2d model. + inflated_param_names (list[str]): List of parameters that have been + inflated. + """ + for param_name, param in bn3d.named_parameters(): + param_2d_name = f'{module_name_2d}.{param_name}' + param_2d = state_dict_2d[param_2d_name] + param.data.copy_(param_2d) + inflated_param_names.append(param_2d_name) + + for param_name, param in bn3d.named_buffers(): + param_2d_name = f'{module_name_2d}.{param_name}' + # some buffers like num_batches_tracked may not exist in old + # checkpoints + if param_2d_name in state_dict_2d: + param_2d = state_dict_2d[param_2d_name] + param.data.copy_(param_2d) + inflated_param_names.append(param_2d_name) + + def inflate_weights(self, logger): + """Inflate the resnet2d parameters to resnet3d. + + The differences between resnet3d and resnet2d mainly lie in an extra + axis of conv kernel. To utilize the pretrained parameters in 2d model, + the weight of conv2d models should be inflated to fit in the shapes of + the 3d counterpart. + + Args: + logger (logging.Logger): The logger used to print + debugging infomation. + """ + + state_dict_r2d = _load_checkpoint(self.pretrained) + if 'state_dict' in state_dict_r2d: + state_dict_r2d = state_dict_r2d['state_dict'] + + inflated_param_names = [] + for name, module in self.named_modules(): + if isinstance(module, ConvModule): + # we use a ConvModule to wrap conv+bn+relu layers, thus the + # name mapping is needed + if 'downsample' in name: + # layer{X}.{Y}.downsample.conv->layer{X}.{Y}.downsample.0 + original_conv_name = name + '.0' + # layer{X}.{Y}.downsample.bn->layer{X}.{Y}.downsample.1 + original_bn_name = name + '.1' + else: + # layer{X}.{Y}.conv{n}.conv->layer{X}.{Y}.conv{n} + original_conv_name = name + # layer{X}.{Y}.conv{n}.bn->layer{X}.{Y}.bn{n} + original_bn_name = name.replace('conv', 'bn') + if original_conv_name + '.weight' not in state_dict_r2d: + logger.warning(f'Module not exist in the state_dict_r2d' + f': {original_conv_name}') + else: + shape_2d = state_dict_r2d[original_conv_name + + '.weight'].shape + shape_3d = module.conv.weight.data.shape + if shape_2d != shape_3d[:2] + shape_3d[3:]: + logger.warning(f'Weight shape mismatch for ' + f': {original_conv_name} : ' + f'3d weight shape: {shape_3d}; ' + f'2d weight shape: {shape_2d}. 
') + else: + self._inflate_conv_params(module.conv, state_dict_r2d, + original_conv_name, + inflated_param_names) + + if original_bn_name + '.weight' not in state_dict_r2d: + logger.warning(f'Module not exist in the state_dict_r2d' + f': {original_bn_name}') + else: + self._inflate_bn_params(module.bn, state_dict_r2d, + original_bn_name, + inflated_param_names) + + # check if any parameters in the 2d checkpoint are not loaded + remaining_names = set( + state_dict_r2d.keys()) - set(inflated_param_names) + if remaining_names: + logger.info(f'These parameters in the 2d checkpoint are not loaded' + f': {remaining_names}') + + def _make_stem_layer(self): + """Construct the stem layers consists of a conv+norm+act module and a + pooling layer.""" + self.conv1 = ConvModule( + self.in_channels, + self.base_channels, + kernel_size=self.conv1_kernel, + stride=(self.conv1_stride_t, 2, 2), + padding=tuple([(k - 1) // 2 for k in _triple(self.conv1_kernel)]), + bias=False, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.maxpool = nn.MaxPool3d( + kernel_size=(1, 3, 3), + stride=(self.pool1_stride_t, 2, 2), + padding=(0, 1, 1)) + + self.pool2 = nn.MaxPool3d(kernel_size=(2, 1, 1), stride=(2, 1, 1)) + + def _freeze_stages(self): + """Prevent all the parameters from being optimized before + ``self.frozen_stages``.""" + if self.frozen_stages >= 0: + self.conv1.eval() + for param in self.conv1.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, f'layer{i}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self): + """Initiate the parameters either from existing checkpoint or from + scratch.""" + if isinstance(self.pretrained, str): + logger = get_root_logger() + logger.info(f'load model from: {self.pretrained}') + + if self.pretrained2d: + # Inflate 2D model into 3D model. + self.inflate_weights(logger) + + else: + # Directly load 3D model. + load_checkpoint( + self, self.pretrained, strict=False, logger=logger) + + elif self.pretrained is None: + for m in self.modules(): + if isinstance(m, nn.Conv3d): + kaiming_init(m) + elif isinstance(m, _BatchNorm): + constant_init(m, 1) + + if self.zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck3d): + constant_init(m.conv3.bn, 0) + elif isinstance(m, BasicBlock3d): + constant_init(m.conv2.bn, 0) + else: + raise TypeError('pretrained must be a str or None') + + def forward(self, x): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + torch.Tensor: The feature of the input + samples extracted by the backbone. 
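+
+            Example:
+                A sketch of how ``out_indices`` changes the return type
+                (assumes the same environment as the class-level example):
+
+                >>> import torch
+                >>> multi = ResNet3d(depth=50, pretrained=None, pretrained2d=False,
+                ...                  out_indices=(2, 3))
+                >>> multi.init_weights()
+                >>> feats = multi(torch.randn(1, 3, 8, 64, 64))
+                >>> [f.size(1) for f in feats]   # stage-3 and stage-4 channels
+                [1024, 2048]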
+ """ + x = self.conv1(x) + x = self.maxpool(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i == 0 and self.with_pool2: + x = self.pool2(x) + if i in self.out_indices: + outs.append(x) + if len(outs) == 1: + return outs[0] + + return tuple(outs) + + def train(self, mode=True): + """Set the optimization status when training.""" + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet3d_csn.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet3d_csn.py new file mode 100644 index 0000000..97c3e42 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet3d_csn.py @@ -0,0 +1,148 @@ +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmcv.utils import _BatchNorm + +from ..registry import BACKBONES +from .resnet3d import Bottleneck3d, ResNet3d + + +class CSNBottleneck3d(Bottleneck3d): + """Channel-Separated Bottleneck Block. + + This module is proposed in + "Video Classification with Channel-Separated Convolutional Networks" + Link: https://arxiv.org/pdf/1711.11248.pdf + + Args: + inplanes (int): Number of channels for the input in first conv3d layer. + planes (int): Number of channels produced by some norm/conv3d layers. + bottleneck_mode (str): Determine which ways to factorize a 3D + bottleneck block using channel-separated convolutional networks. + If set to 'ip', it will replace the 3x3x3 conv2 layer with a + 1x1x1 traditional convolution and a 3x3x3 depthwise + convolution, i.e., Interaction-preserved channel-separated + bottleneck block. + If set to 'ir', it will replace the 3x3x3 conv2 layer with a + 3x3x3 depthwise convolution, which is derived from preserved + bottleneck block by removing the extra 1x1x1 convolution, + i.e., Interaction-reduced channel-separated bottleneck block. + Default: 'ir'. + args (position arguments): Position arguments for Bottleneck. + kwargs (dict, optional): Keyword arguments for Bottleneck. + """ + + def __init__(self, + inplanes, + planes, + *args, + bottleneck_mode='ir', + **kwargs): + super(CSNBottleneck3d, self).__init__(inplanes, planes, *args, + **kwargs) + self.bottleneck_mode = bottleneck_mode + conv2 = [] + if self.bottleneck_mode == 'ip': + conv2.append( + nn.Conv3d(planes, planes, kernel_size=1, stride=1, bias=False)) + conv2_kernel_size = self.conv2.conv.kernel_size + conv2_stride = self.conv2.conv.stride + conv2_padding = self.conv2.conv.padding + conv2_dilation = self.conv2.conv.dilation + conv2_bias = bool(self.conv2.conv.bias) + self.conv2 = ConvModule( + planes, + planes, + conv2_kernel_size, + stride=conv2_stride, + padding=conv2_padding, + dilation=conv2_dilation, + bias=conv2_bias, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + groups=planes) + conv2.append(self.conv2) + self.conv2 = nn.Sequential(*conv2) + + +@BACKBONES.register_module() +class ResNet3dCSN(ResNet3d): + """ResNet backbone for CSN. + + Args: + depth (int): Depth of ResNetCSN, from {18, 34, 50, 101, 152}. + pretrained (str | None): Name of pretrained model. + temporal_strides (tuple[int]): + Temporal strides of residual blocks of each stage. + Default: (1, 2, 2, 2). + conv1_kernel (tuple[int]): Kernel size of the first conv layer. + Default: (3, 7, 7). + conv1_stride_t (int): Temporal stride of the first conv layer. 
+ Default: 1. + pool1_stride_t (int): Temporal stride of the first pooling layer. + Default: 1. + norm_cfg (dict): Config for norm layers. required keys are `type` and + `requires_grad`. + Default: dict(type='BN3d', requires_grad=True, eps=1e-3). + inflate_style (str): `3x1x1` or `1x1x1`. which determines the kernel + sizes and padding strides for conv1 and conv2 in each block. + Default: '3x3x3'. + bottleneck_mode (str): Determine which ways to factorize a 3D + bottleneck block using channel-separated convolutional networks. + If set to 'ip', it will replace the 3x3x3 conv2 layer with a + 1x1x1 traditional convolution and a 3x3x3 depthwise + convolution, i.e., Interaction-preserved channel-separated + bottleneck block. + If set to 'ir', it will replace the 3x3x3 conv2 layer with a + 3x3x3 depthwise convolution, which is derived from preserved + bottleneck block by removing the extra 1x1x1 convolution, + i.e., Interaction-reduced channel-separated bottleneck block. + Default: 'ip'. + kwargs (dict, optional): Key arguments for "make_res_layer". + """ + + def __init__(self, + depth, + pretrained, + temporal_strides=(1, 2, 2, 2), + conv1_kernel=(3, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + norm_cfg=dict(type='BN3d', requires_grad=True, eps=1e-3), + inflate_style='3x3x3', + bottleneck_mode='ir', + bn_frozen=False, + **kwargs): + self.arch_settings = { + # 18: (BasicBlock3d, (2, 2, 2, 2)), + # 34: (BasicBlock3d, (3, 4, 6, 3)), + 50: (CSNBottleneck3d, (3, 4, 6, 3)), + 101: (CSNBottleneck3d, (3, 4, 23, 3)), + 152: (CSNBottleneck3d, (3, 8, 36, 3)) + } + self.bn_frozen = bn_frozen + if bottleneck_mode not in ['ip', 'ir']: + raise ValueError(f'Bottleneck mode must be "ip" or "ir",' + f'but got {bottleneck_mode}.') + super(ResNet3dCSN, self).__init__( + depth, + pretrained, + temporal_strides=temporal_strides, + conv1_kernel=conv1_kernel, + conv1_stride_t=conv1_stride_t, + pool1_stride_t=pool1_stride_t, + norm_cfg=norm_cfg, + inflate_style=inflate_style, + bottleneck_mode=bottleneck_mode, + **kwargs) + + def train(self, mode=True): + super(ResNet3d, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() + if self.bn_frozen: + for param in m.parameters(): + param.requires_grad = False diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet3d_slowfast.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet3d_slowfast.py new file mode 100644 index 0000000..c2f0301 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet3d_slowfast.py @@ -0,0 +1,493 @@ +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, kaiming_init +from mmcv.runner import _load_checkpoint, load_checkpoint +from mmcv.utils import print_log + +from ...utils import get_root_logger +from ..registry import BACKBONES +from .resnet3d import ResNet3d + + +class ResNet3dPathway(ResNet3d): + """A pathway of Slowfast based on ResNet3d. + + Args: + *args (arguments): Arguments same as :class:``ResNet3d``. + lateral (bool): Determines whether to enable the lateral connection + from another pathway. Default: False. + speed_ratio (int): Speed ratio indicating the ratio between time + dimension of the fast and slow pathway, corresponding to the + ``alpha`` in the paper. Default: 8. + channel_ratio (int): Reduce the channel number of fast pathway + by ``channel_ratio``, corresponding to ``beta`` in the paper. + Default: 8. 
+ fusion_kernel (int): The kernel size of lateral fusion. + Default: 5. + **kwargs (keyword arguments): Keywords arguments for ResNet3d. + """ + + def __init__(self, + *args, + lateral=False, + speed_ratio=8, + channel_ratio=8, + fusion_kernel=5, + **kwargs): + self.lateral = lateral + self.speed_ratio = speed_ratio + self.channel_ratio = channel_ratio + self.fusion_kernel = fusion_kernel + super().__init__(*args, **kwargs) + self.inplanes = self.base_channels + if self.lateral: + self.conv1_lateral = ConvModule( + self.inplanes // self.channel_ratio, + # https://arxiv.org/abs/1812.03982, the + # third type of lateral connection has out_channel: + # 2 * \beta * C + self.inplanes * 2 // self.channel_ratio, + kernel_size=(fusion_kernel, 1, 1), + stride=(self.speed_ratio, 1, 1), + padding=((fusion_kernel - 1) // 2, 0, 0), + bias=False, + conv_cfg=self.conv_cfg, + norm_cfg=None, + act_cfg=None) + + self.lateral_connections = [] + for i in range(len(self.stage_blocks)): + planes = self.base_channels * 2**i + self.inplanes = planes * self.block.expansion + + if lateral and i != self.num_stages - 1: + # no lateral connection needed in final stage + lateral_name = f'layer{(i + 1)}_lateral' + setattr( + self, lateral_name, + ConvModule( + self.inplanes // self.channel_ratio, + self.inplanes * 2 // self.channel_ratio, + kernel_size=(fusion_kernel, 1, 1), + stride=(self.speed_ratio, 1, 1), + padding=((fusion_kernel - 1) // 2, 0, 0), + bias=False, + conv_cfg=self.conv_cfg, + norm_cfg=None, + act_cfg=None)) + self.lateral_connections.append(lateral_name) + + def make_res_layer(self, + block, + inplanes, + planes, + blocks, + spatial_stride=1, + temporal_stride=1, + dilation=1, + style='pytorch', + inflate=1, + inflate_style='3x1x1', + non_local=0, + non_local_cfg=dict(), + conv_cfg=None, + norm_cfg=None, + act_cfg=None, + with_cp=False): + """Build residual layer for Slowfast. + + Args: + block (nn.Module): Residual module to be built. + inplanes (int): Number of channels for the input + feature in each block. + planes (int): Number of channels for the output + feature in each block. + blocks (int): Number of residual blocks. + spatial_stride (int | Sequence[int]): Spatial strides + in residual and conv layers. Default: 1. + temporal_stride (int | Sequence[int]): Temporal strides in + residual and conv layers. Default: 1. + dilation (int): Spacing between kernel elements. Default: 1. + style (str): ``pytorch`` or ``caffe``. If set to ``pytorch``, + the stride-two layer is the 3x3 conv layer, + otherwise the stride-two layer is the first 1x1 conv layer. + Default: ``pytorch``. + inflate (int | Sequence[int]): Determine whether to inflate + for each block. Default: 1. + inflate_style (str): ``3x1x1`` or ``1x1x1``. which determines + the kernel sizes and padding strides for conv1 and + conv2 in each block. Default: ``3x1x1``. + non_local (int | Sequence[int]): Determine whether to apply + non-local module in the corresponding block of each stages. + Default: 0. + non_local_cfg (dict): Config for non-local module. + Default: ``dict()``. + conv_cfg (dict | None): Config for conv layers. Default: None. + norm_cfg (dict | None): Config for norm layers. Default: None. + act_cfg (dict | None): Config for activate layers. Default: None. + with_cp (bool): Use checkpoint or not. Using checkpoint will save + some memory while slowing down the training speed. + Default: False. + + Returns: + nn.Module: A residual layer for the given config. 
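+
+            Example:
+                A small illustration of how the lateral connection widens the
+                input of the first block (numbers assume the default
+                ``channel_ratio=8``):
+
+                >>> inplanes, channel_ratio = 256, 8
+                >>> lateral_inplanes = inplanes * 2 // channel_ratio
+                >>> inplanes + lateral_inplanes  # channels fed to the first block
+                320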
+ """ + inflate = inflate if not isinstance(inflate, + int) else (inflate, ) * blocks + non_local = non_local if not isinstance( + non_local, int) else (non_local, ) * blocks + assert len(inflate) == blocks and len(non_local) == blocks + if self.lateral: + lateral_inplanes = inplanes * 2 // self.channel_ratio + else: + lateral_inplanes = 0 + if (spatial_stride != 1 + or (inplanes + lateral_inplanes) != planes * block.expansion): + downsample = ConvModule( + inplanes + lateral_inplanes, + planes * block.expansion, + kernel_size=1, + stride=(temporal_stride, spatial_stride, spatial_stride), + bias=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + else: + downsample = None + + layers = [] + layers.append( + block( + inplanes + lateral_inplanes, + planes, + spatial_stride, + temporal_stride, + dilation, + downsample, + style=style, + inflate=(inflate[0] == 1), + inflate_style=inflate_style, + non_local=(non_local[0] == 1), + non_local_cfg=non_local_cfg, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + with_cp=with_cp)) + inplanes = planes * block.expansion + + for i in range(1, blocks): + layers.append( + block( + inplanes, + planes, + 1, + 1, + dilation, + style=style, + inflate=(inflate[i] == 1), + inflate_style=inflate_style, + non_local=(non_local[i] == 1), + non_local_cfg=non_local_cfg, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + with_cp=with_cp)) + + return nn.Sequential(*layers) + + def inflate_weights(self, logger): + """Inflate the resnet2d parameters to resnet3d pathway. + + The differences between resnet3d and resnet2d mainly lie in an extra + axis of conv kernel. To utilize the pretrained parameters in 2d model, + the weight of conv2d models should be inflated to fit in the shapes of + the 3d counterpart. For pathway the ``lateral_connection`` part should + not be inflated from 2d weights. + + Args: + logger (logging.Logger): The logger used to print + debugging infomation. + """ + + state_dict_r2d = _load_checkpoint(self.pretrained) + if 'state_dict' in state_dict_r2d: + state_dict_r2d = state_dict_r2d['state_dict'] + + inflated_param_names = [] + for name, module in self.named_modules(): + if 'lateral' in name: + continue + if isinstance(module, ConvModule): + # we use a ConvModule to wrap conv+bn+relu layers, thus the + # name mapping is needed + if 'downsample' in name: + # layer{X}.{Y}.downsample.conv->layer{X}.{Y}.downsample.0 + original_conv_name = name + '.0' + # layer{X}.{Y}.downsample.bn->layer{X}.{Y}.downsample.1 + original_bn_name = name + '.1' + else: + # layer{X}.{Y}.conv{n}.conv->layer{X}.{Y}.conv{n} + original_conv_name = name + # layer{X}.{Y}.conv{n}.bn->layer{X}.{Y}.bn{n} + original_bn_name = name.replace('conv', 'bn') + self._inflate_conv_params(module.conv, state_dict_r2d, + original_conv_name, + inflated_param_names) + self._inflate_bn_params(module.bn, state_dict_r2d, + original_bn_name, inflated_param_names) + + # check if any parameters in the 2d checkpoint are not loaded + remaining_names = set( + state_dict_r2d.keys()) - set(inflated_param_names) + if remaining_names: + logger.info(f'These parameters in the 2d checkpoint are not loaded' + f': {remaining_names}') + + def _inflate_conv_params(self, conv3d, state_dict_2d, module_name_2d, + inflated_param_names): + """Inflate a conv module from 2d to 3d. + + The differences of conv modules betweene 2d and 3d in Pathway + mainly lie in the inplanes due to lateral connections. 
To fit the + shapes of the lateral connection counterpart, it will expand + parameters by concatting conv2d parameters and extra zero paddings. + + Args: + conv3d (nn.Module): The destination conv3d module. + state_dict_2d (OrderedDict): The state dict of pretrained 2d model. + module_name_2d (str): The name of corresponding conv module in the + 2d model. + inflated_param_names (list[str]): List of parameters that have been + inflated. + """ + weight_2d_name = module_name_2d + '.weight' + conv2d_weight = state_dict_2d[weight_2d_name] + old_shape = conv2d_weight.shape + new_shape = conv3d.weight.data.shape + kernel_t = new_shape[2] + if new_shape[1] != old_shape[1]: + # Inplanes may be different due to lateral connections + new_channels = new_shape[1] - old_shape[1] + pad_shape = old_shape + pad_shape = pad_shape[:1] + (new_channels, ) + pad_shape[2:] + # Expand parameters by concat extra channels + conv2d_weight = torch.cat( + (conv2d_weight, + torch.zeros(pad_shape).type_as(conv2d_weight).to( + conv2d_weight.device)), + dim=1) + new_weight = conv2d_weight.data.unsqueeze(2).expand_as( + conv3d.weight) / kernel_t + conv3d.weight.data.copy_(new_weight) + inflated_param_names.append(weight_2d_name) + + if getattr(conv3d, 'bias') is not None: + bias_2d_name = module_name_2d + '.bias' + conv3d.bias.data.copy_(state_dict_2d[bias_2d_name]) + inflated_param_names.append(bias_2d_name) + + def _freeze_stages(self): + """Prevent all the parameters from being optimized before + `self.frozen_stages`.""" + if self.frozen_stages >= 0: + self.conv1.eval() + for param in self.conv1.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, f'layer{i}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + if i != len(self.res_layers) and self.lateral: + # No fusion needed in the final stage + lateral_name = self.lateral_connections[i - 1] + conv_lateral = getattr(self, lateral_name) + conv_lateral.eval() + for param in conv_lateral.parameters(): + param.requires_grad = False + + def init_weights(self): + """Initiate the parameters either from existing checkpoint or from + scratch.""" + # Override the init_weights of i3d + super().init_weights() + for module_name in self.lateral_connections: + layer = getattr(self, module_name) + for m in layer.modules(): + if isinstance(m, (nn.Conv3d, nn.Conv2d)): + kaiming_init(m) + + +pathway_cfg = { + 'resnet3d': ResNet3dPathway, + # TODO: BNInceptionPathway +} + + +def build_pathway(cfg, *args, **kwargs): + """Build pathway. + + Args: + cfg (None or dict): cfg should contain: + - type (str): identify conv layer type. + + Returns: + nn.Module: Created pathway. + """ + if not (isinstance(cfg, dict) and 'type' in cfg): + raise TypeError('cfg must be a dict containing the key "type"') + cfg_ = cfg.copy() + + pathway_type = cfg_.pop('type') + if pathway_type not in pathway_cfg: + raise KeyError(f'Unrecognized pathway type {pathway_type}') + + pathway_cls = pathway_cfg[pathway_type] + pathway = pathway_cls(*args, **kwargs, **cfg_) + + return pathway + + +@BACKBONES.register_module() +class ResNet3dSlowFast(nn.Module): + """Slowfast backbone. + + This module is proposed in `SlowFast Networks for Video Recognition + `_ + + Args: + pretrained (str): The file path to a pretrained model. + resample_rate (int): A large temporal stride ``resample_rate`` + on input frames, corresponding to the :math:`\\tau` in the paper. + i.e., it processes only one out of ``resample_rate`` frames. + Default: 16. 
+ speed_ratio (int): Speed ratio indicating the ratio between time + dimension of the fast and slow pathway, corresponding to the + :math:`\\alpha` in the paper. Default: 8. + channel_ratio (int): Reduce the channel number of fast pathway + by ``channel_ratio``, corresponding to :math:`\\beta` in the paper. + Default: 8. + slow_pathway (dict): Configuration of slow branch, should contain + necessary arguments for building the specific type of pathway + and: + type (str): type of backbone the pathway bases on. + lateral (bool): determine whether to build lateral connection + for the pathway.Default: + + .. code-block:: Python + + dict(type='ResNetPathway', + lateral=True, depth=50, pretrained=None, + conv1_kernel=(1, 7, 7), dilations=(1, 1, 1, 1), + conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1)) + + fast_pathway (dict): Configuration of fast branch, similar to + `slow_pathway`. Default: + + .. code-block:: Python + + dict(type='ResNetPathway', + lateral=False, depth=50, pretrained=None, base_channels=8, + conv1_kernel=(5, 7, 7), conv1_stride_t=1, pool1_stride_t=1) + """ + + def __init__(self, + pretrained, + resample_rate=8, + speed_ratio=8, + channel_ratio=8, + slow_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=True, + conv1_kernel=(1, 7, 7), + dilations=(1, 1, 1, 1), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1)), + fast_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=False, + base_channels=8, + conv1_kernel=(5, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1)): + super().__init__() + self.pretrained = pretrained + self.resample_rate = resample_rate + self.speed_ratio = speed_ratio + self.channel_ratio = channel_ratio + + if slow_pathway['lateral']: + slow_pathway['speed_ratio'] = speed_ratio + slow_pathway['channel_ratio'] = channel_ratio + + self.slow_path = build_pathway(slow_pathway) + self.fast_path = build_pathway(fast_pathway) + + def init_weights(self): + """Initiate the parameters either from existing checkpoint or from + scratch.""" + if isinstance(self.pretrained, str): + logger = get_root_logger() + msg = f'load model from: {self.pretrained}' + print_log(msg, logger=logger) + # Directly load 3D model. + load_checkpoint(self, self.pretrained, strict=True, logger=logger) + elif self.pretrained is None: + # Init two branch seperately. + self.fast_path.init_weights() + self.slow_path.init_weights() + else: + raise TypeError('pretrained must be a str or None') + + def forward(self, x): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + tuple[torch.Tensor]: The feature of the input samples extracted + by the backbone. 
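+
+            Example:
+                A minimal usage sketch with the default pathway configs
+                (assumes ``mmcv`` is installed; shapes are illustrative):
+
+                >>> import torch
+                >>> model = ResNet3dSlowFast(pretrained=None)
+                >>> model.init_weights()
+                >>> clip = torch.randn(1, 3, 32, 64, 64)  # [N, C, T, H, W]
+                >>> x_slow, x_fast = model(clip)
+                >>> # with resample_rate=8 and speed_ratio=8, the slow pathway
+                >>> # sees 32 / 8 = 4 frames while the fast pathway keeps all 32;
+                >>> # expected: x_slow [1, 2048, 4, 2, 2], x_fast [1, 256, 32, 2, 2]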
+ """ + x_slow = nn.functional.interpolate( + x, + mode='nearest', + scale_factor=(1.0 / self.resample_rate, 1.0, 1.0)) + x_slow = self.slow_path.conv1(x_slow) + x_slow = self.slow_path.maxpool(x_slow) + + x_fast = nn.functional.interpolate( + x, + mode='nearest', + scale_factor=(1.0 / (self.resample_rate // self.speed_ratio), 1.0, + 1.0)) + x_fast = self.fast_path.conv1(x_fast) + x_fast = self.fast_path.maxpool(x_fast) + + if self.slow_path.lateral: + x_fast_lateral = self.slow_path.conv1_lateral(x_fast) + x_slow = torch.cat((x_slow, x_fast_lateral), dim=1) + + for i, layer_name in enumerate(self.slow_path.res_layers): + res_layer = getattr(self.slow_path, layer_name) + x_slow = res_layer(x_slow) + res_layer_fast = getattr(self.fast_path, layer_name) + x_fast = res_layer_fast(x_fast) + if (i != len(self.slow_path.res_layers) - 1 + and self.slow_path.lateral): + # No fusion needed in the final stage + lateral_name = self.slow_path.lateral_connections[i] + conv_lateral = getattr(self.slow_path, lateral_name) + x_fast_lateral = conv_lateral(x_fast) + x_slow = torch.cat((x_slow, x_fast_lateral), dim=1) + + out = (x_slow, x_fast) + + return out diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet3d_slowonly.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet3d_slowonly.py new file mode 100644 index 0000000..ad1f750 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet3d_slowonly.py @@ -0,0 +1,42 @@ +from ..registry import BACKBONES +from .resnet3d_slowfast import ResNet3dPathway + + +@BACKBONES.register_module() +class ResNet3dSlowOnly(ResNet3dPathway): + """SlowOnly backbone based on ResNet3dPathway. + + Args: + *args (arguments): Arguments same as :class:`ResNet3dPathway`. + conv1_kernel (Sequence[int]): Kernel size of the first conv layer. + Default: (1, 7, 7). + conv1_stride_t (int): Temporal stride of the first conv layer. + Default: 1. + pool1_stride_t (int): Temporal stride of the first pooling layer. + Default: 1. + inflate (Sequence[int]): Inflate Dims of each block. + Default: (0, 0, 1, 1). + **kwargs (keyword arguments): Keywords arguments for + :class:`ResNet3dPathway`. + """ + + def __init__(self, + *args, + lateral=False, + conv1_kernel=(1, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + with_pool2=False, + **kwargs): + super().__init__( + *args, + lateral=lateral, + conv1_kernel=conv1_kernel, + conv1_stride_t=conv1_stride_t, + pool1_stride_t=pool1_stride_t, + inflate=inflate, + with_pool2=with_pool2, + **kwargs) + + assert not self.lateral diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet_audio.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet_audio.py new file mode 100644 index 0000000..ea5792d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet_audio.py @@ -0,0 +1,373 @@ +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule, constant_init, kaiming_init +from mmcv.runner import load_checkpoint +from torch.nn.modules.batchnorm import _BatchNorm +from torch.nn.modules.utils import _ntuple + +from mmaction.models.registry import BACKBONES +from mmaction.utils import get_root_logger + + +class Bottleneck2dAudio(nn.Module): + """Bottleneck2D block for ResNet2D. + + Args: + inplanes (int): Number of channels for the input in first conv3d layer. + planes (int): Number of channels produced by some norm/conv3d layers. 
+ stride (int | tuple[int]): Stride in the conv layer. Default: 1. + dilation (int): Spacing between kernel elements. Default: 1. + downsample (nn.Module): Downsample layer. Default: None. + factorize (bool): Whether to factorize kernel. Default: True. + norm_cfg (dict): + Config for norm layers. required keys are `type` and + `requires_grad`. Default: None. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + """ + expansion = 4 + + def __init__(self, + inplanes, + planes, + stride=2, + dilation=1, + downsample=None, + factorize=True, + norm_cfg=None, + with_cp=False): + super().__init__() + + self.inplanes = inplanes + self.planes = planes + self.stride = stride + self.dilation = dilation + self.factorize = factorize + self.norm_cfg = norm_cfg + self.with_cp = with_cp + + self.conv1_stride = 1 + self.conv2_stride = stride + + conv1_kernel_size = (1, 1) + conv1_padding = 0 + conv2_kernel_size = (3, 3) + conv2_padding = (dilation, dilation) + self.conv1 = ConvModule( + inplanes, + planes, + kernel_size=conv1_kernel_size, + padding=conv1_padding, + dilation=dilation, + norm_cfg=self.norm_cfg, + bias=False) + self.conv2 = ConvModule( + planes, + planes, + kernel_size=conv2_kernel_size, + stride=stride, + padding=conv2_padding, + dilation=dilation, + bias=False, + conv_cfg=dict(type='ConvAudio') if factorize else dict( + type='Conv'), + norm_cfg=None, + act_cfg=None) + self.conv3 = ConvModule( + 2 * planes if factorize else planes, + planes * self.expansion, + kernel_size=1, + bias=False, + norm_cfg=self.norm_cfg, + act_cfg=None) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + + def forward(self, x): + + def _inner_forward(x): + identity = x + out = self.conv1(x) + out = self.conv2(out) + out = self.conv3(out) + + if self.downsample is not None: + identity = self.downsample(x) + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +@BACKBONES.register_module() +class ResNetAudio(nn.Module): + """ResNet 2d audio backbone. Reference: + + `_. + + Args: + depth (int): Depth of resnet, from {50, 101, 152}. + pretrained (str | None): Name of pretrained model. + in_channels (int): Channel num of input features. Default: 1. + base_channels (int): Channel num of stem output features. Default: 32. + num_stages (int): Resnet stages. Default: 4. + strides (Sequence[int]): Strides of residual blocks of each stage. + Default: (1, 2, 2, 2). + dilations (Sequence[int]): Dilation of each stage. + Default: (1, 1, 1, 1). + conv1_kernel (int): Kernel size of the first conv layer. Default: 9. + conv1_stride (int | tuple[int]): Stride of the first conv layer. + Default: 1. + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. + factorize (Sequence[int]): factorize Dims of each block for audio. + Default: (1, 1, 0, 0). + norm_eval (bool): Whether to set BN layers to eval mode, namely, freeze + running stats (mean and var). Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + conv_cfg (dict): Config for norm layers. Default: dict(type='Conv'). + norm_cfg (dict): + Config for norm layers. required keys are `type` and + `requires_grad`. Default: dict(type='BN2d', requires_grad=True). + act_cfg (dict): Config for activate layers. 
+ Default: dict(type='ReLU', inplace=True). + zero_init_residual (bool): + Whether to use zero initialization for residual block, + Default: True. + """ + + arch_settings = { + # 18: (BasicBlock2dAudio, (2, 2, 2, 2)), + # 34: (BasicBlock2dAudio, (3, 4, 6, 3)), + 50: (Bottleneck2dAudio, (3, 4, 6, 3)), + 101: (Bottleneck2dAudio, (3, 4, 23, 3)), + 152: (Bottleneck2dAudio, (3, 8, 36, 3)) + } + + def __init__(self, + depth, + pretrained, + in_channels=1, + num_stages=4, + base_channels=32, + strides=(1, 2, 2, 2), + dilations=(1, 1, 1, 1), + conv1_kernel=9, + conv1_stride=1, + frozen_stages=-1, + factorize=(1, 1, 0, 0), + norm_eval=False, + with_cp=False, + conv_cfg=dict(type='Conv'), + norm_cfg=dict(type='BN2d', requires_grad=True), + act_cfg=dict(type='ReLU', inplace=True), + zero_init_residual=True): + super().__init__() + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for resnet') + self.depth = depth + self.pretrained = pretrained + self.in_channels = in_channels + self.base_channels = base_channels + self.num_stages = num_stages + assert num_stages >= 1 and num_stages <= 4 + self.dilations = dilations + self.conv1_kernel = conv1_kernel + self.conv1_stride = conv1_stride + self.frozen_stages = frozen_stages + self.stage_factorization = _ntuple(num_stages)(factorize) + self.norm_eval = norm_eval + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.zero_init_residual = zero_init_residual + + self.block, stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + self.inplanes = self.base_channels + + self._make_stem_layer() + + self.res_layers = [] + for i, num_blocks in enumerate(self.stage_blocks): + stride = strides[i] + dilation = dilations[i] + planes = self.base_channels * 2**i + res_layer = self.make_res_layer( + self.block, + self.inplanes, + planes, + num_blocks, + stride=stride, + dilation=dilation, + factorize=self.stage_factorization[i], + norm_cfg=self.norm_cfg, + with_cp=with_cp) + self.inplanes = planes * self.block.expansion + layer_name = f'layer{i + 1}' + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self.feat_dim = self.block.expansion * self.base_channels * 2**( + len(self.stage_blocks) - 1) + + def make_res_layer(self, + block, + inplanes, + planes, + blocks, + stride=1, + dilation=1, + factorize=1, + norm_cfg=None, + with_cp=False): + """Build residual layer for ResNetAudio. + + Args: + block (nn.Module): Residual module to be built. + inplanes (int): Number of channels for the input feature + in each block. + planes (int): Number of channels for the output feature + in each block. + blocks (int): Number of residual blocks. + strides (Sequence[int]): Strides of residual blocks of each stage. + Default: (1, 2, 2, 2). + dilation (int): Spacing between kernel elements. Default: 1. + factorize (int | Sequence[int]): Determine whether to factorize + for each block. Default: 1. + norm_cfg (dict): + Config for norm layers. required keys are `type` and + `requires_grad`. Default: None. + with_cp (bool): Use checkpoint or not. Using checkpoint will save + some memory while slowing down the training speed. + Default: False. + + Returns: + A residual layer for the given config. 
+ """ + factorize = factorize if not isinstance( + factorize, int) else (factorize, ) * blocks + assert len(factorize) == blocks + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = ConvModule( + inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False, + norm_cfg=norm_cfg, + act_cfg=None) + + layers = [] + layers.append( + block( + inplanes, + planes, + stride, + dilation, + downsample, + factorize=(factorize[0] == 1), + norm_cfg=norm_cfg, + with_cp=with_cp)) + inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append( + block( + inplanes, + planes, + 1, + dilation, + factorize=(factorize[i] == 1), + norm_cfg=norm_cfg, + with_cp=with_cp)) + + return nn.Sequential(*layers) + + def _make_stem_layer(self): + """Construct the stem layers consists of a conv+norm+act module and a + pooling layer.""" + self.conv1 = ConvModule( + self.in_channels, + self.base_channels, + kernel_size=self.conv1_kernel, + stride=self.conv1_stride, + bias=False, + conv_cfg=dict(type='ConvAudio', op='sum'), + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def _freeze_stages(self): + """Prevent all the parameters from being optimized before + ``self.frozen_stages``.""" + if self.frozen_stages >= 0: + self.conv1.bn.eval() + for m in [self.conv1.conv, self.conv1.bn]: + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, f'layer{i}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self): + """Initiate the parameters either from existing checkpoint or from + scratch.""" + if isinstance(self.pretrained, str): + logger = get_root_logger() + logger.info(f'load model from: {self.pretrained}') + + load_checkpoint(self, self.pretrained, strict=False, logger=logger) + + elif self.pretrained is None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m) + elif isinstance(m, _BatchNorm): + constant_init(m, 1) + + if self.zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck2dAudio): + constant_init(m.conv3.bn, 0) + + else: + raise TypeError('pretrained must be a str or None') + + def forward(self, x): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + torch.Tensor: The feature of the input samples extracted + by the backbone. 
+ """ + x = self.conv1(x) + for layer_name in self.res_layers: + res_layer = getattr(self, layer_name) + x = res_layer(x) + return x + + def train(self, mode=True): + """Set the optimization status when training.""" + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet_tin.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet_tin.py new file mode 100644 index 0000000..7e71e9e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet_tin.py @@ -0,0 +1,376 @@ +import warnings + +import torch +import torch.nn as nn + +from ..registry import BACKBONES +from .resnet_tsm import ResNetTSM + +try: + from mmcv.ops import tin_shift +except (ImportError, ModuleNotFoundError): + warnings.warn('Please install mmcv-full to support "tin_shift"') + + +def linear_sampler(data, offset): + """Differentiable Temporal-wise Frame Sampling, which is essentially a + linear interpolation process. + + It gets the feature map which has been split into several groups + and shift them by different offsets according to their groups. + Then compute the weighted sum along with the temporal dimension. + + Args: + data (torch.Tensor): Split data for certain group in shape + [N, num_segments, C, H, W]. + offset (torch.Tensor): Data offsets for this group data in shape + [N, num_segments]. + """ + # [N, num_segments, C, H, W] + n, t, c, h, w = data.shape + + # offset0, offset1: [N, num_segments] + offset0 = torch.floor(offset).int() + offset1 = offset0 + 1 + + # data, data0, data1: [N, num_segments, C, H * W] + data = data.view(n, t, c, h * w).contiguous() + data0 = tin_shift(data, offset0) + data1 = tin_shift(data, offset1) + + # weight0, weight1: [N, num_segments] + weight0 = 1 - (offset - offset0.float()) + weight1 = 1 - weight0 + + # weight0, weight1: + # [N, num_segments] -> [N, num_segments, C // num_segments] -> [N, C] + group_size = offset.shape[1] + weight0 = weight0[:, :, None].repeat(1, 1, c // group_size) + weight0 = weight0.view(weight0.size(0), -1) + weight1 = weight1[:, :, None].repeat(1, 1, c // group_size) + weight1 = weight1.view(weight1.size(0), -1) + + # weight0, weight1: [N, C] -> [N, 1, C, 1] + weight0 = weight0[:, None, :, None] + weight1 = weight1[:, None, :, None] + + # output: [N, num_segments, C, H * W] -> [N, num_segments, C, H, W] + output = weight0 * data0 + weight1 * data1 + output = output.view(n, t, c, h, w) + + return output + + +class CombineNet(nn.Module): + """Combine Net. + + It combines Temporal interlace module with some part of ResNet layer. + + Args: + net1 (nn.module): Temporal interlace module. + net2 (nn.module): Some part of ResNet layer. + """ + + def __init__(self, net1, net2): + super().__init__() + self.net1 = net1 + self.net2 = net2 + + def forward(self, x): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + torch.Tensor: The output of the module. + """ + # input shape: [num_batches * num_segments, C, H, W] + # output x shape: [num_batches * num_segments, C, H, W] + x = self.net1(x) + # [num_batches * num_segments, C, H, W] + x = self.net2(x) + return x + + +class WeightNet(nn.Module): + """WeightNet in Temporal interlace module. + + The WeightNet consists of two parts: one convolution layer + and a sigmoid function. 
Following the convolution layer, the sigmoid + function and rescale module can scale our output to the range (0, 2). + Here we set the initial bias of the convolution layer to 0, and the + final initial output will be 1.0. + + Args: + in_channels (int): Channel num of input features. + groups (int): Number of groups for fc layer outputs. + """ + + def __init__(self, in_channels, groups): + super().__init__() + self.sigmoid = nn.Sigmoid() + self.groups = groups + + self.conv = nn.Conv1d(in_channels, groups, 3, padding=1) + + self.init_weights() + + def init_weights(self): + """Initiate the parameters either from existing checkpoint or from + scratch.""" + # we set the initial bias of the convolution + # layer to 0, and the final initial output will be 1.0 + self.conv.bias.data[...] = 0 + + def forward(self, x): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + torch.Tensor: The output of the module. + """ + # calculate weight + # [N, C, T] + n, _, t = x.shape + # [N, groups, T] + x = self.conv(x) + x = x.view(n, self.groups, t) + # [N, T, groups] + x = x.permute(0, 2, 1) + + # scale the output to range (0, 2) + x = 2 * self.sigmoid(x) + # [N, T, groups] + return x + + +class OffsetNet(nn.Module): + """OffsetNet in Temporal interlace module. + + The OffsetNet consists of one convolution layer and two fc layers + with a relu activation following with a sigmoid function. Following + the convolution layer, two fc layers and relu are applied to the output. + Then, apply the sigmoid function with a multiply factor and a minus 0.5 + to transform the output to (-4, 4). + + Args: + in_channels (int): Channel num of input features. + groups (int): Number of groups for fc layer outputs. + num_segments (int): Number of frame segments. + """ + + def __init__(self, in_channels, groups, num_segments): + super().__init__() + self.sigmoid = nn.Sigmoid() + # hard code ``kernel_size`` and ``padding`` according to original repo. + kernel_size = 3 + padding = 1 + + self.conv = nn.Conv1d(in_channels, 1, kernel_size, padding=padding) + self.fc1 = nn.Linear(num_segments, num_segments) + self.relu = nn.ReLU() + self.fc2 = nn.Linear(num_segments, groups) + + self.init_weights() + + def init_weights(self): + """Initiate the parameters either from existing checkpoint or from + scratch.""" + # The bias of the last fc layer is initialized to + # make the post-sigmoid output start from 1 + self.fc2.bias.data[...] = 0.5108 + + def forward(self, x): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + torch.Tensor: The output of the module. + """ + # calculate offset + # [N, C, T] + n, _, t = x.shape + # [N, 1, T] + x = self.conv(x) + # [N, T] + x = x.view(n, t) + # [N, T] + x = self.relu(self.fc1(x)) + # [N, groups] + x = self.fc2(x) + # [N, 1, groups] + x = x.view(n, 1, -1) + + # to make sure the output is in (-t/2, t/2) + # where t = num_segments = 8 + x = 4 * (self.sigmoid(x) - 0.5) + # [N, 1, groups] + return x + + +class TemporalInterlace(nn.Module): + """Temporal interlace module. + + This module is proposed in `Temporal Interlacing Network + `_ + + Args: + in_channels (int): Channel num of input features. + num_segments (int): Number of frame segments. Default: 3. + shift_div (int): Number of division parts for shift. Default: 1. 
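+
+        Example:
+            Construction only; running ``forward`` additionally needs the
+            ``tin_shift`` op shipped with ``mmcv-full``:
+
+            >>> ti = TemporalInterlace(in_channels=256, num_segments=8, shift_div=4)
+            >>> # only the first 256 // 4 = 64 channels take part in the shift;
+            >>> # offsets and weights are predicted for deform_groups = 2 groups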
+ """ + + def __init__(self, in_channels, num_segments=3, shift_div=1): + super().__init__() + self.num_segments = num_segments + self.shift_div = shift_div + self.in_channels = in_channels + # hard code ``deform_groups`` according to original repo. + self.deform_groups = 2 + + self.offset_net = OffsetNet(in_channels // shift_div, + self.deform_groups, num_segments) + self.weight_net = WeightNet(in_channels // shift_div, + self.deform_groups) + + def forward(self, x): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + torch.Tensor: The output of the module. + """ + # x: [N, C, H, W], + # where N = num_batches x num_segments, C = shift_div * num_folds + n, c, h, w = x.size() + num_batches = n // self.num_segments + num_folds = c // self.shift_div + + # x_out: [num_batches x num_segments, C, H, W] + x_out = torch.zeros((n, c, h, w), device=x.device) + # x_descriptor: [num_batches, num_segments, num_folds, H, W] + x_descriptor = x[:, :num_folds, :, :].view(num_batches, + self.num_segments, + num_folds, h, w) + + # x should only obtain information on temporal and channel dimensions + # x_pooled: [num_batches, num_segments, num_folds, W] + x_pooled = torch.mean(x_descriptor, 3) + # x_pooled: [num_batches, num_segments, num_folds] + x_pooled = torch.mean(x_pooled, 3) + # x_pooled: [num_batches, num_folds, num_segments] + x_pooled = x_pooled.permute(0, 2, 1).contiguous() + + # Calculate weight and bias, here groups = 2 + # x_offset: [num_batches, groups] + x_offset = self.offset_net(x_pooled).view(num_batches, -1) + # x_weight: [num_batches, num_segments, groups] + x_weight = self.weight_net(x_pooled) + + # x_offset: [num_batches, 2 * groups] + x_offset = torch.cat([x_offset, -x_offset], 1) + # x_shift: [num_batches, num_segments, num_folds, H, W] + x_shift = linear_sampler(x_descriptor, x_offset) + + # x_weight: [num_batches, num_segments, groups, 1] + x_weight = x_weight[:, :, :, None] + # x_weight: + # [num_batches, num_segments, groups * 2, c // self.shift_div // 4] + x_weight = x_weight.repeat(1, 1, 2, num_folds // 2 // 2) + # x_weight: + # [num_batches, num_segments, c // self.shift_div = num_folds] + x_weight = x_weight.view(x_weight.size(0), x_weight.size(1), -1) + + # x_weight: [num_batches, num_segments, num_folds, 1, 1] + x_weight = x_weight[:, :, :, None, None] + # x_shift: [num_batches, num_segments, num_folds, H, W] + x_shift = x_shift * x_weight + # x_shift: [num_batches, num_segments, num_folds, H, W] + x_shift = x_shift.contiguous().view(n, num_folds, h, w) + + # x_out: [num_batches x num_segments, C, H, W] + x_out[:, :num_folds, :] = x_shift + x_out[:, num_folds:, :] = x[:, num_folds:, :] + + return x_out + + +@BACKBONES.register_module() +class ResNetTIN(ResNetTSM): + """ResNet backbone for TIN. + + Args: + depth (int): Depth of ResNet, from {18, 34, 50, 101, 152}. + num_segments (int): Number of frame segments. Default: 8. + is_tin (bool): Whether to apply temporal interlace. Default: True. + shift_div (int): Number of division parts for shift. Default: 4. + kwargs (dict, optional): Arguments for ResNet. 
+ """ + + def __init__(self, + depth, + num_segments=8, + is_tin=True, + shift_div=4, + **kwargs): + super().__init__(depth, **kwargs) + self.num_segments = num_segments + self.is_tin = is_tin + self.shift_div = shift_div + + def make_temporal_interlace(self): + """Make temporal interlace for some layers.""" + num_segment_list = [self.num_segments] * 4 + assert num_segment_list[-1] > 0 + + n_round = 1 + if len(list(self.layer3.children())) >= 23: + print(f'=> Using n_round {n_round} to insert temporal shift.') + + def make_block_interlace(stage, num_segments, shift_div): + """Apply Deformable shift for a ResNet layer module. + + Args: + stage (nn.module): A ResNet layer to be deformed. + num_segments (int): Number of frame segments. + shift_div (int): Number of division parts for shift. + + Returns: + nn.Sequential: A Sequential container consisted of + deformed Interlace blocks. + """ + blocks = list(stage.children()) + for i, b in enumerate(blocks): + if i % n_round == 0: + tds = TemporalInterlace( + b.conv1.in_channels, + num_segments=num_segments, + shift_div=shift_div) + blocks[i].conv1.conv = CombineNet(tds, + blocks[i].conv1.conv) + return nn.Sequential(*blocks) + + self.layer1 = make_block_interlace(self.layer1, num_segment_list[0], + self.shift_div) + self.layer2 = make_block_interlace(self.layer2, num_segment_list[1], + self.shift_div) + self.layer3 = make_block_interlace(self.layer3, num_segment_list[2], + self.shift_div) + self.layer4 = make_block_interlace(self.layer4, num_segment_list[3], + self.shift_div) + + def init_weights(self): + """Initiate the parameters either from existing checkpoint or from + scratch.""" + super(ResNetTSM, self).init_weights() + if self.is_tin: + self.make_temporal_interlace() + if len(self.non_local_cfg) != 0: + self.make_non_local() diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet_tsm.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet_tsm.py new file mode 100644 index 0000000..2c4f999 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/resnet_tsm.py @@ -0,0 +1,294 @@ +import torch +import torch.nn as nn +from mmcv.cnn import NonLocal3d +from torch.nn.modules.utils import _ntuple + +from ..registry import BACKBONES +from .resnet import ResNet + + +class NL3DWrapper(nn.Module): + """3D Non-local wrapper for ResNet50. + + Wrap ResNet layers with 3D NonLocal modules. + + Args: + block (nn.Module): Residual blocks to be built. + num_segments (int): Number of frame segments. + non_local_cfg (dict): Config for non-local layers. Default: ``dict()``. + """ + + def __init__(self, block, num_segments, non_local_cfg=dict()): + super(NL3DWrapper, self).__init__() + self.block = block + self.non_local_cfg = non_local_cfg + self.non_local_block = NonLocal3d(self.block.conv3.norm.num_features, + **self.non_local_cfg) + self.num_segments = num_segments + + def forward(self, x): + x = self.block(x) + + n, c, h, w = x.size() + x = x.view(n // self.num_segments, self.num_segments, c, h, + w).transpose(1, 2).contiguous() + x = self.non_local_block(x) + x = x.transpose(1, 2).contiguous().view(n, c, h, w) + return x + + +class TemporalShift(nn.Module): + """Temporal shift module. + + This module is proposed in + `TSM: Temporal Shift Module for Efficient Video Understanding + `_ + + Args: + net (nn.module): Module to make temporal shift. + num_segments (int): Number of frame segments. Default: 3. + shift_div (int): Number of divisions for shift. Default: 8. 
+ """ + + def __init__(self, net, num_segments=3, shift_div=8): + super().__init__() + self.net = net + self.num_segments = num_segments + self.shift_div = shift_div + + def forward(self, x): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + torch.Tensor: The output of the module. + """ + x = self.shift(x, self.num_segments, shift_div=self.shift_div) + return self.net(x) + + @staticmethod + def shift(x, num_segments, shift_div=3): + """Perform temporal shift operation on the feature. + + Args: + x (torch.Tensor): The input feature to be shifted. + num_segments (int): Number of frame segments. + shift_div (int): Number of divisions for shift. Default: 3. + + Returns: + torch.Tensor: The shifted feature. + """ + # [N, C, H, W] + n, c, h, w = x.size() + + # [N // num_segments, num_segments, C, H*W] + # can't use 5 dimensional array on PPL2D backend for caffe + x = x.view(-1, num_segments, c, h * w) + + # get shift fold + fold = c // shift_div + + # split c channel into three parts: + # left_split, mid_split, right_split + left_split = x[:, :, :fold, :] + mid_split = x[:, :, fold:2 * fold, :] + right_split = x[:, :, 2 * fold:, :] + + # can't use torch.zeros(*A.shape) or torch.zeros_like(A) + # because array on caffe inference must be got by computing + + # shift left on num_segments channel in `left_split` + zeros = left_split - left_split + blank = zeros[:, :1, :, :] + left_split = left_split[:, 1:, :, :] + left_split = torch.cat((left_split, blank), 1) + + # shift right on num_segments channel in `mid_split` + zeros = mid_split - mid_split + blank = zeros[:, :1, :, :] + mid_split = mid_split[:, :-1, :, :] + mid_split = torch.cat((blank, mid_split), 1) + + # right_split: no shift + + # concatenate + out = torch.cat((left_split, mid_split, right_split), 2) + + # [N, C, H, W] + # restore the original dimension + return out.view(n, c, h, w) + + +@BACKBONES.register_module() +class ResNetTSM(ResNet): + """ResNet backbone for TSM. + + Args: + num_segments (int): Number of frame segments. Default: 8. + is_shift (bool): Whether to make temporal shift in reset layers. + Default: True. + non_local (Sequence[int]): Determine whether to apply non-local module + in the corresponding block of each stages. Default: (0, 0, 0, 0). + non_local_cfg (dict): Config for non-local module. Default: ``dict()``. + shift_div (int): Number of div for shift. Default: 8. + shift_place (str): Places in resnet layers for shift, which is chosen + from ['block', 'blockres']. + If set to 'block', it will apply temporal shift to all child blocks + in each resnet layer. + If set to 'blockres', it will apply temporal shift to each `conv1` + layer of all child blocks in each resnet layer. + Default: 'blockres'. + temporal_pool (bool): Whether to add temporal pooling. Default: False. + **kwargs (keyword arguments, optional): Arguments for ResNet. 
+ """ + + def __init__(self, + depth, + num_segments=8, + is_shift=True, + non_local=(0, 0, 0, 0), + non_local_cfg=dict(), + shift_div=8, + shift_place='blockres', + temporal_pool=False, + **kwargs): + super().__init__(depth, **kwargs) + self.num_segments = num_segments + self.is_shift = is_shift + self.shift_div = shift_div + self.shift_place = shift_place + self.temporal_pool = temporal_pool + self.non_local = non_local + self.non_local_stages = _ntuple(self.num_stages)(non_local) + self.non_local_cfg = non_local_cfg + + def make_temporal_shift(self): + """Make temporal shift for some layers.""" + if self.temporal_pool: + num_segment_list = [ + self.num_segments, self.num_segments // 2, + self.num_segments // 2, self.num_segments // 2 + ] + else: + num_segment_list = [self.num_segments] * 4 + if num_segment_list[-1] <= 0: + raise ValueError('num_segment_list[-1] must be positive') + + if self.shift_place == 'block': + + def make_block_temporal(stage, num_segments): + """Make temporal shift on some blocks. + + Args: + stage (nn.Module): Model layers to be shifted. + num_segments (int): Number of frame segments. + + Returns: + nn.Module: The shifted blocks. + """ + blocks = list(stage.children()) + for i, b in enumerate(blocks): + blocks[i] = TemporalShift( + b, num_segments=num_segments, shift_div=self.shift_div) + return nn.Sequential(*blocks) + + self.layer1 = make_block_temporal(self.layer1, num_segment_list[0]) + self.layer2 = make_block_temporal(self.layer2, num_segment_list[1]) + self.layer3 = make_block_temporal(self.layer3, num_segment_list[2]) + self.layer4 = make_block_temporal(self.layer4, num_segment_list[3]) + + elif 'blockres' in self.shift_place: + n_round = 1 + if len(list(self.layer3.children())) >= 23: + n_round = 2 + + def make_block_temporal(stage, num_segments): + """Make temporal shift on some blocks. + + Args: + stage (nn.Module): Model layers to be shifted. + num_segments (int): Number of frame segments. + + Returns: + nn.Module: The shifted blocks. + """ + blocks = list(stage.children()) + for i, b in enumerate(blocks): + if i % n_round == 0: + blocks[i].conv1.conv = TemporalShift( + b.conv1.conv, + num_segments=num_segments, + shift_div=self.shift_div) + return nn.Sequential(*blocks) + + self.layer1 = make_block_temporal(self.layer1, num_segment_list[0]) + self.layer2 = make_block_temporal(self.layer2, num_segment_list[1]) + self.layer3 = make_block_temporal(self.layer3, num_segment_list[2]) + self.layer4 = make_block_temporal(self.layer4, num_segment_list[3]) + + else: + raise NotImplementedError + + def make_temporal_pool(self): + """Make temporal pooling between layer1 and layer2, using a 3D max + pooling layer.""" + + class TemporalPool(nn.Module): + """Temporal pool module. + + Wrap layer2 in ResNet50 with a 3D max pooling layer. + + Args: + net (nn.Module): Module to make temporal pool. + num_segments (int): Number of frame segments. 
+ """ + + def __init__(self, net, num_segments): + super().__init__() + self.net = net + self.num_segments = num_segments + self.max_pool3d = nn.MaxPool3d( + kernel_size=(3, 1, 1), stride=(2, 1, 1), padding=(1, 0, 0)) + + def forward(self, x): + # [N, C, H, W] + n, c, h, w = x.size() + # [N // num_segments, C, num_segments, H, W] + x = x.view(n // self.num_segments, self.num_segments, c, h, + w).transpose(1, 2) + # [N // num_segmnets, C, num_segments // 2, H, W] + x = self.max_pool3d(x) + # [N // 2, C, H, W] + x = x.transpose(1, 2).contiguous().view(n // 2, c, h, w) + return self.net(x) + + self.layer2 = TemporalPool(self.layer2, self.num_segments) + + def make_non_local(self): + # This part is for ResNet50 + for i in range(self.num_stages): + non_local_stage = self.non_local_stages[i] + if sum(non_local_stage) == 0: + continue + + layer_name = f'layer{i + 1}' + res_layer = getattr(self, layer_name) + + for idx, non_local in enumerate(non_local_stage): + if non_local: + res_layer[idx] = NL3DWrapper(res_layer[idx], + self.num_segments, + self.non_local_cfg) + + def init_weights(self): + """Initiate the parameters either from existing checkpoint or from + scratch.""" + super().init_weights() + if self.is_shift: + self.make_temporal_shift() + if len(self.non_local_cfg) != 0: + self.make_non_local() + if self.temporal_pool: + self.make_temporal_pool() diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/x3d.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/x3d.py new file mode 100644 index 0000000..4d6b85c --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/backbones/x3d.py @@ -0,0 +1,523 @@ +import math + +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import (ConvModule, Swish, build_activation_layer, constant_init, + kaiming_init) +from mmcv.runner import load_checkpoint +from mmcv.utils import _BatchNorm + +from ...utils import get_root_logger +from ..registry import BACKBONES + + +class SEModule(nn.Module): + + def __init__(self, channels, reduction): + super().__init__() + self.avg_pool = nn.AdaptiveAvgPool3d(1) + self.bottleneck = self._round_width(channels, reduction) + self.fc1 = nn.Conv3d( + channels, self.bottleneck, kernel_size=1, padding=0) + self.relu = nn.ReLU() + self.fc2 = nn.Conv3d( + self.bottleneck, channels, kernel_size=1, padding=0) + self.sigmoid = nn.Sigmoid() + + @staticmethod + def _round_width(width, multiplier, min_width=8, divisor=8): + width *= multiplier + min_width = min_width or divisor + width_out = max(min_width, + int(width + divisor / 2) // divisor * divisor) + if width_out < 0.9 * width: + width_out += divisor + return int(width_out) + + def forward(self, x): + module_input = x + x = self.avg_pool(x) + x = self.fc1(x) + x = self.relu(x) + x = self.fc2(x) + x = self.sigmoid(x) + return module_input * x + + +class BlockX3D(nn.Module): + """BlockX3D 3d building block for X3D. + + Args: + inplanes (int): Number of channels for the input in first conv3d layer. + planes (int): Number of channels produced by some norm/conv3d layers. + outplanes (int): Number of channels produced by final the conv3d layer. + spatial_stride (int): Spatial stride in the conv3d layer. Default: 1. + downsample (nn.Module | None): Downsample layer. Default: None. + se_ratio (float | None): The reduction ratio of squeeze and excitation + unit. If set as None, it means not using SE unit. Default: None. 
+ use_swish (bool): Whether to use swish as the activation function + before and after the 3x3x3 conv. Default: True. + conv_cfg (dict): Config dict for convolution layer. + Default: ``dict(type='Conv3d')``. + norm_cfg (dict): Config for norm layers. required keys are ``type``, + Default: ``dict(type='BN3d')``. + act_cfg (dict): Config dict for activation layer. + Default: ``dict(type='ReLU')``. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + """ + + def __init__(self, + inplanes, + planes, + outplanes, + spatial_stride=1, + downsample=None, + se_ratio=None, + use_swish=True, + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d'), + act_cfg=dict(type='ReLU'), + with_cp=False): + super().__init__() + + self.inplanes = inplanes + self.planes = planes + self.outplanes = outplanes + self.spatial_stride = spatial_stride + self.downsample = downsample + self.se_ratio = se_ratio + self.use_swish = use_swish + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.act_cfg_swish = dict(type='Swish') + self.with_cp = with_cp + + self.conv1 = ConvModule( + in_channels=inplanes, + out_channels=planes, + kernel_size=1, + stride=1, + padding=0, + bias=False, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + # Here we use the channel-wise conv + self.conv2 = ConvModule( + in_channels=planes, + out_channels=planes, + kernel_size=3, + stride=(1, self.spatial_stride, self.spatial_stride), + padding=1, + groups=planes, + bias=False, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=None) + + self.swish = Swish() + + self.conv3 = ConvModule( + in_channels=planes, + out_channels=outplanes, + kernel_size=1, + stride=1, + padding=0, + bias=False, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=None) + + if self.se_ratio is not None: + self.se_module = SEModule(planes, self.se_ratio) + + self.relu = build_activation_layer(self.act_cfg) + + def forward(self, x): + """Defines the computation performed at every call.""" + + def _inner_forward(x): + """Forward wrapper for utilizing checkpoint.""" + identity = x + + out = self.conv1(x) + out = self.conv2(out) + if self.se_ratio is not None: + out = self.se_module(out) + + out = self.swish(out) + + out = self.conv3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out = out + identity + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + out = self.relu(out) + return out + + +# We do not support initialize with 2D pretrain weight for X3D +@BACKBONES.register_module() +class X3D(nn.Module): + """X3D backbone. https://arxiv.org/pdf/2004.04730.pdf. + + Args: + gamma_w (float): Global channel width expansion factor. Default: 1. + gamma_b (float): Bottleneck channel width expansion factor. Default: 1. + gamma_d (float): Network depth expansion factor. Default: 1. + pretrained (str | None): Name of pretrained model. Default: None. + in_channels (int): Channel num of input features. Default: 3. + num_stages (int): Resnet stages. Default: 4. + spatial_strides (Sequence[int]): + Spatial strides of residual blocks of each stage. + Default: ``(1, 2, 2, 2)``. + frozen_stages (int): Stages to be frozen (all param fixed). If set to + -1, it means not freezing any parameters. Default: -1. 
+ se_style (str): The style of inserting SE modules into BlockX3D, 'half' + denotes insert into half of the blocks, while 'all' denotes insert + into all blocks. Default: 'half'. + se_ratio (float | None): The reduction ratio of squeeze and excitation + unit. If set as None, it means not using SE unit. Default: 1 / 16. + use_swish (bool): Whether to use swish as the activation function + before and after the 3x3x3 conv. Default: True. + conv_cfg (dict): Config for conv layers. required keys are ``type`` + Default: ``dict(type='Conv3d')``. + norm_cfg (dict): Config for norm layers. required keys are ``type`` and + ``requires_grad``. + Default: ``dict(type='BN3d', requires_grad=True)``. + act_cfg (dict): Config dict for activation layer. + Default: ``dict(type='ReLU', inplace=True)``. + norm_eval (bool): Whether to set BN layers to eval mode, namely, freeze + running stats (mean and var). Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): + Whether to use zero initialization for residual block, + Default: True. + kwargs (dict, optional): Key arguments for "make_res_layer". + """ + + def __init__(self, + gamma_w=1.0, + gamma_b=1.0, + gamma_d=1.0, + pretrained=None, + in_channels=3, + num_stages=4, + spatial_strides=(2, 2, 2, 2), + frozen_stages=-1, + se_style='half', + se_ratio=1 / 16, + use_swish=True, + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d', requires_grad=True), + act_cfg=dict(type='ReLU', inplace=True), + norm_eval=False, + with_cp=False, + zero_init_residual=True, + **kwargs): + super().__init__() + self.gamma_w = gamma_w + self.gamma_b = gamma_b + self.gamma_d = gamma_d + + self.pretrained = pretrained + self.in_channels = in_channels + # Hard coded, can be changed by gamma_w + self.base_channels = 24 + self.stage_blocks = [1, 2, 5, 3] + + # apply parameters gamma_w and gamma_d + self.base_channels = self._round_width(self.base_channels, + self.gamma_w) + + self.stage_blocks = [ + self._round_repeats(x, self.gamma_d) for x in self.stage_blocks + ] + + self.num_stages = num_stages + assert 1 <= num_stages <= 4 + self.spatial_strides = spatial_strides + assert len(spatial_strides) == num_stages + self.frozen_stages = frozen_stages + + self.se_style = se_style + assert self.se_style in ['all', 'half'] + self.se_ratio = se_ratio + assert (self.se_ratio is None) or (self.se_ratio > 0) + self.use_swish = use_swish + + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + self.zero_init_residual = zero_init_residual + + self.block = BlockX3D + self.stage_blocks = self.stage_blocks[:num_stages] + self.layer_inplanes = self.base_channels + self._make_stem_layer() + + self.res_layers = [] + for i, num_blocks in enumerate(self.stage_blocks): + spatial_stride = spatial_strides[i] + inplanes = self.base_channels * 2**i + planes = int(inplanes * self.gamma_b) + + res_layer = self.make_res_layer( + self.block, + self.layer_inplanes, + inplanes, + planes, + num_blocks, + spatial_stride=spatial_stride, + se_style=self.se_style, + se_ratio=self.se_ratio, + use_swish=self.use_swish, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg, + act_cfg=self.act_cfg, + with_cp=with_cp, + **kwargs) + self.layer_inplanes = inplanes + layer_name = f'layer{i + 1}' + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self.feat_dim = self.base_channels * 
2**(len(self.stage_blocks) - 1) + self.conv5 = ConvModule( + self.feat_dim, + int(self.feat_dim * self.gamma_b), + kernel_size=1, + stride=1, + padding=0, + bias=False, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.feat_dim = int(self.feat_dim * self.gamma_b) + + @staticmethod + def _round_width(width, multiplier, min_depth=8, divisor=8): + """Round width of filters based on width multiplier.""" + if not multiplier: + return width + + width *= multiplier + min_depth = min_depth or divisor + new_filters = max(min_depth, + int(width + divisor / 2) // divisor * divisor) + if new_filters < 0.9 * width: + new_filters += divisor + return int(new_filters) + + @staticmethod + def _round_repeats(repeats, multiplier): + """Round number of layers based on depth multiplier.""" + if not multiplier: + return repeats + return int(math.ceil(multiplier * repeats)) + + # the module is parameterized with gamma_b + # no temporal_stride + def make_res_layer(self, + block, + layer_inplanes, + inplanes, + planes, + blocks, + spatial_stride=1, + se_style='half', + se_ratio=None, + use_swish=True, + norm_cfg=None, + act_cfg=None, + conv_cfg=None, + with_cp=False, + **kwargs): + """Build residual layer for ResNet3D. + + Args: + block (nn.Module): Residual module to be built. + layer_inplanes (int): Number of channels for the input feature + of the res layer. + inplanes (int): Number of channels for the input feature in each + block, which equals to base_channels * gamma_w. + planes (int): Number of channels for the output feature in each + block, which equals to base_channel * gamma_w * gamma_b. + blocks (int): Number of residual blocks. + spatial_stride (int): Spatial strides in residual and conv layers. + Default: 1. + se_style (str): The style of inserting SE modules into BlockX3D, + 'half' denotes insert into half of the blocks, while 'all' + denotes insert into all blocks. Default: 'half'. + se_ratio (float | None): The reduction ratio of squeeze and + excitation unit. If set as None, it means not using SE unit. + Default: None. + use_swish (bool): Whether to use swish as the activation function + before and after the 3x3x3 conv. Default: True. + conv_cfg (dict | None): Config for norm layers. Default: None. + norm_cfg (dict | None): Config for norm layers. Default: None. + act_cfg (dict | None): Config for activate layers. Default: None. + with_cp (bool | None): Use checkpoint or not. Using checkpoint + will save some memory while slowing down the training speed. + Default: False. + + Returns: + nn.Module: A residual layer for the given config. 
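+
+ For example (illustrative): with ``blocks=5`` and ``se_style='half'``,
+ SE units are inserted into blocks 0, 2 and 4; with ``se_style='all'``
+ they are inserted into every block.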
+ """ + downsample = None + if spatial_stride != 1 or layer_inplanes != inplanes: + downsample = ConvModule( + layer_inplanes, + inplanes, + kernel_size=1, + stride=(1, spatial_stride, spatial_stride), + padding=0, + bias=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + use_se = [False] * blocks + if self.se_style == 'all': + use_se = [True] * blocks + elif self.se_style == 'half': + use_se = [i % 2 == 0 for i in range(blocks)] + else: + raise NotImplementedError + + layers = [] + layers.append( + block( + layer_inplanes, + planes, + inplanes, + spatial_stride=spatial_stride, + downsample=downsample, + se_ratio=se_ratio if use_se[0] else None, + use_swish=use_swish, + norm_cfg=norm_cfg, + conv_cfg=conv_cfg, + act_cfg=act_cfg, + with_cp=with_cp, + **kwargs)) + + for i in range(1, blocks): + layers.append( + block( + inplanes, + planes, + inplanes, + spatial_stride=1, + se_ratio=se_ratio if use_se[i] else None, + use_swish=use_swish, + norm_cfg=norm_cfg, + conv_cfg=conv_cfg, + act_cfg=act_cfg, + with_cp=with_cp, + **kwargs)) + + return nn.Sequential(*layers) + + def _make_stem_layer(self): + """Construct the stem layers consists of a conv+norm+act module and a + pooling layer.""" + self.conv1_s = ConvModule( + self.in_channels, + self.base_channels, + kernel_size=(1, 3, 3), + stride=(1, 2, 2), + padding=(0, 1, 1), + bias=False, + conv_cfg=self.conv_cfg, + norm_cfg=None, + act_cfg=None) + self.conv1_t = ConvModule( + self.base_channels, + self.base_channels, + kernel_size=(5, 1, 1), + stride=(1, 1, 1), + padding=(2, 0, 0), + groups=self.base_channels, + bias=False, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def _freeze_stages(self): + """Prevent all the parameters from being optimized before + ``self.frozen_stages``.""" + if self.frozen_stages >= 0: + self.conv1_s.eval() + self.conv1_t.eval() + for param in self.conv1_s.parameters(): + param.requires_grad = False + for param in self.conv1_t.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, f'layer{i}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self): + """Initiate the parameters either from existing checkpoint or from + scratch.""" + if isinstance(self.pretrained, str): + logger = get_root_logger() + logger.info(f'load model from: {self.pretrained}') + + load_checkpoint(self, self.pretrained, strict=False, logger=logger) + + elif self.pretrained is None: + for m in self.modules(): + if isinstance(m, nn.Conv3d): + kaiming_init(m) + elif isinstance(m, _BatchNorm): + constant_init(m, 1) + + if self.zero_init_residual: + for m in self.modules(): + if isinstance(m, BlockX3D): + constant_init(m.conv3.bn, 0) + else: + raise TypeError('pretrained must be a str or None') + + def forward(self, x): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + torch.Tensor: The feature of the input + samples extracted by the backbone. 
+ """ + x = self.conv1_s(x) + x = self.conv1_t(x) + for layer_name in self.res_layers: + res_layer = getattr(self, layer_name) + x = res_layer(x) + x = self.conv5(x) + return x + + def train(self, mode=True): + """Set the optimization status when training.""" + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/builder.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/builder.py new file mode 100644 index 0000000..43a4f32 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/builder.py @@ -0,0 +1,70 @@ +import torch.nn as nn +from mmcv.utils import build_from_cfg + +from .registry import BACKBONES, HEADS, LOCALIZERS, LOSSES, NECKS, RECOGNIZERS + + +def build(cfg, registry, default_args=None): + """Build a module. + + Args: + cfg (dict, list[dict]): The config of modules, it is either a dict + or a list of configs. + registry (:obj:`Registry`): A registry the module belongs to. + default_args (dict, optional): Default arguments to build the module. + Defaults to None. + + Returns: + nn.Module: A built nn module. + """ + + if isinstance(cfg, list): + modules = [ + build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg + ] + return nn.Sequential(*modules) + + return build_from_cfg(cfg, registry, default_args) + + +def build_backbone(cfg): + """Build backbone.""" + return build(cfg, BACKBONES) + + +def build_head(cfg): + """Build head.""" + return build(cfg, HEADS) + + +def build_recognizer(cfg, train_cfg=None, test_cfg=None): + """Build recognizer.""" + return build(cfg, RECOGNIZERS, + dict(train_cfg=train_cfg, test_cfg=test_cfg)) + + +def build_loss(cfg): + """Build loss.""" + return build(cfg, LOSSES) + + +def build_localizer(cfg): + """Build localizer.""" + return build(cfg, LOCALIZERS) + + +def build_model(cfg, train_cfg=None, test_cfg=None): + """Build model.""" + args = cfg.copy() + obj_type = args.pop('type') + if obj_type in LOCALIZERS: + return build_localizer(cfg) + if obj_type in RECOGNIZERS: + return build_recognizer(cfg, train_cfg, test_cfg) + raise ValueError(f'{obj_type} is not registered in ' + 'LOCALIZERS or RECOGNIZERS') + + +def build_neck(cfg): + """Build neck.""" + return build(cfg, NECKS) diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/common/__init__.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/common/__init__.py new file mode 100644 index 0000000..ccc3491 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/common/__init__.py @@ -0,0 +1,4 @@ +from .conv2plus1d import Conv2plus1d +from .conv_audio import ConvAudio + +__all__ = ['Conv2plus1d', 'ConvAudio'] diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/common/conv2plus1d.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/common/conv2plus1d.py new file mode 100644 index 0000000..675b0e2 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/common/conv2plus1d.py @@ -0,0 +1,104 @@ +import torch.nn as nn +from mmcv.cnn import CONV_LAYERS, build_norm_layer, constant_init, kaiming_init +from torch.nn.modules.utils import _triple + + +@CONV_LAYERS.register_module() +class Conv2plus1d(nn.Module): + """(2+1)d Conv module for R(2+1)d backbone. + + https://arxiv.org/pdf/1711.11248.pdf. + + Args: + in_channels (int): Same as nn.Conv3d. + out_channels (int): Same as nn.Conv3d. + kernel_size (int | tuple[int]): Same as nn.Conv3d. 
+ stride (int | tuple[int]): Same as nn.Conv3d. + padding (int | tuple[int]): Same as nn.Conv3d. + dilation (int | tuple[int]): Same as nn.Conv3d. + groups (int): Same as nn.Conv3d. + bias (bool | str): If specified as `auto`, it will be decided by the + norm_cfg. Bias will be set as True if norm_cfg is None, otherwise + False. + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + bias=True, + norm_cfg=dict(type='BN3d')): + super().__init__() + + kernel_size = _triple(kernel_size) + stride = _triple(stride) + padding = _triple(padding) + assert len(kernel_size) == len(stride) == len(padding) == 3 + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = kernel_size + self.stride = stride + self.padding = padding + self.dilation = dilation + self.groups = groups + self.bias = bias + self.norm_cfg = norm_cfg + self.output_padding = (0, 0, 0) + self.transposed = False + + # The middle-plane is calculated according to: + # M_i = \floor{\frac{t * d^2 N_i-1 * N_i} + # {d^2 * N_i-1 + t * N_i}} + # where d, t are spatial and temporal kernel, and + # N_i, N_i-1 are planes + # and inplanes. https://arxiv.org/pdf/1711.11248.pdf + mid_channels = 3 * ( + in_channels * out_channels * kernel_size[1] * kernel_size[2]) + mid_channels /= ( + in_channels * kernel_size[1] * kernel_size[2] + 3 * out_channels) + mid_channels = int(mid_channels) + + self.conv_s = nn.Conv3d( + in_channels, + mid_channels, + kernel_size=(1, kernel_size[1], kernel_size[2]), + stride=(1, stride[1], stride[2]), + padding=(0, padding[1], padding[2]), + bias=bias) + _, self.bn_s = build_norm_layer(self.norm_cfg, mid_channels) + self.relu = nn.ReLU(inplace=True) + self.conv_t = nn.Conv3d( + mid_channels, + out_channels, + kernel_size=(kernel_size[0], 1, 1), + stride=(stride[0], 1, 1), + padding=(padding[0], 0, 0), + bias=bias) + + self.init_weights() + + def forward(self, x): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + torch.Tensor: The output of the module. + """ + x = self.conv_s(x) + x = self.bn_s(x) + x = self.relu(x) + x = self.conv_t(x) + return x + + def init_weights(self): + """Initiate the parameters from scratch.""" + kaiming_init(self.conv_s) + kaiming_init(self.conv_t) + constant_init(self.bn_s, 1, bias=0) diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/common/conv_audio.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/common/conv_audio.py new file mode 100644 index 0000000..3b8a606 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/common/conv_audio.py @@ -0,0 +1,104 @@ +import torch +import torch.nn as nn +from mmcv.cnn import CONV_LAYERS, ConvModule, constant_init, kaiming_init +from torch.nn.modules.utils import _pair + + +@CONV_LAYERS.register_module() +class ConvAudio(nn.Module): + """Conv2d module for AudioResNet backbone. + + `_. + + Args: + in_channels (int): Same as nn.Conv2d. + out_channels (int): Same as nn.Conv2d. + kernel_size (int | tuple[int]): Same as nn.Conv2d. + op (string): Operation to merge the output of freq + and time feature map. Choices are 'sum' and 'concat'. + Default: 'concat'. + stride (int | tuple[int]): Same as nn.Conv2d. + padding (int | tuple[int]): Same as nn.Conv2d. + dilation (int | tuple[int]): Same as nn.Conv2d. + groups (int): Same as nn.Conv2d. + bias (bool | str): If specified as `auto`, it will be decided by the + norm_cfg. 
Bias will be set as True if norm_cfg is None, otherwise + False. + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size, + op='concat', + stride=1, + padding=0, + dilation=1, + groups=1, + bias=False): + super().__init__() + + kernel_size = _pair(kernel_size) + stride = _pair(stride) + padding = _pair(padding) + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = kernel_size + assert op in ['concat', 'sum'] + self.op = op + self.stride = stride + self.padding = padding + self.dilation = dilation + self.groups = groups + self.bias = bias + self.output_padding = (0, 0) + self.transposed = False + + self.conv_1 = ConvModule( + in_channels, + out_channels, + kernel_size=(kernel_size[0], 1), + stride=stride, + padding=(kernel_size[0] // 2, 0), + bias=bias, + conv_cfg=dict(type='Conv'), + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU')) + + self.conv_2 = ConvModule( + in_channels, + out_channels, + kernel_size=(1, kernel_size[1]), + stride=stride, + padding=(0, kernel_size[1] // 2), + bias=bias, + conv_cfg=dict(type='Conv'), + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU')) + + self.init_weights() + + def forward(self, x): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + torch.Tensor: The output of the module. + """ + x_1 = self.conv_1(x) + x_2 = self.conv_2(x) + if self.op == 'concat': + out = torch.cat([x_1, x_2], 1) + else: + out = x_1 + x_2 + return out + + def init_weights(self): + """Initiate the parameters from scratch.""" + kaiming_init(self.conv_1.conv) + kaiming_init(self.conv_2.conv) + constant_init(self.conv_1.bn, 1, bias=0) + constant_init(self.conv_2.bn, 1, bias=0) diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/__init__.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/__init__.py new file mode 100644 index 0000000..d28b041 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/__init__.py @@ -0,0 +1,27 @@ +from .audio_tsn_head import AudioTSNHead +from .base import BaseHead +from .i3d_head import I3DHead +from .i3d_bnn_head import I3DBNNHead +from .i3d_rpl_head import I3DRPLHead +from .slowfast_head import SlowFastHead +from .slowfast_rpl_head import SlowFastRPLHead +from .slowfast_bnn_head import SlowFastBNNHead +from .ssn_head import SSNHead +from .tpn_head import TPNHead +from .tpn_rpl_head import TPNRPLHead +from .tpn_bnn_head import TPNBNNHead +from .tsm_head import TSMHead +from .tsm_bnn_head import TSMBNNHead +from .tsm_rpl_head import TSMRPLHead +from .tsn_head import TSNHead +from .x3d_head import X3DHead +from .aux_head import AuxHead +from .rebias_head import RebiasHead +from .debias_head import DebiasHead +from .base_cls_head import BaseClsHead + + +__all__ = [ + 'TSNHead', 'I3DHead', 'I3DBNNHead', 'I3DRPLHead', 'BaseHead', 'TSMHead', 'TSMBNNHead', 'TSMRPLHead', 'SlowFastHead', 'SlowFastBNNHead', 'SlowFastRPLHead', 'SSNHead', + 'TPNHead', 'TPNBNNHead', 'TPNRPLHead', 'AudioTSNHead', 'X3DHead', 'AuxHead', 'RebiasHead', 'DebiasHead', 'BaseClsHead' +] diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/audio_tsn_head.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/audio_tsn_head.py new file mode 100644 index 0000000..b956e1f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/audio_tsn_head.py @@ -0,0 +1,73 @@ +import torch.nn as nn +from mmcv.cnn import normal_init + +from ..registry import HEADS +from 
.base import BaseHead + + +@HEADS.register_module() +class AudioTSNHead(BaseHead): + """Classification head for TSN on audio. + + Args: + num_classes (int): Number of classes to be classified. + in_channels (int): Number of channels in input feature. + loss_cls (dict): Config for building loss. + Default: dict(type='CrossEntropyLoss'). + spatial_type (str): Pooling type in spatial dimension. Default: 'avg'. + dropout_ratio (float): Probability of dropout layer. Default: 0.4. + init_std (float): Std value for Initiation. Default: 0.01. + kwargs (dict, optional): Any keyword argument to be used to initialize + the head. + """ + + def __init__(self, + num_classes, + in_channels, + loss_cls=dict(type='CrossEntropyLoss'), + spatial_type='avg', + dropout_ratio=0.4, + init_std=0.01, + **kwargs): + super().__init__(num_classes, in_channels, loss_cls=loss_cls, **kwargs) + + self.spatial_type = spatial_type + self.dropout_ratio = dropout_ratio + self.init_std = init_std + + if self.spatial_type == 'avg': + # use `nn.AdaptiveAvgPool2d` to adaptively match the in_channels. + self.avg_pool = nn.AdaptiveAvgPool2d((1, 1)) + else: + self.avg_pool = None + + if self.dropout_ratio != 0: + self.dropout = nn.Dropout(p=self.dropout_ratio) + else: + self.dropout = None + self.fc_cls = nn.Linear(self.in_channels, self.num_classes) + + def init_weights(self): + """Initiate the parameters from scratch.""" + normal_init(self.fc_cls, std=self.init_std) + + def forward(self, x): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + torch.Tensor: The classification scores for input samples. + """ + # [N * num_segs, in_channels, h, w] + x = self.avg_pool(x) + # [N, in_channels, 1, 1] + x = x.view(x.size(0), -1) + # [N, in_channels] + if self.dropout is not None: + x = self.dropout(x) + # [N, in_channels] + cls_score = self.fc_cls(x) + # [N, num_classes] + return cls_score diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/aux_head.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/aux_head.py new file mode 100644 index 0000000..12f0e0c --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/aux_head.py @@ -0,0 +1,65 @@ +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, constant_init, normal_init, xavier_init +from ..builder import build_loss + + +class AuxHead(nn.Module): + """Auxiliary Head. + + This auxiliary head is appended to receive stronger supervision, + leading to enhanced semantics. + + Args: + in_channels (int): Channel number of input features. + out_channels (int): Channel number of output features. + loss_weight (float): weight of loss for the auxiliary head. + Default: 0.5. + loss_cls (dict): loss_cls (dict): Config for building loss. + Default: ``dict(type='CrossEntropyLoss')``. 
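+
+ Example:
+ >>> # Illustrative sketch; channel sizes, class count and labels below
+ >>> # are assumed for demonstration.
+ >>> import torch
+ >>> aux_head = AuxHead(in_channels=1024, out_channels=400)
+ >>> feat = torch.rand(2, 1024, 8, 14, 14)  # (N, C, T, H, W)
+ >>> target = torch.tensor([3, 7])
+ >>> losses = aux_head(feat, target)  # {'loss_aux': weighted CE loss}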
+ """ + + def __init__(self, + in_channels, + out_channels, + loss_weight=0.5, + loss_cls=dict(type='CrossEntropyLoss')): + super().__init__() + + self.conv = ConvModule( + in_channels, + in_channels * 2, (1, 3, 3), + stride=(1, 2, 2), + padding=(0, 1, 1), + bias=False, + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d', requires_grad=True)) + self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1)) + self.loss_weight = loss_weight + self.dropout = nn.Dropout(p=0.5) + self.fc = nn.Linear(in_channels * 2, out_channels) + self.loss_cls = build_loss(loss_cls) + + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Linear): + normal_init(m, std=0.01) + if isinstance(m, nn.Conv3d): + xavier_init(m, distribution='uniform') + if isinstance(m, nn.BatchNorm3d): + constant_init(m, 1) + + def forward(self, x, target=None): + losses = dict() + if target is None: + return losses + x = self.conv(x) + x = self.avg_pool(x).squeeze(-1).squeeze(-1).squeeze(-1) + x = self.dropout(x) + x = self.fc(x) + + if target.shape == torch.Size([]): + target = target.unsqueeze(0) + + losses['loss_aux'] = self.loss_weight * self.loss_cls(x, target) + return losses \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/base.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/base.py new file mode 100644 index 0000000..92e9b04 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/base.py @@ -0,0 +1,102 @@ +from abc import ABCMeta, abstractmethod + +import torch +import torch.nn as nn + +from ...core import top_k_accuracy +from ..builder import build_loss + + +class AvgConsensus(nn.Module): + """Average consensus module. + + Args: + dim (int): Decide which dim consensus function to apply. + Default: 1. + """ + + def __init__(self, dim=1): + super().__init__() + self.dim = dim + + def forward(self, x): + """Defines the computation performed at every call.""" + return x.mean(dim=self.dim, keepdim=True) + + +class BaseHead(nn.Module, metaclass=ABCMeta): + """Base class for head. + + All Head should subclass it. + All subclass should overwrite: + - Methods:``init_weights``, initializing weights in some modules. + - Methods:``forward``, supporting to forward both for training and testing. + + Args: + num_classes (int): Number of classes to be classified. + in_channels (int): Number of channels in input feature. + loss_cls (dict): Config for building loss. + Default: dict(type='CrossEntropyLoss'). + multi_class (bool): Determines whether it is a multi-class + recognition task. Default: False. + label_smooth_eps (float): Epsilon used in label smooth. + Reference: arxiv.org/abs/1906.02629. Default: 0. + """ + + def __init__(self, + num_classes, + in_channels, + loss_cls=dict(type='CrossEntropyLoss', loss_factor=1.0), + multi_class=False, + label_smooth_eps=0.0): + super().__init__() + self.num_classes = num_classes + self.in_channels = in_channels + self.loss_cls = build_loss(loss_cls) + self.multi_class = multi_class + self.label_smooth_eps = label_smooth_eps + + @abstractmethod + def init_weights(self): + """Initiate the parameters either from existing checkpoint or from + scratch.""" + + @abstractmethod + def forward(self, x): + """Defines the computation performed at every call.""" + + def loss(self, cls_score, labels, **kwargs): + """Calculate the loss given output ``cls_score``, target ``labels``. + + Args: + cls_score (torch.Tensor): The output of the model. + labels (torch.Tensor): The target output of the model. 
+ + Returns: + dict: A dict containing field 'loss_cls'(mandatory) + and 'top1_acc', 'top5_acc'(optional). + """ + losses = dict() + if labels.shape == torch.Size([]): + labels = labels.unsqueeze(0) + + if not self.multi_class: + top_k_acc = top_k_accuracy(cls_score.detach().cpu().numpy(), + labels.detach().cpu().numpy(), (1, 5)) + losses['top1_acc'] = torch.tensor( + top_k_acc[0], device=cls_score.device) + losses['top5_acc'] = torch.tensor( + top_k_acc[1], device=cls_score.device) + + elif self.label_smooth_eps != 0: + labels = ((1 - self.label_smooth_eps) * labels + + self.label_smooth_eps / self.num_classes) + + loss_cls = self.loss_cls(cls_score, labels, **kwargs) + # loss_cls may be dictionary or single tensor + if isinstance(loss_cls, dict): + losses.update(loss_cls) + else: + losses['loss_cls'] = loss_cls + + return losses diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/base_cls_head.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/base_cls_head.py new file mode 100644 index 0000000..2d53456 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/base_cls_head.py @@ -0,0 +1,63 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import normal_init + +from ..builder import HEADS +from .base import BaseHead +import pdb + +@HEADS.register_module() +class BaseClsHead(BaseHead): + """The classification head for SlowFast. + + Args: + num_classes (int): Number of classes to be classified. + in_channels (int): Number of channels in input feature. + loss_cls (dict): Config for building loss. + Default: dict(type='CrossEntropyLoss'). + spatial_type (str): Pooling type in spatial dimension. Default: 'avg'. + dropout_ratio (float): Probability of dropout layer. Default: 0.8. + init_std (float): Std value for Initiation. Default: 0.01. + kwargs (dict, optional): Any keyword argument to be used to initialize + the head. + """ + + def __init__(self, + num_classes, + in_channels, + loss_cls=dict(type='CrossEntropyLoss'), + dropout_ratio=0.5, + init_std=0.01, + **kwargs): + + super().__init__(num_classes, in_channels, loss_cls, **kwargs) + self.init_std = init_std + self.dropout_ratio = dropout_ratio + self.fc_cls = nn.Linear(in_channels, num_classes) + + if self.dropout_ratio != 0: + self.dropout = nn.Dropout(p=self.dropout_ratio) + else: + self.dropout = None + + def init_weights(self): + """Initiate the parameters from scratch.""" + normal_init(self.fc_cls, std=self.init_std) + + def forward(self, x): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + torch.Tensor: The classification scores for input samples. 
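+
+ Example:
+ >>> # Sketch; the feature dimension and class count are assumed.
+ >>> import torch
+ >>> head = BaseClsHead(num_classes=400, in_channels=768)
+ >>> head.init_weights()
+ >>> x = torch.rand(4, 768)  # already pooled clip features
+ >>> head(x).shape
+ torch.Size([4, 400])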
+ """ + if self.dropout is not None: + x = self.dropout(x) + + # [N x num_classes] + cls_score = self.fc_cls(x) + + return cls_score diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/bnn.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/bnn.py new file mode 100644 index 0000000..fd5036b --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/bnn.py @@ -0,0 +1,122 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import math + +class Gaussian(object): + def __init__(self, mu, rho): + super().__init__() + self.mu = mu + self.rho = rho + self.normal = torch.distributions.Normal(0,1) + + @property + def sigma(self): + return torch.log1p(torch.exp(self.rho)) + + def sample(self): + epsilon = self.normal.sample(self.rho.size()).to(self.mu.device) + return self.mu + self.sigma * epsilon + + def log_prob(self, input): + return (-math.log(math.sqrt(2 * math.pi)) + - torch.log(self.sigma) + - ((input - self.mu) ** 2) / (2 * self.sigma ** 2)).sum() + + +class ScaleMixtureGaussian(object): + def __init__(self, pi, sigma1, sigma2): + super().__init__() + self.pi = pi + self.sigma1 = sigma1 + self.sigma2 = sigma2 + + def log_prob(self, input): + gaussian1 = torch.distributions.Normal(0, self.sigma1.to(input.device)) + gaussian2 = torch.distributions.Normal(0, self.sigma2.to(input.device)) + prob1 = torch.exp(gaussian1.log_prob(input)) + prob2 = torch.exp(gaussian2.log_prob(input)) + return (torch.log(self.pi * prob1 + (1-self.pi) * prob2)).sum() + + +class BayesianLinear(nn.Module): + def __init__(self, in_features, out_features, pi=0.5, sigma_1=None, sigma_2=None): + super().__init__() + self.in_features = in_features + self.out_features = out_features + if sigma_1 is None or sigma_2 is None: + sigma_1 = torch.FloatTensor([math.exp(-0)]) + sigma_2 = torch.FloatTensor([math.exp(-6)]) + + # Weight parameters + self.weight_mu = nn.Parameter(torch.Tensor(out_features, in_features).uniform_(-0.2, 0.2)) + self.weight_rho = nn.Parameter(torch.Tensor(out_features, in_features).uniform_(-5,-4)) + self.weight = Gaussian(self.weight_mu, self.weight_rho) + # Bias parameters + self.bias_mu = nn.Parameter(torch.Tensor(out_features).uniform_(-0.2, 0.2)) + self.bias_rho = nn.Parameter(torch.Tensor(out_features).uniform_(-5,-4)) + self.bias = Gaussian(self.bias_mu, self.bias_rho) + # Prior distributions + self.weight_prior = ScaleMixtureGaussian(pi, sigma_1, sigma_2) + self.bias_prior = ScaleMixtureGaussian(pi, sigma_1, sigma_2) + self.log_prior = 0 + self.log_variational_posterior = 0 + + def forward(self, input, sample=False, calculate_log_probs=False): + if self.training or sample: + weight = self.weight.sample() + bias = self.bias.sample() + else: + weight = self.weight.mu + bias = self.bias.mu + if self.training or calculate_log_probs: + self.log_prior = self.weight_prior.log_prob(weight) + self.bias_prior.log_prob(bias) + self.log_variational_posterior = self.weight.log_prob(weight) + self.bias.log_prob(bias) + else: + self.log_prior, self.log_variational_posterior = 0, 0 + + return F.linear(input, weight, bias) + + +class BayesianPredictor(nn.Module): + def __init__(self, input_dim, output_dim, act=torch.relu, pi=0.5, sigma_1=None, sigma_2=None): + super(BayesianPredictor, self).__init__() + self.output_dim = output_dim + self.act = act + self.bnn_layer = BayesianLinear(input_dim, output_dim, pi=pi, sigma_1=sigma_1, sigma_2=sigma_2) + + def log_prior(self): + return self.bnn_layer.log_prior + + def 
log_variational_posterior(self): + return self.bnn_layer.log_variational_posterior + + def forward(self, input, npass=2, testing=False): + npass_size = npass + 1 if testing else npass + outputs = torch.zeros(npass_size, input.size(0), self.output_dim).to(input.device) + log_priors = torch.zeros(npass_size).to(input.device) + log_variational_posteriors = torch.zeros(npass_size).to(input.device) + for i in range(npass): + outputs[i] = self.bnn_layer(input, sample=True) + log_priors[i] = self.log_prior() + log_variational_posteriors[i] = self.log_variational_posterior() + if testing: + outputs[npass] = self.bnn_layer(input, sample=False) + return outputs, log_priors, log_variational_posteriors + + +def get_uncertainty(outputs): + # predict the aleatoric and epistemic uncertainties + p = F.softmax(outputs, dim=-1) # N x B x C + # compute aleatoric uncertainty + p_diag = torch.diag_embed(p, offset=0, dim1=-2, dim2=-1) # N x B x C x C + p_cov = torch.matmul(p.unsqueeze(-1), p.unsqueeze(-1).permute(0, 1, 3, 2)) # N x B x C x C + uncertain_alea = torch.mean(p_diag - p_cov, dim=0) # B x C x C + # compute epistemic uncertainty + p_bar= torch.mean(p, dim=0) # B x C + p_diff_var = torch.matmul((p-p_bar).unsqueeze(-1), (p-p_bar).unsqueeze(-1).permute(0, 1, 3, 2)) # N x B x C x C + uncertain_epis = torch.mean(p_diff_var, dim=0) # B x C x C + # transform the class-wise uncertainties into total uncertainty by matrix tracing + uncertain_alea = torch.diagonal(uncertain_alea, dim1=-2, dim2=-1).sum(-1) + uncertain_epis = torch.diagonal(uncertain_epis, dim1=-2, dim2=-1).sum(-1) + return uncertain_alea, uncertain_epis \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/debias_head.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/debias_head.py new file mode 100644 index 0000000..eb08670 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/debias_head.py @@ -0,0 +1,234 @@ +import torch.nn as nn +import torch +from mmcv.cnn import ConvModule, constant_init, normal_init, xavier_init +import numpy as np + +from ..registry import HEADS +from .base import BaseHead + +@HEADS.register_module() +class DebiasHead(BaseHead): + """Debias head. + + Args: + num_classes (int): Number of classes to be classified. + in_channels (int): Number of channels in input feature. + loss_cls (dict): Config for building loss. + Default: dict(type='EvidenceLoss') + spatial_type (str): Pooling type in spatial dimension. Default: 'avg'. + dropout_ratio (float): Probability of dropout layer. Default: 0.5. + init_std (float): Std value for Initiation. Default: 0.01. + kwargs (dict, optional): Any keyword argument to be used to initialize + the head. + """ + + def __init__(self, + num_classes, + in_channels, + loss_cls=dict(type='EvidenceLoss'), + loss_factor=0.1, + hsic_factor=0.5, # useful when alternative=True + alternative=False, + bias_input=True, + bias_network=True, + dropout_ratio=0.5, + init_std=0.01, + **kwargs): + super().__init__(num_classes, in_channels, loss_cls, **kwargs) + self.bias_input = bias_input + self.bias_network = bias_network + assert bias_input or bias_network, "At least one of the choices (bias_input, bias_network) should be True!" 
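+ # Up to three evidential branches are built below: f1 on the original
+ # clip feature, f2 on a temporally shuffled copy (input bias) and f3 on
+ # per-frame 2D features (network bias); forward() couples them with
+ # HSIC-based debiasing losses.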
+ self.loss_factor = loss_factor + self.hsic_factor = hsic_factor + self.alternative = alternative + self.f1_conv3d = ConvModule( + in_channels, + in_channels * 2, (1, 3, 3), + stride=(1, 2, 2), + padding=(0, 1, 1), + bias=False, + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d', requires_grad=True)) + if bias_input: + self.f2_conv3d = ConvModule( + in_channels, + in_channels * 2, (1, 3, 3), + stride=(1, 2, 2), + padding=(0, 1, 1), + bias=False, + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d', requires_grad=True)) + if bias_network: + self.f3_conv2d = ConvModule( + in_channels, + in_channels * 2, (3, 3), + stride=(2, 2), + padding=(1, 1), + bias=False, + conv_cfg=dict(type='Conv2d'), + norm_cfg=dict(type='BN', requires_grad=True)) + self.dropout_ratio = dropout_ratio + self.init_std = init_std + if self.dropout_ratio != 0: + self.dropout = nn.Dropout(p=self.dropout_ratio) + else: + self.dropout = None + self.f1_fc = nn.Linear(self.in_channels * 2, self.num_classes) + self.f2_fc = nn.Linear(self.in_channels * 2, self.num_classes) + self.f3_fc = nn.Linear(self.in_channels * 2, self.num_classes) + self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1)) + + def init_weights(self): + """Initiate the parameters from scratch.""" + for m in self.modules(): + if isinstance(m, nn.Linear): + normal_init(m, std=self.init_std) + if isinstance(m, nn.Conv3d): + xavier_init(m, distribution='uniform') + if isinstance(m, nn.BatchNorm3d): + constant_init(m, 1) + + def exp_evidence(self, y): + return torch.exp(torch.clamp(y, -10, 10)) + + def edl_loss(self, func, alpha, y): + S = torch.sum(alpha, dim=1, keepdim=True) + loss = torch.sum(y * (func(S) - func(alpha)), dim=1, keepdim=True) + return loss + + def kl_divergence(self, alpha, beta): + # compute the negative KL divergence between two Dirichlet distribution + S_alpha = torch.sum(alpha, dim=1, keepdim=True) + S_beta = torch.sum(beta, dim=1, keepdim=True) + lnA = torch.lgamma(S_alpha) - torch.sum(torch.lgamma(alpha), dim=1, keepdim=True) + lnB = torch.lgamma(S_beta) - torch.sum(torch.lgamma(beta), dim=1, keepdim=True) + # compute the digamma term + dg_term = torch.digamma(alpha) - torch.digamma(S_alpha) + # final KL divergence + kl = lnA - lnB + torch.sum((alpha - beta) * dg_term, dim=1, keepdim=True) + return kl + + def _kernel(self, X, sigma): + X = X.view(len(X), -1) + XX = X @ X.t() + X_sqnorms = torch.diag(XX) + X_L2 = -2 * XX + X_sqnorms.unsqueeze(1) + X_sqnorms.unsqueeze(0) + gamma = 1 / (2 * sigma ** 2) + + kernel_XX = torch.exp(-gamma * X_L2) + return kernel_XX + + def hsic_loss(self, input1, input2, unbiased=False): + N = len(input1) + if N < 4: + return torch.tensor(0.0).to(input1.device) + # we simply use the squared dimension of feature as the sigma for RBF kernel + sigma_x = np.sqrt(input1.size()[1]) + sigma_y = np.sqrt(input2.size()[1]) + + # compute the kernels + kernel_XX = self._kernel(input1, sigma_x) + kernel_YY = self._kernel(input2, sigma_y) + + if unbiased: + """Unbiased estimator of Hilbert-Schmidt Independence Criterion + Song, Le, et al. "Feature selection via dependence maximization." 2012. + """ + tK = kernel_XX - torch.diag(torch.diag(kernel_XX)) + tL = kernel_YY - torch.diag(torch.diag(kernel_YY)) + hsic = ( + torch.trace(tK @ tL) + + (torch.sum(tK) * torch.sum(tL) / (N - 1) / (N - 2)) + - (2 * torch.sum(tK, 0).dot(torch.sum(tL, 0)) / (N - 2)) + ) + loss = hsic if self.alternative else hsic / (N * (N - 3)) + else: + """Biased estimator of Hilbert-Schmidt Independence Criterion + Gretton, Arthur, et al. 
"Measuring statistical dependence with Hilbert-Schmidt norms." 2005. + """ + KH = kernel_XX - kernel_XX.mean(0, keepdim=True) + LH = kernel_YY - kernel_YY.mean(0, keepdim=True) + loss = torch.trace(KH @ LH / (N - 1) ** 2) + return loss + + def forward(self, x, num_segs=None, target=None, **kwargs): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. (B, 1024, 8, 14, 14) + + Returns: + torch.Tensor: The classification scores for input samples. + """ + feat = x.clone() if isinstance(x, torch.Tensor) else x[-2].clone() + if len(feat.size()) == 4: # for 2D recognizer + assert num_segs is not None + feat = feat.view((-1, num_segs) + feat.size()[1:]).transpose(1, 2).contiguous() + # one-hot embedding for the target + y = torch.eye(self.num_classes).to(feat.device) + y = y[target] + losses = dict() + + # f1_Conv3D(x) + x = self.f1_conv3d(feat) # (B, 2048, 8, 7, 7) + feat_unbias = self.avg_pool(x).squeeze(-1).squeeze(-1).squeeze(-1) + x = self.dropout(feat_unbias) + x = self.f1_fc(x) + alpha_unbias = self.exp_evidence(x) + 1 + # minimize the edl losses + loss_cls1 = self.edl_loss(torch.log, alpha_unbias, y) + losses.update({'loss_unbias_cls': loss_cls1}) + + loss_hsic_f, loss_hsic_g = torch.zeros_like(loss_cls1), torch.zeros_like(loss_cls1) + if self.bias_input: + # f2_Conv3D(x) + feat_shuffle = feat[:, :, torch.randperm(feat.size()[2])] + x = self.f2_conv3d(feat_shuffle) # (B, 2048, 8, 7, 7) + feat_bias1 = self.avg_pool(x).squeeze(-1).squeeze(-1).squeeze(-1) + x = self.dropout(feat_bias1) + x = self.f2_fc(x) + alpha_bias1 = self.exp_evidence(x) + 1 + # minimize the edl losses + loss_cls2 = self.edl_loss(torch.log, alpha_bias1, y) + losses.update({'loss_bias1_cls': loss_cls2}) + if self.alternative: + # minimize HSIC w.r.t. feat_unbias, and maximize HSIC w.r.t. feat_bias1 + loss_hsic_f += self.hsic_factor * self.hsic_loss(feat_unbias, feat_bias1.detach(), unbiased=True) + loss_hsic_g += - self.hsic_factor * self.hsic_loss(feat_unbias.detach(), feat_bias1, unbiased=True) + else: + # maximize HSIC + loss_hsic1 = -1.0 * self.hsic_loss(alpha_unbias, alpha_bias1) + losses.update({"loss_bias1_hsic": loss_hsic1}) + + if self.bias_network: + # f3_Conv2D(x) + B, C, T, H, W = feat.size() + feat_reshape = feat.permute(0, 2, 1, 3, 4).contiguous().view(-1, C, H, W) # (B*T, C, H, W) + x = self.f3_conv2d(feat_reshape) # (64, 2048, 7, 7) + x = x.view(B, T, x.size(-3), x.size(-2), x.size(-1)).permute(0, 2, 1, 3, 4) # (B, 2048, 8, 7, 7) + feat_bias2 = self.avg_pool(x).squeeze(-1).squeeze(-1).squeeze(-1) + x = self.dropout(feat_bias2) + x = self.f3_fc(x) + alpha_bias2 = self.exp_evidence(x) + 1 + # minimize the edl losses + loss_cls3 = self.edl_loss(torch.log, alpha_bias2, y) + losses.update({'loss_bias2_cls': loss_cls3}) + if self.alternative: + # minimize HSIC w.r.t. feat_unbias, and maximize HSIC w.r.t. feat_bias2 + loss_hsic_f += self.hsic_factor * self.hsic_loss(feat_unbias, feat_bias2.detach(), unbiased=True) + loss_hsic_g += - self.hsic_factor * self.hsic_loss(feat_unbias.detach(), feat_bias2, unbiased=True) + else: + # maximize HSIC + loss_hsic2 = -1.0 * self.hsic_loss(alpha_unbias, alpha_bias2) + losses.update({"loss_bias2_hsic": loss_hsic2}) + + if self.alternative: + # Here, we use odd iterations for minimizing hsic_f, and use even iterations for maximizing hsic_g + assert 'iter' in kwargs, "iter number is missing!" 
+ loss_mask = kwargs['iter'] % 2 + loss_hsic = loss_mask * loss_hsic_f + (1 - loss_mask) * loss_hsic_g + losses.update({'loss_hsic': loss_hsic}) + + for k, v in losses.items(): + losses.update({k: v * self.loss_factor}) + return losses diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/i3d_bnn_head.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/i3d_bnn_head.py new file mode 100644 index 0000000..8274427 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/i3d_bnn_head.py @@ -0,0 +1,92 @@ +import torch +import torch.nn as nn + +from ..registry import HEADS +from .base import BaseHead +from ..builder import build_loss +from .bnn import BayesianPredictor, get_uncertainty + + +@HEADS.register_module() +class I3DBNNHead(BaseHead): + """Classification head for I3D. + + Args: + num_classes (int): Number of classes to be classified. + in_channels (int): Number of channels in input feature. + loss_cls (dict): Config for building loss. + Default: dict(type='CrossEntropyLoss') + spatial_type (str): Pooling type in spatial dimension. Default: 'avg'. + dropout_ratio (float): Probability of dropout layer. Default: 0. + init_std (float): Std value for Initiation. Default: 0.01. + kwargs (dict, optional): Any keyword argument to be used to initialize + the head. + """ + + def __init__(self, + num_classes, + in_channels, + loss_cls=dict(type='BayesianNNLoss'), + spatial_type='avg', + dropout_ratio=0, + init_std=0.01, + compute_uncertainty=False, + **kwargs): + super().__init__(num_classes, in_channels, loss_cls, **kwargs) + self.bnn_loss = self.loss_cls + + self.spatial_type = spatial_type + self.dropout_ratio = dropout_ratio + self.init_std = init_std + self.compute_uncertainty = compute_uncertainty + if self.dropout_ratio != 0: + self.dropout = nn.Dropout(p=self.dropout_ratio) + else: + self.dropout = None + # self.fc_cls = nn.Linear(self.in_channels, self.num_classes) + self.bnn_cls = BayesianPredictor(self.in_channels, self.num_classes) + + if self.spatial_type == 'avg': + # use `nn.AdaptiveAvgPool3d` to adaptively match the in_channels. + self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1)) + else: + self.avg_pool = None + + def init_weights(self): + """Initiate the parameters from scratch.""" + # normal_init(self.bnn_cls, std=self.init_std) + pass # BNN do not need to explicity initialized + + + def forward(self, x, npass=2, testing=False): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + torch.Tensor: The classification scores for input samples. 
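+
+ Example:
+ >>> # Sketch; shapes are assumed to match an I3D backbone output and
+ >>> # BayesianNNLoss is taken from this repo's loss registry.
+ >>> import torch
+ >>> head = I3DBNNHead(num_classes=101, in_channels=2048, compute_uncertainty=True)
+ >>> feat = torch.rand(2, 2048, 4, 7, 7)
+ >>> out = head(feat, npass=5, testing=True)
+ >>> sorted(out.keys())
+ ['aleatoric', 'epistemic', 'log_posterior', 'log_prior', 'pred_mean']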
+ """ + # [N, in_channels, 4, 7, 7] + if self.avg_pool is not None: + x = self.avg_pool(x) + # [N, in_channels, 1, 1, 1] + if self.dropout is not None: + x = self.dropout(x) + # [N, in_channels, 1, 1, 1] + x = x.view(x.shape[0], -1) + # [N, in_channels] + outputs, log_priors, log_variational_posteriors = self.bnn_cls(x, npass=npass, testing=testing) + + # gather output dictionary + output = outputs.mean(0) + log_prior = log_priors.mean() + log_variational_posterior = log_variational_posteriors.mean() + output_dict = {'pred_mean': output, + 'log_prior': log_prior, + 'log_posterior': log_variational_posterior} + if self.compute_uncertainty: + uncertain_alea, uncertain_epis = get_uncertainty(outputs) + output_dict.update({'aleatoric': uncertain_alea, + 'epistemic': uncertain_epis}) + return output_dict diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/i3d_head.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/i3d_head.py new file mode 100644 index 0000000..bfe423d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/i3d_head.py @@ -0,0 +1,73 @@ +import torch.nn as nn +from mmcv.cnn import normal_init + +from ..registry import HEADS +from .base import BaseHead + + +@HEADS.register_module() +class I3DHead(BaseHead): + """Classification head for I3D. + + Args: + num_classes (int): Number of classes to be classified. + in_channels (int): Number of channels in input feature. + loss_cls (dict): Config for building loss. + Default: dict(type='CrossEntropyLoss') + spatial_type (str): Pooling type in spatial dimension. Default: 'avg'. + dropout_ratio (float): Probability of dropout layer. Default: 0.5. + init_std (float): Std value for Initiation. Default: 0.01. + kwargs (dict, optional): Any keyword argument to be used to initialize + the head. + """ + + def __init__(self, + num_classes, + in_channels, + loss_cls=dict(type='CrossEntropyLoss'), + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01, + **kwargs): + super().__init__(num_classes, in_channels, loss_cls, **kwargs) + + self.spatial_type = spatial_type + self.dropout_ratio = dropout_ratio + self.init_std = init_std + if self.dropout_ratio != 0: + self.dropout = nn.Dropout(p=self.dropout_ratio) + else: + self.dropout = None + self.fc_cls = nn.Linear(self.in_channels, self.num_classes) + + if self.spatial_type == 'avg': + # use `nn.AdaptiveAvgPool3d` to adaptively match the in_channels. + self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1)) + else: + self.avg_pool = None + + def init_weights(self): + """Initiate the parameters from scratch.""" + normal_init(self.fc_cls, std=self.init_std) + + def forward(self, x): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + torch.Tensor: The classification scores for input samples. 
+ """ + # [N, in_channels, 4, 7, 7] + if self.avg_pool is not None: + x = self.avg_pool(x) + # [N, in_channels, 1, 1, 1] + if self.dropout is not None: + x = self.dropout(x) + # [N, in_channels, 1, 1, 1] + x = x.view(x.shape[0], -1) + # [N, in_channels] + cls_score = self.fc_cls(x) + # [N, num_classes] + return cls_score diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/i3d_rpl_head.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/i3d_rpl_head.py new file mode 100644 index 0000000..58b8024 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/i3d_rpl_head.py @@ -0,0 +1,100 @@ +import torch +import torch.nn as nn +from mmcv.cnn import normal_init + +from ..registry import HEADS +from .base import BaseHead + + +@HEADS.register_module() +class I3DRPLHead(BaseHead): + """Classification head for I3D. + + Args: + num_classes (int): Number of classes to be classified. + in_channels (int): Number of channels in input feature. + loss_cls (dict): Config for building loss. + Default: dict(type='CrossEntropyLoss') + spatial_type (str): Pooling type in spatial dimension. Default: 'avg'. + dropout_ratio (float): Probability of dropout layer. Default: 0.5. + init_std (float): Std value for Initiation. Default: 0.01. + kwargs (dict, optional): Any keyword argument to be used to initialize + the head. + """ + + def __init__(self, + num_classes, + in_channels, + loss_cls=dict(type='RPLoss'), + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01, + num_centers=1, + **kwargs): + super().__init__(num_classes, in_channels, loss_cls, **kwargs) + self.spatial_type = spatial_type + self.dropout_ratio = dropout_ratio + self.init_std = init_std + self.num_centers = num_centers + + if self.dropout_ratio != 0: + self.dropout = nn.Dropout(p=self.dropout_ratio) + else: + self.dropout = None + self.fc_centers = nn.Linear(self.in_channels, self.num_classes * self.num_centers, bias=False) + + if self.spatial_type == 'avg': + # use `nn.AdaptiveAvgPool3d` to adaptively match the in_channels. + self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1)) + else: + self.avg_pool = None + + def init_weights(self): + """Initiate the parameters from scratch.""" + normal_init(self.fc_centers, std=self.init_std) + + + def compute_dist(self, features, center=None, metric='fc'): + if metric == 'l2': + f_2 = torch.sum(torch.pow(features, 2), dim=1, keepdim=True) + if center is None: + c_2 = torch.sum(torch.pow(self.fc_centers.weight, 2), dim=1, keepdim=True) + dist = f_2 - 2 * self.fc_centers(features) + torch.transpose(c_2, 1, 0) + else: + c_2 = torch.sum(torch.pow(center, 2), dim=1, keepdim=True) + dist = f_2 - 2*torch.matmul(features, torch.transpose(center, 1, 0)) + torch.transpose(c_2, 1, 0) + dist = dist / float(features.shape[1]) + else: + if center is None: + dist = self.fc_centers(features) + else: + dist = features.matmul(center.t()) + dist = torch.reshape(dist, [-1, self.num_classes, self.num_centers]) + dist = torch.mean(dist, dim=2) + return dist + + + def forward(self, x): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + torch.Tensor: The classification scores for input samples. 
+ """ + # [N, in_channels, 4, 7, 7] + if self.avg_pool is not None: + x = self.avg_pool(x) + # [N, in_channels, 1, 1, 1] + if self.dropout is not None: + x = self.dropout(x) + # [N, in_channels, 1, 1, 1] + x = x.view(x.shape[0], -1) + # [N, in_channels] + dist = self.compute_dist(x) + # [N, num_classes] + if self.loss_cls.__class__.__name__ == 'GCPLoss': + dist = -dist + outputs = {'dist': dist, 'feature': x, 'centers': self.fc_centers.weight} + return outputs diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/rebias_head.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/rebias_head.py new file mode 100644 index 0000000..470d054 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/rebias_head.py @@ -0,0 +1,79 @@ +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, constant_init, normal_init, xavier_init +from ..builder import build_loss + + +class RebiasHead(nn.Module): + def __init__(self, + in_channels, + out_channels, + loss_weight=0.5, + loss_rebias=dict(type='RebiasLoss')): + super().__init__() + + self.conv_f = ConvModule( + in_channels, + in_channels * 2, (1, 3, 3), + stride=(1, 2, 2), + padding=(0, 1, 1), + bias=False, + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d', requires_grad=True)) + self.conv_g = ConvModule( + in_channels, + in_channels * 2, (1, 3, 3), + stride=(1, 2, 2), + padding=(0, 1, 1), + bias=False, + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d', requires_grad=True)) + self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1)) + self.loss_weight = loss_weight + self.dropout = nn.Dropout(p=0.5) + self.fc_f = nn.Linear(in_channels * 2, out_channels) + self.fc_g = nn.Linear(in_channels * 2, out_channels) + self.loss_rebias = build_loss(loss_rebias) + + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Linear): + normal_init(m, std=0.01) + if isinstance(m, nn.Conv3d): + xavier_init(m, distribution='uniform') + if isinstance(m, nn.BatchNorm3d): + constant_init(m, 1) + + def forward(self, x, target=None): + # x: (B, 1024, 8, 14, 14) + xs = x.detach().clone() # we do not want the backbone is updated by g(xs) + xs = xs[:, :, torch.randperm(xs.size()[2])] # temporally shuffle the feature + losses = dict() + if target is None: + return losses + # f(x) + x = self.conv_f(x) # (B, 2048, 8, 7, 7) + x = self.avg_pool(x).squeeze(-1).squeeze(-1).squeeze(-1) + # phi(f(x)) + y = self.dropout(x) + y = self.fc_f(y) + + # g(xs) + xs = self.conv_g(xs) # (B, 2048, 8, 7, 7) + xs = self.avg_pool(xs).squeeze(-1).squeeze(-1).squeeze(-1) + # phi(g(xs)) + ys = self.dropout(xs) + ys = self.fc_g(ys) + + if target.shape == torch.Size([]): + target = target.unsqueeze(0) + + # compute the rebias losses + rebias_loss = self.loss_rebias(x, xs, y, ys, target) + + if isinstance(rebias_loss, dict): + for k, v in rebias_loss.items(): + losses.update({k: self.loss_weight * v}) + else: + losses = {'loss_rebias': rebias_loss} + return losses \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/rpl_dist.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/rpl_dist.py new file mode 100644 index 0000000..24ec252 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/rpl_dist.py @@ -0,0 +1,38 @@ +import torch +import torch.nn as nn + + +class Dist(nn.Module): + def __init__(self, num_classes=10, num_centers=1, feat_dim=2, init='random'): + super(Dist, self).__init__() + self.feat_dim = feat_dim + 
self.num_classes = num_classes + self.num_centers = num_centers + + if init == 'random': + self.centers = nn.Parameter(0.1 * torch.randn(num_classes * num_centers, self.feat_dim)) + else: + self.centers = nn.Parameter(torch.Tensor(num_classes * num_centers, self.feat_dim)) + self.centers.data.fill_(0) + + def forward(self, features, center=None, metric='l2'): + if metric == 'l2': + f_2 = torch.sum(torch.pow(features, 2), dim=1, keepdim=True) + if center is None: + c_2 = torch.sum(torch.pow(self.centers, 2), dim=1, keepdim=True) + dist = f_2 - 2*torch.matmul(features, torch.transpose(self.centers, 1, 0)) + torch.transpose(c_2, 1, 0) + else: + c_2 = torch.sum(torch.pow(center, 2), dim=1, keepdim=True) + dist = f_2 - 2*torch.matmul(features, torch.transpose(center, 1, 0)) + torch.transpose(c_2, 1, 0) + dist = dist / float(features.shape[1]) + else: + if center is None: + center = self.centers + else: + center = center + dist = features.matmul(center.t()) + dist = torch.reshape(dist, [-1, self.num_classes, self.num_centers]) + dist = torch.mean(dist, dim=2) + + return dist + \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/slowfast_bnn_head.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/slowfast_bnn_head.py new file mode 100644 index 0000000..62b93f9 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/slowfast_bnn_head.py @@ -0,0 +1,96 @@ +import torch +import torch.nn as nn + +from ..registry import HEADS +from .base import BaseHead +from ..builder import build_loss +from .bnn import BayesianPredictor, get_uncertainty + + +@HEADS.register_module() +class SlowFastBNNHead(BaseHead): + """The classification head for SlowFast. + + Args: + num_classes (int): Number of classes to be classified. + in_channels (int): Number of channels in input feature. + loss_cls (dict): Config for building loss. + Default: dict(type='CrossEntropyLoss'). + spatial_type (str): Pooling type in spatial dimension. Default: 'avg'. + dropout_ratio (float): Probability of dropout layer. Default: 0.8. + init_std (float): Std value for Initiation. Default: 0.01. + kwargs (dict, optional): Any keyword argument to be used to initialize + the head. + """ + + def __init__(self, + num_classes, + in_channels, + loss_cls=dict(type='BayesianNNLoss'), + spatial_type='avg', + dropout_ratio=0.8, + init_std=0.01, + compute_uncertainty=False, + **kwargs): + + super().__init__(num_classes, in_channels, loss_cls, **kwargs) + self.bnn_loss = self.loss_cls + self.spatial_type = spatial_type + self.dropout_ratio = dropout_ratio + self.init_std = init_std + self.compute_uncertainty = compute_uncertainty + + if self.dropout_ratio != 0: + self.dropout = nn.Dropout(p=self.dropout_ratio) + else: + self.dropout = None + # self.fc_cls = nn.Linear(in_channels, num_classes) + self.bnn_cls = BayesianPredictor(self.in_channels, self.num_classes) + + if self.spatial_type == 'avg': + self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1)) + else: + self.avg_pool = None + + def init_weights(self): + """Initiate the parameters from scratch.""" + pass # BNN do not need to explicity initialized + + def forward(self, x, npass=2, testing=False): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + torch.Tensor: The classification scores for input samples. 
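+                Returned as a dict (``pred_mean``, ``log_prior``,
+                ``log_posterior`` and, optionally, the uncertainty estimates).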
+ """ + # ([N, channel_fast, T, H, W], [(N, channel_slow, T, H, W)]) + x_fast, x_slow = x + # ([N, channel_fast, 1, 1, 1], [N, channel_slow, 1, 1, 1]) + x_fast = self.avg_pool(x_fast) + x_slow = self.avg_pool(x_slow) + # [N, channel_fast + channel_slow, 1, 1, 1] + x = torch.cat((x_slow, x_fast), dim=1) + + if self.dropout is not None: + x = self.dropout(x) + + # [N x C] + x = x.view(x.size(0), -1) + # [N x num_classes] + outputs, log_priors, log_variational_posteriors = self.bnn_cls(x, npass=npass, testing=testing) + + # gather output dictionary + output = outputs.mean(0) + log_prior = log_priors.mean() + log_variational_posterior = log_variational_posteriors.mean() + output_dict = {'pred_mean': output, + 'log_prior': log_prior, + 'log_posterior': log_variational_posterior} + if self.compute_uncertainty: + uncertain_alea, uncertain_epis = get_uncertainty(outputs) + output_dict.update({'aleatoric': uncertain_alea, + 'epistemic': uncertain_epis}) + + return output_dict diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/slowfast_head.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/slowfast_head.py new file mode 100644 index 0000000..5b195aa --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/slowfast_head.py @@ -0,0 +1,79 @@ +import torch +import torch.nn as nn +from mmcv.cnn import normal_init + +from ..registry import HEADS +from .base import BaseHead + + +@HEADS.register_module() +class SlowFastHead(BaseHead): + """The classification head for SlowFast. + + Args: + num_classes (int): Number of classes to be classified. + in_channels (int): Number of channels in input feature. + loss_cls (dict): Config for building loss. + Default: dict(type='CrossEntropyLoss'). + spatial_type (str): Pooling type in spatial dimension. Default: 'avg'. + dropout_ratio (float): Probability of dropout layer. Default: 0.8. + init_std (float): Std value for Initiation. Default: 0.01. + kwargs (dict, optional): Any keyword argument to be used to initialize + the head. + """ + + def __init__(self, + num_classes, + in_channels, + loss_cls=dict(type='CrossEntropyLoss'), + spatial_type='avg', + dropout_ratio=0.8, + init_std=0.01, + **kwargs): + + super().__init__(num_classes, in_channels, loss_cls, **kwargs) + self.spatial_type = spatial_type + self.dropout_ratio = dropout_ratio + self.init_std = init_std + + if self.dropout_ratio != 0: + self.dropout = nn.Dropout(p=self.dropout_ratio) + else: + self.dropout = None + self.fc_cls = nn.Linear(in_channels, num_classes) + + if self.spatial_type == 'avg': + self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1)) + else: + self.avg_pool = None + + def init_weights(self): + """Initiate the parameters from scratch.""" + normal_init(self.fc_cls, std=self.init_std) + + def forward(self, x): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + torch.Tensor: The classification scores for input samples. 
+ """ + # ([N, channel_fast, T, H, W], [(N, channel_slow, T, H, W)]) + x_fast, x_slow = x + # ([N, channel_fast, 1, 1, 1], [N, channel_slow, 1, 1, 1]) + x_fast = self.avg_pool(x_fast) + x_slow = self.avg_pool(x_slow) + # [N, channel_fast + channel_slow, 1, 1, 1] + x = torch.cat((x_slow, x_fast), dim=1) + + if self.dropout is not None: + x = self.dropout(x) + + # [N x C] + x = x.view(x.size(0), -1) + # [N x num_classes] + cls_score = self.fc_cls(x) + + return cls_score diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/slowfast_rpl_head.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/slowfast_rpl_head.py new file mode 100644 index 0000000..d579cc3 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/slowfast_rpl_head.py @@ -0,0 +1,104 @@ +import torch +import torch.nn as nn +from mmcv.cnn import normal_init + +from ..registry import HEADS +from .base import BaseHead + + +@HEADS.register_module() +class SlowFastRPLHead(BaseHead): + """The classification head for SlowFast. + + Args: + num_classes (int): Number of classes to be classified. + in_channels (int): Number of channels in input feature. + loss_cls (dict): Config for building loss. + Default: dict(type='CrossEntropyLoss'). + spatial_type (str): Pooling type in spatial dimension. Default: 'avg'. + dropout_ratio (float): Probability of dropout layer. Default: 0.8. + init_std (float): Std value for Initiation. Default: 0.01. + kwargs (dict, optional): Any keyword argument to be used to initialize + the head. + """ + + def __init__(self, + num_classes, + in_channels, + loss_cls=dict(type='RPLoss'), + spatial_type='avg', + dropout_ratio=0.8, + init_std=0.01, + num_centers=1, + **kwargs): + + super().__init__(num_classes, in_channels, loss_cls, **kwargs) + self.spatial_type = spatial_type + self.dropout_ratio = dropout_ratio + self.init_std = init_std + self.num_centers = num_centers + + if self.dropout_ratio != 0: + self.dropout = nn.Dropout(p=self.dropout_ratio) + else: + self.dropout = None + self.fc_centers = nn.Linear(self.in_channels, self.num_classes * self.num_centers, bias=False) + + if self.spatial_type == 'avg': + self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1)) + else: + self.avg_pool = None + + def init_weights(self): + """Initiate the parameters from scratch.""" + normal_init(self.fc_centers, std=self.init_std) + + def compute_dist(self, features, center=None, metric='fc'): + if metric == 'l2': + f_2 = torch.sum(torch.pow(features, 2), dim=1, keepdim=True) + if center is None: + c_2 = torch.sum(torch.pow(self.fc_centers.weight, 2), dim=1, keepdim=True) + dist = f_2 - 2 * self.fc_centers(features) + torch.transpose(c_2, 1, 0) + else: + c_2 = torch.sum(torch.pow(center, 2), dim=1, keepdim=True) + dist = f_2 - 2*torch.matmul(features, torch.transpose(center, 1, 0)) + torch.transpose(c_2, 1, 0) + dist = dist / float(features.shape[1]) + else: + if center is None: + dist = self.fc_centers(features) + else: + dist = features.matmul(center.t()) + dist = torch.reshape(dist, [-1, self.num_classes, self.num_centers]) + dist = torch.mean(dist, dim=2) + return dist + + def forward(self, x): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + torch.Tensor: The classification scores for input samples. 
+ """ + # ([N, channel_fast, T, H, W], [(N, channel_slow, T, H, W)]) + x_fast, x_slow = x + # ([N, channel_fast, 1, 1, 1], [N, channel_slow, 1, 1, 1]) + x_fast = self.avg_pool(x_fast) + x_slow = self.avg_pool(x_slow) + # [N, channel_fast + channel_slow, 1, 1, 1] + x = torch.cat((x_slow, x_fast), dim=1) + + if self.dropout is not None: + x = self.dropout(x) + + # [N x C] + x = x.view(x.size(0), -1) + # [N x num_classes] + dist = self.compute_dist(x) + # [N, num_classes] + if self.loss_cls.__class__.__name__ == 'GCPLoss': + dist = -dist + outputs = {'dist': dist, 'feature': x, 'centers': self.fc_centers.weight} + + return outputs diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/ssn_head.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/ssn_head.py new file mode 100644 index 0000000..3399d3d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/ssn_head.py @@ -0,0 +1,412 @@ +import torch +import torch.nn as nn +from mmcv.cnn import normal_init + +from ..registry import HEADS + + +def parse_stage_config(stage_cfg): + """Parse config of STPP for three stages. + + Args: + stage_cfg (int | tuple[int]): + Config of structured temporal pyramid pooling. + + Returns: + tuple[tuple[int], int]: + Config of structured temporal pyramid pooling and + total number of parts(number of multipliers). + """ + if isinstance(stage_cfg, int): + return (stage_cfg, ), stage_cfg + if isinstance(stage_cfg, tuple): + return stage_cfg, sum(stage_cfg) + raise ValueError(f'Incorrect STPP config {stage_cfg}') + + +class STPPTrain(nn.Module): + """Structured temporal pyramid pooling for SSN at training. + + Args: + stpp_stage (tuple): Config of structured temporal pyramid pooling. + Default: (1, (1, 2), 1). + num_segments_list (tuple): Number of segments to be sampled + in three stages. Default: (2, 5, 2). + """ + + def __init__(self, stpp_stage=(1, (1, 2), 1), num_segments_list=(2, 5, 2)): + super().__init__() + + starting_part, starting_multiplier = parse_stage_config(stpp_stage[0]) + course_part, course_multiplier = parse_stage_config(stpp_stage[1]) + ending_part, ending_multiplier = parse_stage_config(stpp_stage[2]) + + self.num_multipliers = ( + starting_multiplier + course_multiplier + ending_multiplier) + self.stpp_stages = (starting_part, course_part, ending_part) + self.multiplier_list = (starting_multiplier, course_multiplier, + ending_multiplier) + + self.num_segments_list = num_segments_list + + @staticmethod + def _extract_stage_feature(stage_feat, stage_parts, num_multipliers, + scale_factors, num_samples): + """Extract stage feature based on structured temporal pyramid pooling. + + Args: + stage_feat (torch.Tensor): Stage features to be STPP. + stage_parts (tuple): Config of STPP. + num_multipliers (int): Total number of parts in the stage. + scale_factors (list): Ratios of the effective sampling lengths + to augmented lengths. + num_samples (int): Number of samples. + + Returns: + torch.Tensor: Features of the stage. 
+ """ + stage_stpp_feat = [] + stage_len = stage_feat.size(1) + for stage_part in stage_parts: + ticks = torch.arange(0, stage_len + 1e-5, + stage_len / stage_part).int() + for i in range(stage_part): + part_feat = stage_feat[:, ticks[i]:ticks[i + 1], :].mean( + dim=1) / num_multipliers + if scale_factors is not None: + part_feat = ( + part_feat * scale_factors.view(num_samples, 1)) + stage_stpp_feat.append(part_feat) + return stage_stpp_feat + + def forward(self, x, scale_factors): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + scale_factors (list): Ratios of the effective sampling lengths + to augmented lengths. + + Returns: + tuple[torch.Tensor, torch.Tensor]: + Features for predicting activity scores and + completeness scores. + """ + x0 = self.num_segments_list[0] + x1 = x0 + self.num_segments_list[1] + num_segments = x1 + self.num_segments_list[2] + + feat_dim = x.size(1) + x = x.view(-1, num_segments, feat_dim) + num_samples = x.size(0) + + scale_factors = scale_factors.view(-1, 2) + + stage_stpp_feats = [] + stage_stpp_feats.extend( + self._extract_stage_feature(x[:, :x0, :], self.stpp_stages[0], + self.multiplier_list[0], + scale_factors[:, 0], num_samples)) + stage_stpp_feats.extend( + self._extract_stage_feature(x[:, x0:x1, :], self.stpp_stages[1], + self.multiplier_list[1], None, + num_samples)) + stage_stpp_feats.extend( + self._extract_stage_feature(x[:, x1:, :], self.stpp_stages[2], + self.multiplier_list[2], + scale_factors[:, 1], num_samples)) + stpp_feat = torch.cat(stage_stpp_feats, dim=1) + + course_feat = x[:, x0:x1, :].mean(dim=1) + return course_feat, stpp_feat + + +class STPPTest(nn.Module): + """Structured temporal pyramid pooling for SSN at testing. + + Args: + num_classes (int): Number of classes to be classified. + use_regression (bool): Whether to perform regression or not. + Default: True. + stpp_stage (tuple): Config of structured temporal pyramid pooling. + Default: (1, (1, 2), 1). + """ + + def __init__(self, + num_classes, + use_regression=True, + stpp_stage=(1, (1, 2), 1)): + super().__init__() + + self.activity_score_len = num_classes + 1 + self.complete_score_len = num_classes + self.reg_score_len = num_classes * 2 + self.use_regression = use_regression + + starting_parts, starting_multiplier = parse_stage_config(stpp_stage[0]) + course_parts, course_multiplier = parse_stage_config(stpp_stage[1]) + ending_parts, ending_multiplier = parse_stage_config(stpp_stage[2]) + + self.num_multipliers = ( + starting_multiplier + course_multiplier + ending_multiplier) + if self.use_regression: + self.feat_dim = ( + self.activity_score_len + self.num_multipliers * + (self.complete_score_len + self.reg_score_len)) + else: + self.feat_dim = ( + self.activity_score_len + + self.num_multipliers * self.complete_score_len) + self.stpp_stage = (starting_parts, course_parts, ending_parts) + + self.activity_slice = slice(0, self.activity_score_len) + self.complete_slice = slice( + self.activity_slice.stop, self.activity_slice.stop + + self.complete_score_len * self.num_multipliers) + self.reg_slice = slice( + self.complete_slice.stop, self.complete_slice.stop + + self.reg_score_len * self.num_multipliers) + + @staticmethod + def _pyramids_pooling(out_scores, index, raw_scores, ticks, scale_factors, + score_len, stpp_stage): + """Perform pyramids pooling. + + Args: + out_scores (torch.Tensor): Scores to be returned. + index (int): Index of output scores. + raw_scores (torch.Tensor): Raw scores before STPP. 
+ ticks (list): Ticks of raw scores. + scale_factors (list): Ratios of the effective sampling lengths + to augmented lengths. + score_len (int): Length of the score. + stpp_stage (tuple): Config of STPP. + """ + offset = 0 + for stage_idx, stage_cfg in enumerate(stpp_stage): + if stage_idx == 0: + scale_factor = scale_factors[0] + elif stage_idx == len(stpp_stage) - 1: + scale_factor = scale_factors[1] + else: + scale_factor = 1.0 + + sum_parts = sum(stage_cfg) + tick_left = ticks[stage_idx] + tick_right = float(max(ticks[stage_idx] + 1, ticks[stage_idx + 1])) + + if tick_right <= 0 or tick_left >= raw_scores.size(0): + offset += sum_parts + continue + for num_parts in stage_cfg: + part_ticks = torch.arange(tick_left, tick_right + 1e-5, + (tick_right - tick_left) / + num_parts).int() + + for i in range(num_parts): + part_tick_left = part_ticks[i] + part_tick_right = part_ticks[i + 1] + if part_tick_right - part_tick_left >= 1: + raw_score = raw_scores[part_tick_left:part_tick_right, + offset * + score_len:(offset + 1) * + score_len] + raw_scale_score = raw_score.mean(dim=0) * scale_factor + out_scores[index, :] += raw_scale_score.detach().cpu() + offset += 1 + + return out_scores + + def forward(self, x, proposal_ticks, scale_factors): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + proposal_ticks (list): Ticks of proposals to be STPP. + scale_factors (list): Ratios of the effective sampling lengths + to augmented lengths. + + Returns: + tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + out_activity_scores (torch.Tensor): Activity scores + out_complete_scores (torch.Tensor): Completeness scores. + out_reg_scores (torch.Tensor): Regression scores. + """ + assert x.size(1) == self.feat_dim + num_ticks = proposal_ticks.size(0) + + out_activity_scores = torch.zeros((num_ticks, self.activity_score_len), + dtype=x.dtype) + raw_activity_scores = x[:, self.activity_slice] + + out_complete_scores = torch.zeros((num_ticks, self.complete_score_len), + dtype=x.dtype) + raw_complete_scores = x[:, self.complete_slice] + + if self.use_regression: + out_reg_scores = torch.zeros((num_ticks, self.reg_score_len), + dtype=x.dtype) + raw_reg_scores = x[:, self.reg_slice] + else: + out_reg_scores = None + raw_reg_scores = None + + for i in range(num_ticks): + ticks = proposal_ticks[i] + + out_activity_scores[i, :] = raw_activity_scores[ + ticks[1]:max(ticks[1] + 1, ticks[2]), :].mean(dim=0) + + out_complete_scores = self._pyramids_pooling( + out_complete_scores, i, raw_complete_scores, ticks, + scale_factors[i], self.complete_score_len, self.stpp_stage) + + if self.use_regression: + out_reg_scores = self._pyramids_pooling( + out_reg_scores, i, raw_reg_scores, ticks, scale_factors[i], + self.reg_score_len, self.stpp_stage) + + return out_activity_scores, out_complete_scores, out_reg_scores + + +@HEADS.register_module() +class SSNHead(nn.Module): + """The classification head for SSN. + + Args: + dropout_ratio (float): Probability of dropout layer. Default: 0.8. + in_channels (int): Number of channels for input data. Default: 1024. + num_classes (int): Number of classes to be classified. Default: 20. + consensus (dict): Config of segmental consensus. + use_regression (bool): Whether to perform regression or not. + Default: True. + init_std (float): Std value for Initiation. Default: 0.001. 
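+
+    The ``consensus`` config decides whether ``STPPTrain`` (training) or
+    ``STPPTest`` (testing) is built, so the two phases apply different
+    structured temporal pyramid pooling on top of the same head.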
+ """ + + def __init__(self, + dropout_ratio=0.8, + in_channels=1024, + num_classes=20, + consensus=dict( + type='STPPTrain', + standalong_classifier=True, + stpp_cfg=(1, 1, 1), + num_seg=(2, 5, 2)), + use_regression=True, + init_std=0.001): + + super().__init__() + + self.dropout_ratio = dropout_ratio + self.num_classes = num_classes + self.use_regression = use_regression + self.init_std = init_std + + if self.dropout_ratio != 0: + self.dropout = nn.Dropout(p=self.dropout_ratio) + else: + self.dropout = None + + # Based on this copy, the model will utilize different + # structured temporal pyramid pooling at training and testing. + # Warning: this copy cannot be removed. + consensus_ = consensus.copy() + consensus_type = consensus_.pop('type') + if consensus_type == 'STPPTrain': + self.consensus = STPPTrain(**consensus_) + elif consensus_type == 'STPPTest': + consensus_['num_classes'] = self.num_classes + self.consensus = STPPTest(**consensus_) + + self.in_channels_activity = in_channels + self.in_channels_complete = ( + self.consensus.num_multipliers * in_channels) + self.activity_fc = nn.Linear(in_channels, num_classes + 1) + self.completeness_fc = nn.Linear(self.in_channels_complete, + num_classes) + if self.use_regression: + self.regressor_fc = nn.Linear(self.in_channels_complete, + num_classes * 2) + + def init_weights(self): + """Initiate the parameters from scratch.""" + normal_init(self.activity_fc, std=self.init_std) + normal_init(self.completeness_fc, std=self.init_std) + if self.use_regression: + normal_init(self.regressor_fc, std=self.init_std) + + def prepare_test_fc(self, stpp_feat_multiplier): + """Reorganize the shape of fully connected layer at testing, in order + to improve testing efficiency. + + Args: + stpp_feat_multiplier (int): Total number of parts. + + Returns: + bool: Whether the shape transformation is ready for testing. + """ + + in_features = self.activity_fc.in_features + out_features = ( + self.activity_fc.out_features + + self.completeness_fc.out_features * stpp_feat_multiplier) + if self.use_regression: + out_features += ( + self.regressor_fc.out_features * stpp_feat_multiplier) + self.test_fc = nn.Linear(in_features, out_features) + + # Fetch weight and bias of the reorganized fc. 
+ complete_weight = self.completeness_fc.weight.data.view( + self.completeness_fc.out_features, stpp_feat_multiplier, + in_features).transpose(0, 1).contiguous().view(-1, in_features) + complete_bias = self.completeness_fc.bias.data.view(1, -1).expand( + stpp_feat_multiplier, self.completeness_fc.out_features + ).contiguous().view(-1) / stpp_feat_multiplier + + weight = torch.cat((self.activity_fc.weight.data, complete_weight)) + bias = torch.cat((self.activity_fc.bias.data, complete_bias)) + + if self.use_regression: + reg_weight = self.regressor_fc.weight.data.view( + self.regressor_fc.out_features, stpp_feat_multiplier, + in_features).transpose(0, + 1).contiguous().view(-1, in_features) + reg_bias = self.regressor_fc.bias.data.view(1, -1).expand( + stpp_feat_multiplier, self.regressor_fc.out_features + ).contiguous().view(-1) / stpp_feat_multiplier + weight = torch.cat((weight, reg_weight)) + bias = torch.cat((bias, reg_bias)) + + self.test_fc.weight.data = weight + self.test_fc.bias.data = bias + return True + + def forward(self, x, test_mode=False): + """Defines the computation performed at every call.""" + if not test_mode: + x, proposal_scale_factor = x + activity_feat, completeness_feat = self.consensus( + x, proposal_scale_factor) + + if self.dropout is not None: + activity_feat = self.dropout(activity_feat) + completeness_feat = self.dropout(completeness_feat) + + activity_scores = self.activity_fc(activity_feat) + complete_scores = self.completeness_fc(completeness_feat) + if self.use_regression: + bbox_preds = self.regressor_fc(completeness_feat) + bbox_preds = bbox_preds.view(-1, + self.completeness_fc.out_features, + 2) + else: + bbox_preds = None + return activity_scores, complete_scores, bbox_preds + + x, proposal_tick_list, scale_factor_list = x + test_scores = self.test_fc(x) + (activity_scores, completeness_scores, + bbox_preds) = self.consensus(test_scores, proposal_tick_list, + scale_factor_list) + + return (test_scores, activity_scores, completeness_scores, bbox_preds) diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tpn_bnn_head.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tpn_bnn_head.py new file mode 100644 index 0000000..5c07828 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tpn_bnn_head.py @@ -0,0 +1,90 @@ +import torch.nn as nn + +from ..registry import HEADS +from .tsn_head import TSNHead +from ..builder import build_loss +from .bnn import BayesianPredictor, get_uncertainty + + +@HEADS.register_module() +class TPNBNNHead(TSNHead): + """Class head for TPN. + + Args: + num_classes (int): Number of classes to be classified. + in_channels (int): Number of channels in input feature. + loss_cls (dict): Config for building loss. + Default: dict(type='CrossEntropyLoss'). + spatial_type (str): Pooling type in spatial dimension. Default: 'avg'. + consensus (dict): Consensus config dict. + dropout_ratio (float): Probability of dropout layer. Default: 0.4. + init_std (float): Std value for Initiation. Default: 0.01. + multi_class (bool): Determines whether it is a multi-class + recognition task. Default: False. + label_smooth_eps (float): Epsilon used in label smooth. + Reference: https://arxiv.org/abs/1906.02629. Default: 0. 
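+        compute_uncertainty (bool): Whether to additionally return aleatoric
+            and epistemic uncertainty estimated from the sampled predictions.
+            Default: False.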
+ """ + + def __init__(self, compute_uncertainty=False, *args, **kwargs): + super().__init__(*args, **kwargs) + self.compute_uncertainty = compute_uncertainty + + # use bnn classification head + self.bnn_cls = BayesianPredictor(self.in_channels, self.num_classes) + self.bnn_loss = self.loss_cls + + if self.spatial_type == 'avg': + # use `nn.AdaptiveAvgPool3d` to adaptively match the in_channels. + self.avg_pool3d = nn.AdaptiveAvgPool3d((1, 1, 1)) + else: + self.avg_pool3d = None + + self.avg_pool2d = None + + + def forward(self, x, num_segs=None, npass=2, testing=False): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + num_segs (int | None): Number of segments into which a video + is divided. Default: None. + Returns: + torch.Tensor: The classification scores for input samples. + """ + if self.avg_pool2d is None: + kernel_size = (1, x.shape[-2], x.shape[-1]) + self.avg_pool2d = nn.AvgPool3d(kernel_size, stride=1, padding=0) + + if num_segs is None: + # [N, in_channels, 3, 7, 7] + x = self.avg_pool3d(x) + else: + # [N * num_segs, in_channels, 7, 7] + x = self.avg_pool2d(x) + # [N * num_segs, in_channels, 1, 1] + x = x.reshape((-1, num_segs) + x.shape[1:]) + # [N, num_segs, in_channels, 1, 1] + x = self.consensus(x) + # [N, 1, in_channels, 1, 1] + x = x.squeeze(1) + # [N, in_channels, 1, 1] + if self.dropout is not None: + x = self.dropout(x) + # [N, in_channels, 1, 1] + x = x.view(x.size(0), -1) + # [N, in_channels] + outputs, log_priors, log_variational_posteriors = self.bnn_cls(x, npass=npass, testing=testing) + + # gather output dictionary + output = outputs.mean(0) + log_prior = log_priors.mean() + log_variational_posterior = log_variational_posteriors.mean() + output_dict = {'pred_mean': output, + 'log_prior': log_prior, + 'log_posterior': log_variational_posterior} + if self.compute_uncertainty: + uncertain_alea, uncertain_epis = get_uncertainty(outputs) + output_dict.update({'aleatoric': uncertain_alea, + 'epistemic': uncertain_epis}) + return output_dict diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tpn_head.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tpn_head.py new file mode 100644 index 0000000..b06be38 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tpn_head.py @@ -0,0 +1,71 @@ +import torch.nn as nn + +from ..registry import HEADS +from .tsn_head import TSNHead + + +@HEADS.register_module() +class TPNHead(TSNHead): + """Class head for TPN. + + Args: + num_classes (int): Number of classes to be classified. + in_channels (int): Number of channels in input feature. + loss_cls (dict): Config for building loss. + Default: dict(type='CrossEntropyLoss'). + spatial_type (str): Pooling type in spatial dimension. Default: 'avg'. + consensus (dict): Consensus config dict. + dropout_ratio (float): Probability of dropout layer. Default: 0.4. + init_std (float): Std value for Initiation. Default: 0.01. + multi_class (bool): Determines whether it is a multi-class + recognition task. Default: False. + label_smooth_eps (float): Epsilon used in label smooth. + Reference: https://arxiv.org/abs/1906.02629. Default: 0. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + if self.spatial_type == 'avg': + # use `nn.AdaptiveAvgPool3d` to adaptively match the in_channels. 
+ self.avg_pool3d = nn.AdaptiveAvgPool3d((1, 1, 1)) + else: + self.avg_pool3d = None + + self.avg_pool2d = None + + def forward(self, x, num_segs=None): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + num_segs (int | None): Number of segments into which a video + is divided. Default: None. + Returns: + torch.Tensor: The classification scores for input samples. + """ + if self.avg_pool2d is None: + kernel_size = (1, x.shape[-2], x.shape[-1]) + self.avg_pool2d = nn.AvgPool3d(kernel_size, stride=1, padding=0) + + if num_segs is None: + # [N, in_channels, 3, 7, 7] + x = self.avg_pool3d(x) + else: + # [N * num_segs, in_channels, 7, 7] + x = self.avg_pool2d(x) + # [N * num_segs, in_channels, 1, 1] + x = x.reshape((-1, num_segs) + x.shape[1:]) + # [N, num_segs, in_channels, 1, 1] + x = self.consensus(x) + # [N, 1, in_channels, 1, 1] + x = x.squeeze(1) + # [N, in_channels, 1, 1] + if self.dropout is not None: + x = self.dropout(x) + # [N, in_channels, 1, 1] + x = x.view(x.size(0), -1) + # [N, in_channels] + cls_score = self.fc_cls(x) + # [N, num_classes] + return cls_score diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tpn_rpl_head.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tpn_rpl_head.py new file mode 100644 index 0000000..2ea7b90 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tpn_rpl_head.py @@ -0,0 +1,104 @@ +import torch +import torch.nn as nn +from mmcv.cnn import normal_init + +from ..registry import HEADS +from .tsn_head import TSNHead + + +@HEADS.register_module() +class TPNRPLHead(TSNHead): + """Class head for TPN. + + Args: + num_classes (int): Number of classes to be classified. + in_channels (int): Number of channels in input feature. + loss_cls (dict): Config for building loss. + Default: dict(type='CrossEntropyLoss'). + spatial_type (str): Pooling type in spatial dimension. Default: 'avg'. + consensus (dict): Consensus config dict. + dropout_ratio (float): Probability of dropout layer. Default: 0.4. + init_std (float): Std value for Initiation. Default: 0.01. + multi_class (bool): Determines whether it is a multi-class + recognition task. Default: False. + label_smooth_eps (float): Epsilon used in label smooth. + Reference: https://arxiv.org/abs/1906.02629. Default: 0. + """ + + def __init__(self, num_centers=1, *args, **kwargs): + super().__init__(*args, **kwargs) + + self.num_centers = num_centers + self.fc_centers = nn.Linear(self.in_channels, self.num_classes * self.num_centers, bias=False) + + if self.spatial_type == 'avg': + # use `nn.AdaptiveAvgPool3d` to adaptively match the in_channels. 
+ self.avg_pool3d = nn.AdaptiveAvgPool3d((1, 1, 1)) + else: + self.avg_pool3d = None + + self.avg_pool2d = None + + def init_weights(self): + """Initiate the parameters from scratch.""" + normal_init(self.fc_centers, std=self.init_std) + + def compute_dist(self, features, center=None, metric='fc'): + if metric == 'l2': + f_2 = torch.sum(torch.pow(features, 2), dim=1, keepdim=True) + if center is None: + c_2 = torch.sum(torch.pow(self.fc_centers.weight, 2), dim=1, keepdim=True) + dist = f_2 - 2 * self.fc_centers(features) + torch.transpose(c_2, 1, 0) + else: + c_2 = torch.sum(torch.pow(center, 2), dim=1, keepdim=True) + dist = f_2 - 2*torch.matmul(features, torch.transpose(center, 1, 0)) + torch.transpose(c_2, 1, 0) + dist = dist / float(features.shape[1]) + else: + if center is None: + dist = self.fc_centers(features) + else: + dist = features.matmul(center.t()) + dist = torch.reshape(dist, [-1, self.num_classes, self.num_centers]) + dist = torch.mean(dist, dim=2) + return dist + + + def forward(self, x, num_segs=None): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + num_segs (int | None): Number of segments into which a video + is divided. Default: None. + Returns: + torch.Tensor: The classification scores for input samples. + """ + if self.avg_pool2d is None: + kernel_size = (1, x.shape[-2], x.shape[-1]) + self.avg_pool2d = nn.AvgPool3d(kernel_size, stride=1, padding=0) + + if num_segs is None: + # [N, in_channels, 3, 7, 7] + x = self.avg_pool3d(x) + else: + # [N * num_segs, in_channels, 7, 7] + x = self.avg_pool2d(x) + # [N * num_segs, in_channels, 1, 1] + x = x.reshape((-1, num_segs) + x.shape[1:]) + # [N, num_segs, in_channels, 1, 1] + x = self.consensus(x) + # [N, 1, in_channels, 1, 1] + x = x.squeeze(1) + # [N, in_channels, 1, 1] + if self.dropout is not None: + x = self.dropout(x) + # [N, in_channels, 1, 1] + x = x.view(x.size(0), -1) + # [N, in_channels] + dist = self.compute_dist(x) + # [N, num_classes] + if self.loss_cls.__class__.__name__ == 'GCPLoss': + dist = -dist + outputs = {'dist': dist, 'feature': x, 'centers': self.fc_centers.weight} + + return outputs diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tsm_bnn_head.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tsm_bnn_head.py new file mode 100644 index 0000000..71195bb --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tsm_bnn_head.py @@ -0,0 +1,128 @@ +import torch +import torch.nn as nn + +from ..registry import HEADS +from .base import AvgConsensus, BaseHead +from ..builder import build_loss +from .bnn import BayesianPredictor, get_uncertainty + + +@HEADS.register_module() +class TSMBNNHead(BaseHead): + """Class head for TSM. + + Args: + num_classes (int): Number of classes to be classified. + in_channels (int): Number of channels in input feature. + num_segments (int): Number of frame segments. Default: 8. + loss_cls (dict): Config for building loss. + Default: dict(type='CrossEntropyLoss') + spatial_type (str): Pooling type in spatial dimension. Default: 'avg'. + consensus (dict): Consensus config dict. + dropout_ratio (float): Probability of dropout layer. Default: 0.4. + init_std (float): Std value for Initiation. Default: 0.01. + is_shift (bool): Indicating whether the feature is shifted. + Default: True. + temporal_pool (bool): Indicating whether feature is temporal pooled. + Default: False. + kwargs (dict, optional): Any keyword argument to be used to initialize + the head. 
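+        compute_uncertainty (bool): Whether to also return aleatoric and
+            epistemic uncertainty from the Monte Carlo predictions.
+            Default: False.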
+ """ + + def __init__(self, + num_classes, + in_channels, + num_segments=8, + loss_cls=dict(type='BayesianNNLoss'), + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0, + init_std=0.001, + compute_uncertainty=False, + is_shift=True, + temporal_pool=False, + **kwargs): + super().__init__(num_classes, in_channels, loss_cls, **kwargs) + self.bnn_loss = self.loss_cls + + self.spatial_type = spatial_type + self.dropout_ratio = dropout_ratio + self.num_segments = num_segments + self.init_std = init_std + self.compute_uncertainty = compute_uncertainty + self.is_shift = is_shift + self.temporal_pool = temporal_pool + + consensus_ = consensus.copy() + + consensus_type = consensus_.pop('type') + if consensus_type == 'AvgConsensus': + self.consensus = AvgConsensus(**consensus_) + else: + self.consensus = None + + if self.dropout_ratio != 0: + self.dropout = nn.Dropout(p=self.dropout_ratio) + else: + self.dropout = None + # self.fc_cls = nn.Linear(self.in_channels, self.num_classes) + self.bnn_cls = BayesianPredictor(self.in_channels, self.num_classes) + + if self.spatial_type == 'avg': + # use `nn.AdaptiveAvgPool2d` to adaptively match the in_channels. + self.avg_pool = nn.AdaptiveAvgPool2d(1) + else: + self.avg_pool = None + + + def init_weights(self): + """Initiate the parameters from scratch.""" + pass # BNN do not need to explicity initialized + + def forward(self, x, num_segs, npass=2, testing=False): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + num_segs (int): Useless in TSMHead. By default, `num_segs` + is equal to `clip_len * num_clips * num_crops`, which is + automatically generated in Recognizer forward phase and + useless in TSM models. The `self.num_segments` we need is a + hyper parameter to build TSM models. + Returns: + torch.Tensor: The classification scores for input samples. 
+ """ + # [N * num_segs, in_channels, 7, 7] + x = self.avg_pool(x) + # [N * num_segs, in_channels, 1, 1] + x = torch.flatten(x, 1) + # [N * num_segs, in_channels] + if self.dropout is not None: + x = self.dropout(x) + # [N * num_segs, num_classes] + outputs, log_priors, log_variational_posteriors = self.bnn_cls(x, npass=npass, testing=testing) + + # gather output dictionary + log_prior = log_priors.mean() + log_variational_posterior = log_variational_posteriors.mean() + output_dict = {'log_prior': log_prior, + 'log_posterior': log_variational_posterior} + if self.compute_uncertainty: + uncertain_alea, uncertain_epis = get_uncertainty(outputs) + output_dict.update({'aleatoric': uncertain_alea, + 'epistemic': uncertain_epis}) + + cls_score = outputs.mean(0) + if self.is_shift and self.temporal_pool: + # [2 * N, num_segs // 2, num_classes] + cls_score = cls_score.view((-1, self.num_segments // 2) + + cls_score.size()[1:]) + else: + # [N, num_segs, num_classes] + cls_score = cls_score.view((-1, self.num_segments) + + cls_score.size()[1:]) + # [N, 1, num_classes] + cls_score = self.consensus(cls_score) + output_dict.update({'pred_mean': cls_score.squeeze(1)}) + + return output_dict diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tsm_head.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tsm_head.py new file mode 100644 index 0000000..5c0207f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tsm_head.py @@ -0,0 +1,110 @@ +import torch +import torch.nn as nn +from mmcv.cnn import normal_init + +from ..registry import HEADS +from .base import AvgConsensus, BaseHead + + +@HEADS.register_module() +class TSMHead(BaseHead): + """Class head for TSM. + + Args: + num_classes (int): Number of classes to be classified. + in_channels (int): Number of channels in input feature. + num_segments (int): Number of frame segments. Default: 8. + loss_cls (dict): Config for building loss. + Default: dict(type='CrossEntropyLoss') + spatial_type (str): Pooling type in spatial dimension. Default: 'avg'. + consensus (dict): Consensus config dict. + dropout_ratio (float): Probability of dropout layer. Default: 0.4. + init_std (float): Std value for Initiation. Default: 0.01. + is_shift (bool): Indicating whether the feature is shifted. + Default: True. + temporal_pool (bool): Indicating whether feature is temporal pooled. + Default: False. + kwargs (dict, optional): Any keyword argument to be used to initialize + the head. + """ + + def __init__(self, + num_classes, + in_channels, + num_segments=8, + loss_cls=dict(type='CrossEntropyLoss'), + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.8, + init_std=0.001, + is_shift=True, + temporal_pool=False, + **kwargs): + super().__init__(num_classes, in_channels, loss_cls, **kwargs) + + self.spatial_type = spatial_type + self.dropout_ratio = dropout_ratio + self.num_segments = num_segments + self.init_std = init_std + self.is_shift = is_shift + self.temporal_pool = temporal_pool + + consensus_ = consensus.copy() + + consensus_type = consensus_.pop('type') + if consensus_type == 'AvgConsensus': + self.consensus = AvgConsensus(**consensus_) + else: + self.consensus = None + + if self.dropout_ratio != 0: + self.dropout = nn.Dropout(p=self.dropout_ratio) + else: + self.dropout = None + self.fc_cls = nn.Linear(self.in_channels, self.num_classes) + + if self.spatial_type == 'avg': + # use `nn.AdaptiveAvgPool2d` to adaptively match the in_channels. 
+ self.avg_pool = nn.AdaptiveAvgPool2d(1) + else: + self.avg_pool = None + + def init_weights(self): + """Initiate the parameters from scratch.""" + normal_init(self.fc_cls, std=self.init_std) + + def forward(self, x, num_segs): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + num_segs (int): Useless in TSMHead. By default, `num_segs` + is equal to `clip_len * num_clips * num_crops`, which is + automatically generated in Recognizer forward phase and + useless in TSM models. The `self.num_segments` we need is a + hyper parameter to build TSM models. + Returns: + torch.Tensor: The classification scores for input samples. + """ + # [N * num_segs, in_channels, 7, 7] + x = self.avg_pool(x) + # [N * num_segs, in_channels, 1, 1] + x = torch.flatten(x, 1) + # [N * num_segs, in_channels] + if self.dropout is not None: + x = self.dropout(x) + # [N * num_segs, num_classes] + cls_score = self.fc_cls(x) + + if self.is_shift and self.temporal_pool: + # [2 * N, num_segs // 2, num_classes] + cls_score = cls_score.view((-1, self.num_segments // 2) + + cls_score.size()[1:]) + else: + # [N, num_segs, num_classes] + cls_score = cls_score.view((-1, self.num_segments) + + cls_score.size()[1:]) + # [N, 1, num_classes] + cls_score = self.consensus(cls_score) + # [N, num_classes] + return cls_score.squeeze(1) diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tsm_rpl_head.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tsm_rpl_head.py new file mode 100644 index 0000000..eb090dd --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tsm_rpl_head.py @@ -0,0 +1,145 @@ +import torch +import torch.nn as nn +from mmcv.cnn import normal_init + +from ..registry import HEADS +from .base import AvgConsensus, BaseHead + + +@HEADS.register_module() +class TSMRPLHead(BaseHead): + """Class head for TSM. + + Args: + num_classes (int): Number of classes to be classified. + in_channels (int): Number of channels in input feature. + num_segments (int): Number of frame segments. Default: 8. + loss_cls (dict): Config for building loss. + Default: dict(type='CrossEntropyLoss') + spatial_type (str): Pooling type in spatial dimension. Default: 'avg'. + consensus (dict): Consensus config dict. + dropout_ratio (float): Probability of dropout layer. Default: 0.4. + init_std (float): Std value for Initiation. Default: 0.01. + is_shift (bool): Indicating whether the feature is shifted. + Default: True. + temporal_pool (bool): Indicating whether feature is temporal pooled. + Default: False. + kwargs (dict, optional): Any keyword argument to be used to initialize + the head. 
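+        num_centers (int): Number of learnable centers kept per class for the
+            distance computation. Default: 1.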
+ """ + + def __init__(self, + num_classes, + in_channels, + num_segments=8, + loss_cls=dict(type='RPLoss'), + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.8, + init_std=0.001, + is_shift=True, + temporal_pool=False, + num_centers=1, + **kwargs): + super().__init__(num_classes, in_channels, loss_cls, **kwargs) + + self.spatial_type = spatial_type + self.dropout_ratio = dropout_ratio + self.num_segments = num_segments + self.init_std = init_std + self.is_shift = is_shift + self.temporal_pool = temporal_pool + self.num_centers = num_centers + + consensus_ = consensus.copy() + + consensus_type = consensus_.pop('type') + if consensus_type == 'AvgConsensus': + self.consensus = AvgConsensus(**consensus_) + else: + self.consensus = None + + if self.dropout_ratio != 0: + self.dropout = nn.Dropout(p=self.dropout_ratio) + else: + self.dropout = None + self.fc_centers = nn.Linear(self.in_channels, self.num_classes * self.num_centers, bias=False) + + if self.spatial_type == 'avg': + # use `nn.AdaptiveAvgPool2d` to adaptively match the in_channels. + self.avg_pool = nn.AdaptiveAvgPool2d(1) + else: + self.avg_pool = None + + def init_weights(self): + """Initiate the parameters from scratch.""" + normal_init(self.fc_centers, std=self.init_std) + + + def compute_dist(self, features, center=None, metric='fc'): + if metric == 'l2': + f_2 = torch.sum(torch.pow(features, 2), dim=1, keepdim=True) + if center is None: + c_2 = torch.sum(torch.pow(self.fc_centers.weight, 2), dim=1, keepdim=True) + dist = f_2 - 2 * self.fc_centers(features) + torch.transpose(c_2, 1, 0) + else: + c_2 = torch.sum(torch.pow(center, 2), dim=1, keepdim=True) + dist = f_2 - 2*torch.matmul(features, torch.transpose(center, 1, 0)) + torch.transpose(c_2, 1, 0) + dist = dist / float(features.shape[1]) + else: + if center is None: + dist = self.fc_centers(features) + else: + dist = features.matmul(center.t()) + dist = torch.reshape(dist, [-1, self.num_classes, self.num_centers]) + dist = torch.mean(dist, dim=2) + return dist + + + def aggregate(self, cls_score): + if self.is_shift and self.temporal_pool: + # [2 * N, num_segs // 2, num_classes] + cls_score = cls_score.view((-1, self.num_segments // 2) + + cls_score.size()[1:]) + else: + # [N, num_segs, num_classes] + cls_score = cls_score.view((-1, self.num_segments) + + cls_score.size()[1:]) + # [N, 1, num_classes] + cls_score = self.consensus(cls_score) + # [N, num_classes] + return cls_score.squeeze(1) + + + def forward(self, x, num_segs): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + num_segs (int): Useless in TSMHead. By default, `num_segs` + is equal to `clip_len * num_clips * num_crops`, which is + automatically generated in Recognizer forward phase and + useless in TSM models. The `self.num_segments` we need is a + hyper parameter to build TSM models. + Returns: + torch.Tensor: The classification scores for input samples. 
+ """ + # [N * num_segs, in_channels, 7, 7] + x = self.avg_pool(x) + # [N * num_segs, in_channels, 1, 1] + x = torch.flatten(x, 1) + # [N * num_segs, in_channels] + if self.dropout is not None: + x = self.dropout(x) + # [N * num_segs, num_classes] + dist = self.compute_dist(x) + # [N, num_classes] + + dist = self.aggregate(dist) + feature = self.aggregate(x) + + if self.loss_cls.__class__.__name__ == 'GCPLoss': + dist = -dist + outputs = {'dist': dist, 'feature': feature, 'centers': self.fc_centers.weight} + + return outputs diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tsn_head.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tsn_head.py new file mode 100644 index 0000000..f36fb6a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/tsn_head.py @@ -0,0 +1,91 @@ +import torch.nn as nn +from mmcv.cnn import normal_init + +from ..registry import HEADS +from .base import AvgConsensus, BaseHead + + +@HEADS.register_module() +class TSNHead(BaseHead): + """Class head for TSN. + + Args: + num_classes (int): Number of classes to be classified. + in_channels (int): Number of channels in input feature. + loss_cls (dict): Config for building loss. + Default: dict(type='CrossEntropyLoss'). + spatial_type (str): Pooling type in spatial dimension. Default: 'avg'. + consensus (dict): Consensus config dict. + dropout_ratio (float): Probability of dropout layer. Default: 0.4. + init_std (float): Std value for Initiation. Default: 0.01. + kwargs (dict, optional): Any keyword argument to be used to initialize + the head. + """ + + def __init__(self, + num_classes, + in_channels, + loss_cls=dict(type='CrossEntropyLoss'), + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1), + dropout_ratio=0.4, + init_std=0.01, + **kwargs): + super().__init__(num_classes, in_channels, loss_cls=loss_cls, **kwargs) + + self.spatial_type = spatial_type + self.dropout_ratio = dropout_ratio + self.init_std = init_std + + consensus_ = consensus.copy() + + consensus_type = consensus_.pop('type') + if consensus_type == 'AvgConsensus': + self.consensus = AvgConsensus(**consensus_) + else: + self.consensus = None + + if self.spatial_type == 'avg': + # use `nn.AdaptiveAvgPool2d` to adaptively match the in_channels. + self.avg_pool = nn.AdaptiveAvgPool2d((1, 1)) + else: + self.avg_pool = None + + if self.dropout_ratio != 0: + self.dropout = nn.Dropout(p=self.dropout_ratio) + else: + self.dropout = None + self.fc_cls = nn.Linear(self.in_channels, self.num_classes) + + def init_weights(self): + """Initiate the parameters from scratch.""" + normal_init(self.fc_cls, std=self.init_std) + + def forward(self, x, num_segs): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + num_segs (int): Number of segments into which a video + is divided. + Returns: + torch.Tensor: The classification scores for input samples. 
+ """ + # [N * num_segs, in_channels, 7, 7] + if self.avg_pool is not None: + x = self.avg_pool(x) + # [N * num_segs, in_channels, 1, 1] + x = x.reshape((-1, num_segs) + x.shape[1:]) + # [N, num_segs, in_channels, 1, 1] + x = self.consensus(x) + # [N, 1, in_channels, 1, 1] + x = x.squeeze(1) + # [N, in_channels, 1, 1] + if self.dropout is not None: + x = self.dropout(x) + # [N, in_channels, 1, 1] + x = x.view(x.size(0), -1) + # [N, in_channels] + cls_score = self.fc_cls(x) + # [N, num_classes] + return cls_score diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/x3d_head.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/x3d_head.py new file mode 100644 index 0000000..2452df0 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/heads/x3d_head.py @@ -0,0 +1,89 @@ +import torch.nn as nn +from mmcv.cnn import normal_init + +from ..registry import HEADS +from .base import BaseHead + + +@HEADS.register_module() +class X3DHead(BaseHead): + """Classification head for I3D. + + Args: + num_classes (int): Number of classes to be classified. + in_channels (int): Number of channels in input feature. + loss_cls (dict): Config for building loss. + Default: dict(type='CrossEntropyLoss') + spatial_type (str): Pooling type in spatial dimension. Default: 'avg'. + dropout_ratio (float): Probability of dropout layer. Default: 0.5. + init_std (float): Std value for Initiation. Default: 0.01. + fc1_bias (bool): If the first fc layer has bias. Default: False. + """ + + def __init__(self, + num_classes, + in_channels, + loss_cls=dict(type='CrossEntropyLoss'), + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01, + fc1_bias=False): + super().__init__(num_classes, in_channels, loss_cls) + + self.spatial_type = spatial_type + self.dropout_ratio = dropout_ratio + self.init_std = init_std + if self.dropout_ratio != 0: + self.dropout = nn.Dropout(p=self.dropout_ratio) + else: + self.dropout = None + self.in_channels = in_channels + self.mid_channels = 2048 + self.num_classes = num_classes + self.fc1_bias = fc1_bias + + self.fc1 = nn.Linear( + self.in_channels, self.mid_channels, bias=self.fc1_bias) + self.fc2 = nn.Linear(self.mid_channels, self.num_classes) + + self.relu = nn.ReLU() + + self.pool = None + if self.spatial_type == 'avg': + self.pool = nn.AdaptiveAvgPool3d((1, 1, 1)) + elif self.spatial_type == 'max': + self.pool = nn.AdaptiveMaxPool3d((1, 1, 1)) + else: + raise NotImplementedError + + def init_weights(self): + """Initiate the parameters from scratch.""" + normal_init(self.fc1, std=self.init_std) + normal_init(self.fc2, std=self.init_std) + + def forward(self, x): + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + torch.Tensor: The classification scores for input samples. 
+ """ + # [N, in_channels, T, H, W] + assert self.pool is not None + x = self.pool(x) + # [N, in_channels, 1, 1, 1] + # [N, in_channels, 1, 1, 1] + x = x.view(x.shape[0], -1) + # [N, in_channels] + x = self.fc1(x) + # [N, 2048] + x = self.relu(x) + + if self.dropout is not None: + x = self.dropout(x) + + cls_score = self.fc2(x) + # [N, num_classes] + return cls_score diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/__init__.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/__init__.py new file mode 100644 index 0000000..0d50890 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/__init__.py @@ -0,0 +1,6 @@ +from .base import BaseLocalizer +from .bmn import BMN +from .bsn import PEM, TEM +from .ssn import SSN + +__all__ = ['PEM', 'TEM', 'BMN', 'SSN', 'BaseLocalizer'] diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/base.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/base.py new file mode 100644 index 0000000..abc7155 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/base.py @@ -0,0 +1,143 @@ +from abc import ABCMeta, abstractmethod +from collections import OrderedDict + +import torch +import torch.distributed as dist +import torch.nn as nn + +from .. import builder + + +class BaseLocalizer(nn.Module, metaclass=ABCMeta): + """Base class for localizers. + + All localizers should subclass it. All subclass should overwrite: + Methods:``forward_train``, supporting to forward when training. + Methods:``forward_test``, supporting to forward when testing. + """ + + def __init__(self, backbone, cls_head, train_cfg=None, test_cfg=None): + super().__init__() + self.backbone = builder.build_backbone(backbone) + self.cls_head = builder.build_head(cls_head) + + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self.init_weights() + + def init_weights(self): + """Weight initialization for model.""" + self.backbone.init_weights() + self.cls_head.init_weights() + + def extract_feat(self, imgs): + """Extract features through a backbone. + + Args: + imgs (torch.Tensor): The input images. + Returns: + torch.tensor: The extracted features. + """ + x = self.backbone(imgs) + return x + + @abstractmethod + def forward_train(self, imgs, labels): + """Defines the computation performed at training.""" + + @abstractmethod + def forward_test(self, imgs): + """Defines the computation performed at testing.""" + + def forward(self, imgs, return_loss=True, **kwargs): + """Define the computation performed at every call.""" + if return_loss: + return self.forward_train(imgs, **kwargs) + + return self.forward_test(imgs, **kwargs) + + @staticmethod + def _parse_losses(losses): + """Parse the raw outputs (losses) of the network. + + Args: + losses (dict): Raw output of the network, which usually contain + losses and other necessary information. + + Returns: + tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor + which may be a weighted sum of all losses, log_vars contains + all the variables to be sent to the logger. 
+ """ + log_vars = OrderedDict() + for loss_name, loss_value in losses.items(): + if isinstance(loss_value, torch.Tensor): + log_vars[loss_name] = loss_value.mean() + elif isinstance(loss_value, list): + log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value) + else: + raise TypeError( + f'{loss_name} is not a tensor or list of tensors') + + loss = sum(_value for _key, _value in log_vars.items() + if 'loss' in _key) + + log_vars['loss'] = loss + for loss_name, loss_value in log_vars.items(): + # reduce loss when distributed training + if dist.is_available() and dist.is_initialized(): + loss_value = loss_value.data.clone() + dist.all_reduce(loss_value.div_(dist.get_world_size())) + log_vars[loss_name] = loss_value.item() + + return loss, log_vars + + def train_step(self, data_batch, optimizer, **kwargs): + """The iteration step during training. + + This method defines an iteration step during training, except for the + back propagation and optimizer updating, which are done in an optimizer + hook. Note that in some complicated cases or models, the whole process + including back propagation and optimizer updating is also defined in + this method, such as GAN. + + Args: + data_batch (dict): The output of dataloader. + optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of + runner is passed to ``train_step()``. This argument is unused + and reserved. + + Returns: + dict: It should contain at least 3 keys: ``loss``, ``log_vars``, + ``num_samples``. + ``loss`` is a tensor for back propagation, which can be a + weighted sum of multiple losses. + ``log_vars`` contains all the variables to be sent to the + logger. + ``num_samples`` indicates the batch size (when the model is + DDP, it means the batch size on each GPU), which is used for + averaging the logs. + """ + losses = self.forward(**data_batch) + + loss, log_vars = self._parse_losses(losses) + + outputs = dict( + loss=loss, + log_vars=log_vars, + num_samples=len(next(iter(data_batch.values())))) + + return outputs + + def val_step(self, data_batch, optimizer, **kwargs): + """The iteration step during validation. + + This method shares the same signature as :func:`train_step`, but used + during val epochs. Note that the evaluation after training epochs is + not implemented with this method, but an evaluation hook. + """ + results = self.forward(return_loss=False, **data_batch) + + outputs = dict(results=results) + + return outputs diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/bmn.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/bmn.py new file mode 100644 index 0000000..20efbc5 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/bmn.py @@ -0,0 +1,415 @@ +import math + +import numpy as np +import torch +import torch.nn as nn + +from ...localization import temporal_iop, temporal_iou +from ..builder import build_loss +from ..registry import LOCALIZERS +from .base import BaseLocalizer +from .utils import post_processing + + +@LOCALIZERS.register_module() +class BMN(BaseLocalizer): + """Boundary Matching Network for temporal action proposal generation. + + Please refer `BMN: Boundary-Matching Network for Temporal Action Proposal + Generation `_. + Code Reference https://github.com/JJBOY/BMN-Boundary-Matching-Network + + Args: + temporal_dim (int): Total frames selected for each video. + boundary_ratio (float): Ratio for determining video boundaries. + num_samples (int): Number of samples for each proposal. 
+ num_samples_per_bin (int): Number of bin samples for each sample. + feat_dim (int): Feature dimension. + soft_nms_alpha (float): Soft NMS alpha. + soft_nms_low_threshold (float): Soft NMS low threshold. + soft_nms_high_threshold (float): Soft NMS high threshold. + post_process_top_k (int): Top k proposals in post process. + feature_extraction_interval (int): + Interval used in feature extraction. Default: 16. + loss_cls (dict): Config for building loss. + Default: ``dict(type='BMNLoss')``. + hidden_dim_1d (int): Hidden dim for 1d conv. Default: 256. + hidden_dim_2d (int): Hidden dim for 2d conv. Default: 128. + hidden_dim_3d (int): Hidden dim for 3d conv. Default: 512. + """ + + def __init__(self, + temporal_dim, + boundary_ratio, + num_samples, + num_samples_per_bin, + feat_dim, + soft_nms_alpha, + soft_nms_low_threshold, + soft_nms_high_threshold, + post_process_top_k, + feature_extraction_interval=16, + loss_cls=dict(type='BMNLoss'), + hidden_dim_1d=256, + hidden_dim_2d=128, + hidden_dim_3d=512): + super(BaseLocalizer, self).__init__() + + self.tscale = temporal_dim + self.boundary_ratio = boundary_ratio + self.num_samples = num_samples + self.num_samples_per_bin = num_samples_per_bin + self.feat_dim = feat_dim + self.soft_nms_alpha = soft_nms_alpha + self.soft_nms_low_threshold = soft_nms_low_threshold + self.soft_nms_high_threshold = soft_nms_high_threshold + self.post_process_top_k = post_process_top_k + self.feature_extraction_interval = feature_extraction_interval + self.loss_cls = build_loss(loss_cls) + self.hidden_dim_1d = hidden_dim_1d + self.hidden_dim_2d = hidden_dim_2d + self.hidden_dim_3d = hidden_dim_3d + + self._get_interp1d_mask() + + # Base Module + self.x_1d_b = nn.Sequential( + nn.Conv1d( + self.feat_dim, + self.hidden_dim_1d, + kernel_size=3, + padding=1, + groups=4), nn.ReLU(inplace=True), + nn.Conv1d( + self.hidden_dim_1d, + self.hidden_dim_1d, + kernel_size=3, + padding=1, + groups=4), nn.ReLU(inplace=True)) + + # Temporal Evaluation Module + self.x_1d_s = nn.Sequential( + nn.Conv1d( + self.hidden_dim_1d, + self.hidden_dim_1d, + kernel_size=3, + padding=1, + groups=4), nn.ReLU(inplace=True), + nn.Conv1d(self.hidden_dim_1d, 1, kernel_size=1), nn.Sigmoid()) + self.x_1d_e = nn.Sequential( + nn.Conv1d( + self.hidden_dim_1d, + self.hidden_dim_1d, + kernel_size=3, + padding=1, + groups=4), nn.ReLU(inplace=True), + nn.Conv1d(self.hidden_dim_1d, 1, kernel_size=1), nn.Sigmoid()) + + # Proposal Evaluation Module + self.x_1d_p = nn.Sequential( + nn.Conv1d( + self.hidden_dim_1d, + self.hidden_dim_1d, + kernel_size=3, + padding=1), nn.ReLU(inplace=True)) + self.x_3d_p = nn.Sequential( + nn.Conv3d( + self.hidden_dim_1d, + self.hidden_dim_3d, + kernel_size=(self.num_samples, 1, 1)), nn.ReLU(inplace=True)) + self.x_2d_p = nn.Sequential( + nn.Conv2d(self.hidden_dim_3d, self.hidden_dim_2d, kernel_size=1), + nn.ReLU(inplace=True), + nn.Conv2d( + self.hidden_dim_2d, + self.hidden_dim_2d, + kernel_size=3, + padding=1), nn.ReLU(inplace=True), + nn.Conv2d( + self.hidden_dim_2d, + self.hidden_dim_2d, + kernel_size=3, + padding=1), nn.ReLU(inplace=True), + nn.Conv2d(self.hidden_dim_2d, 2, kernel_size=1), nn.Sigmoid()) + self.anchors_tmins, self.anchors_tmaxs = self._temporal_anchors( + -0.5, 1.5) + self.match_map = self._match_map() + self.bm_mask = self._get_bm_mask() + + def _match_map(self): + """Generate match map.""" + temporal_gap = 1. 
/ self.tscale + match_map = [] + for idx in range(self.tscale): + match_window = [] + tmin = temporal_gap * idx + for jdx in range(1, self.tscale + 1): + tmax = tmin + temporal_gap * jdx + match_window.append([tmin, tmax]) + match_map.append(match_window) + match_map = np.array(match_map) + match_map = np.transpose(match_map, [1, 0, 2]) + match_map = np.reshape(match_map, [-1, 2]) + return match_map + + def _temporal_anchors(self, tmin_offset=0., tmax_offset=1.): + """Generate temporal anchors. + + Args: + tmin_offset (int): Offset for the minimum value of temporal anchor. + Default: 0. + tmax_offset (int): Offset for the maximun value of temporal anchor. + Default: 1. + + Returns: + tuple[Sequence[float]]: The minimum and maximum values of temporal + anchors. + """ + temporal_gap = 1. / self.tscale + anchors_tmins = [] + anchors_tmaxs = [] + for i in range(self.tscale): + anchors_tmins.append(temporal_gap * (i + tmin_offset)) + anchors_tmaxs.append(temporal_gap * (i + tmax_offset)) + + return anchors_tmins, anchors_tmaxs + + def _forward(self, x): + """Define the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + torch.Tensor: The output of the module. + """ + # x.shape [batch_size, self.feat_dim, self.tscale] + base_feature = self.x_1d_b(x) + # base_feature.shape [batch_size, self.hidden_dim_1d, self.tscale] + start = self.x_1d_s(base_feature).squeeze(1) + # start.shape [batch_size, self.tscale] + end = self.x_1d_e(base_feature).squeeze(1) + # end.shape [batch_size, self.tscale] + confidence_map = self.x_1d_p(base_feature) + # [batch_size, self.hidden_dim_1d, self.tscale] + confidence_map = self._boundary_matching_layer(confidence_map) + # [batch_size, self.hidden_dim_1d,, self.num_sampls, self.tscale, self.tscale] # noqa + confidence_map = self.x_3d_p(confidence_map).squeeze(2) + # [batch_size, self.hidden_dim_3d, self.tscale, self.tscale] + confidence_map = self.x_2d_p(confidence_map) + # [batch_size, 2, self.tscale, self.tscale] + + return confidence_map, start, end + + def _boundary_matching_layer(self, x): + """Generate matching layer.""" + input_size = x.size() + out = torch.matmul(x, + self.sample_mask).reshape(input_size[0], + input_size[1], + self.num_samples, + self.tscale, self.tscale) + return out + + def forward_test(self, raw_feature, video_meta): + """Define the computation performed at every call when testing.""" + confidence_map, start, end = self._forward(raw_feature) + start_scores = start[0].cpu().numpy() + end_scores = end[0].cpu().numpy() + cls_confidence = (confidence_map[0][1]).cpu().numpy() + reg_confidence = (confidence_map[0][0]).cpu().numpy() + + max_start = max(start_scores) + max_end = max(end_scores) + + # generate the set of start points and end points + start_bins = np.zeros(len(start_scores)) + start_bins[0] = 1 # [1,0,0...,0,0] + end_bins = np.zeros(len(end_scores)) + end_bins[-1] = 1 # [0,0,0...,0,1] + for idx in range(1, self.tscale - 1): + if start_scores[idx] > start_scores[ + idx + 1] and start_scores[idx] > start_scores[idx - 1]: + start_bins[idx] = 1 + elif start_scores[idx] > (0.5 * max_start): + start_bins[idx] = 1 + if end_scores[idx] > end_scores[ + idx + 1] and end_scores[idx] > end_scores[idx - 1]: + end_bins[idx] = 1 + elif end_scores[idx] > (0.5 * max_end): + end_bins[idx] = 1 + + # iterate through all combinations of start_index and end_index + new_proposals = [] + for idx in range(self.tscale): + for jdx in range(self.tscale): + start_index = jdx + end_index = start_index + idx + 1 + if 
end_index < self.tscale and start_bins[ + start_index] == 1 and end_bins[end_index] == 1: + tmin = start_index / self.tscale + tmax = end_index / self.tscale + tmin_score = start_scores[start_index] + tmax_score = end_scores[end_index] + cls_score = cls_confidence[idx, jdx] + reg_score = reg_confidence[idx, jdx] + score = tmin_score * tmax_score * cls_score * reg_score + new_proposals.append([ + tmin, tmax, tmin_score, tmax_score, cls_score, + reg_score, score + ]) + new_proposals = np.stack(new_proposals) + video_info = dict(video_meta[0]) + proposal_list = post_processing(new_proposals, video_info, + self.soft_nms_alpha, + self.soft_nms_low_threshold, + self.soft_nms_high_threshold, + self.post_process_top_k, + self.feature_extraction_interval) + output = [ + dict( + video_name=video_info['video_name'], + proposal_list=proposal_list) + ] + return output + + def forward_train(self, raw_feature, label_confidence, label_start, + label_end): + """Define the computation performed at every call when training.""" + confidence_map, start, end = self._forward(raw_feature) + loss = self.loss_cls(confidence_map, start, end, label_confidence, + label_start, label_end, + self.bm_mask.to(raw_feature.device)) + loss_dict = dict(loss=loss[0]) + return loss_dict + + def generate_labels(self, gt_bbox): + """Generate training labels.""" + match_score_confidence_list = [] + match_score_start_list = [] + match_score_end_list = [] + for every_gt_bbox in gt_bbox: + gt_iou_map = [] + for start, end in every_gt_bbox: + start = start.numpy() + end = end.numpy() + current_gt_iou_map = temporal_iou(self.match_map[:, 0], + self.match_map[:, 1], start, + end) + current_gt_iou_map = np.reshape(current_gt_iou_map, + [self.tscale, self.tscale]) + gt_iou_map.append(current_gt_iou_map) + gt_iou_map = np.array(gt_iou_map).astype(np.float32) + gt_iou_map = np.max(gt_iou_map, axis=0) + + gt_tmins = every_gt_bbox[:, 0] + gt_tmaxs = every_gt_bbox[:, 1] + + gt_len_pad = 3 * (1. 
/ self.tscale) + + gt_start_bboxs = np.stack( + (gt_tmins - gt_len_pad / 2, gt_tmins + gt_len_pad / 2), axis=1) + gt_end_bboxs = np.stack( + (gt_tmaxs - gt_len_pad / 2, gt_tmaxs + gt_len_pad / 2), axis=1) + + match_score_start = [] + match_score_end = [] + + for anchor_tmin, anchor_tmax in zip(self.anchors_tmins, + self.anchors_tmaxs): + match_score_start.append( + np.max( + temporal_iop(anchor_tmin, anchor_tmax, + gt_start_bboxs[:, 0], gt_start_bboxs[:, + 1]))) + match_score_end.append( + np.max( + temporal_iop(anchor_tmin, anchor_tmax, + gt_end_bboxs[:, 0], gt_end_bboxs[:, 1]))) + match_score_confidence_list.append(gt_iou_map) + match_score_start_list.append(match_score_start) + match_score_end_list.append(match_score_end) + match_score_confidence_list = torch.Tensor(match_score_confidence_list) + match_score_start_list = torch.Tensor(match_score_start_list) + match_score_end_list = torch.Tensor(match_score_end_list) + return (match_score_confidence_list, match_score_start_list, + match_score_end_list) + + def forward(self, + raw_feature, + gt_bbox=None, + video_meta=None, + return_loss=True): + """Define the computation performed at every call.""" + if return_loss: + label_confidence, label_start, label_end = ( + self.generate_labels(gt_bbox)) + device = raw_feature.device + label_confidence = label_confidence.to(device) + label_start = label_start.to(device) + label_end = label_end.to(device) + return self.forward_train(raw_feature, label_confidence, + label_start, label_end) + + return self.forward_test(raw_feature, video_meta) + + @staticmethod + def _get_interp1d_bin_mask(seg_tmin, seg_tmax, tscale, num_samples, + num_samples_per_bin): + """Generate sample mask for a boundary-matching pair.""" + plen = float(seg_tmax - seg_tmin) + plen_sample = plen / (num_samples * num_samples_per_bin - 1.0) + total_samples = [ + seg_tmin + plen_sample * i + for i in range(num_samples * num_samples_per_bin) + ] + p_mask = [] + for idx in range(num_samples): + bin_samples = total_samples[idx * num_samples_per_bin:(idx + 1) * + num_samples_per_bin] + bin_vector = np.zeros(tscale) + for sample in bin_samples: + sample_upper = math.ceil(sample) + sample_decimal, sample_down = math.modf(sample) + if 0 <= int(sample_down) <= (tscale - 1): + bin_vector[int(sample_down)] += 1 - sample_decimal + if 0 <= int(sample_upper) <= (tscale - 1): + bin_vector[int(sample_upper)] += sample_decimal + bin_vector = 1.0 / num_samples_per_bin * bin_vector + p_mask.append(bin_vector) + p_mask = np.stack(p_mask, axis=1) + return p_mask + + def _get_interp1d_mask(self): + """Generate sample mask for each point in Boundary-Matching Map.""" + mask_mat = [] + for start_index in range(self.tscale): + mask_mat_vector = [] + for duration_index in range(self.tscale): + if start_index + duration_index < self.tscale: + p_tmin = start_index + p_tmax = start_index + duration_index + center_len = float(p_tmax - p_tmin) + 1 + sample_tmin = p_tmin - (center_len * self.boundary_ratio) + sample_tmax = p_tmax + (center_len * self.boundary_ratio) + p_mask = self._get_interp1d_bin_mask( + sample_tmin, sample_tmax, self.tscale, + self.num_samples, self.num_samples_per_bin) + else: + p_mask = np.zeros([self.tscale, self.num_samples]) + mask_mat_vector.append(p_mask) + mask_mat_vector = np.stack(mask_mat_vector, axis=2) + mask_mat.append(mask_mat_vector) + mask_mat = np.stack(mask_mat, axis=3) + mask_mat = mask_mat.astype(np.float32) + self.sample_mask = nn.Parameter( + torch.tensor(mask_mat).view(self.tscale, -1), requires_grad=False) + + def 
_get_bm_mask(self): + """Generate Boundary-Matching Mask.""" + bm_mask = [] + for idx in range(self.tscale): + mask_vector = [1] * (self.tscale - idx) + [0] * idx + bm_mask.append(mask_vector) + bm_mask = torch.tensor(bm_mask, dtype=torch.float) + return bm_mask diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/bsn.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/bsn.py new file mode 100644 index 0000000..23f3e72 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/bsn.py @@ -0,0 +1,395 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ...localization import temporal_iop +from ..builder import build_loss +from ..registry import LOCALIZERS +from .base import BaseLocalizer +from .utils import post_processing + + +@LOCALIZERS.register_module() +class TEM(BaseLocalizer): + """Temporal Evaluation Model for Boundary Sensetive Network. + + Please refer `BSN: Boundary Sensitive Network for Temporal Action + Proposal Generation `_. + + Code reference + https://github.com/wzmsltw/BSN-boundary-sensitive-network + + Args: + tem_feat_dim (int): Feature dimension. + tem_hidden_dim (int): Hidden layer dimension. + tem_match_threshold (float): Temporal evaluation match threshold. + loss_cls (dict): Config for building loss. + Default: ``dict(type='BinaryLogisticRegressionLoss')``. + loss_weight (float): Weight term for action_loss. Default: 2. + output_dim (int): Output dimension. Default: 3. + conv1_ratio (float): Ratio of conv1 layer output. Default: 1.0. + conv2_ratio (float): Ratio of conv2 layer output. Default: 1.0. + conv3_ratio (float): Ratio of conv3 layer output. Default: 0.01. + """ + + def __init__(self, + temporal_dim, + boundary_ratio, + tem_feat_dim, + tem_hidden_dim, + tem_match_threshold, + loss_cls=dict(type='BinaryLogisticRegressionLoss'), + loss_weight=2, + output_dim=3, + conv1_ratio=1, + conv2_ratio=1, + conv3_ratio=0.01): + super(BaseLocalizer, self).__init__() + + self.temporal_dim = temporal_dim + self.boundary_ratio = boundary_ratio + self.feat_dim = tem_feat_dim + self.c_hidden = tem_hidden_dim + self.match_threshold = tem_match_threshold + self.output_dim = output_dim + self.loss_cls = build_loss(loss_cls) + self.loss_weight = loss_weight + self.conv1_ratio = conv1_ratio + self.conv2_ratio = conv2_ratio + self.conv3_ratio = conv3_ratio + + self.conv1 = nn.Conv1d( + in_channels=self.feat_dim, + out_channels=self.c_hidden, + kernel_size=3, + stride=1, + padding=1, + groups=1) + self.conv2 = nn.Conv1d( + in_channels=self.c_hidden, + out_channels=self.c_hidden, + kernel_size=3, + stride=1, + padding=1, + groups=1) + self.conv3 = nn.Conv1d( + in_channels=self.c_hidden, + out_channels=self.output_dim, + kernel_size=1, + stride=1, + padding=0) + self.anchors_tmins, self.anchors_tmaxs = self._temporal_anchors() + + def _temporal_anchors(self, tmin_offset=0., tmax_offset=1.): + """Generate temporal anchors. + + Args: + tmin_offset (int): Offset for the minimum value of temporal anchor. + Default: 0. + tmax_offset (int): Offset for the maximun value of temporal anchor. + Default: 1. + + Returns: + tuple[Sequence[float]]: The minimum and maximum values of temporal + anchors. + """ + temporal_gap = 1. 
/ self.temporal_dim + anchors_tmins = [] + anchors_tmaxs = [] + for i in range(self.temporal_dim): + anchors_tmins.append(temporal_gap * (i + tmin_offset)) + anchors_tmaxs.append(temporal_gap * (i + tmax_offset)) + + return anchors_tmins, anchors_tmaxs + + def _forward(self, x): + """Define the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + torch.Tensor: The output of the module. + """ + x = F.relu(self.conv1_ratio * self.conv1(x)) + x = F.relu(self.conv2_ratio * self.conv2(x)) + x = torch.sigmoid(self.conv3_ratio * self.conv3(x)) + return x + + def forward_train(self, raw_feature, label_action, label_start, label_end): + """Define the computation performed at every call when training.""" + tem_output = self._forward(raw_feature) + score_action = tem_output[:, 0, :] + score_start = tem_output[:, 1, :] + score_end = tem_output[:, 2, :] + + loss_action = self.loss_cls(score_action, label_action, + self.match_threshold) + loss_start_small = self.loss_cls(score_start, label_start, + self.match_threshold) + loss_end_small = self.loss_cls(score_end, label_end, + self.match_threshold) + loss_dict = { + 'loss_action': loss_action * self.loss_weight, + 'loss_start': loss_start_small, + 'loss_end': loss_end_small + } + + return loss_dict + + def forward_test(self, raw_feature, video_meta): + """Define the computation performed at every call when testing.""" + tem_output = self._forward(raw_feature).cpu().numpy() + batch_action = tem_output[:, 0, :] + batch_start = tem_output[:, 1, :] + batch_end = tem_output[:, 2, :] + + video_meta_list = [dict(x) for x in video_meta] + + video_results = [] + + for batch_idx, _ in enumerate(batch_action): + video_name = video_meta_list[batch_idx]['video_name'] + video_action = batch_action[batch_idx] + video_start = batch_start[batch_idx] + video_end = batch_end[batch_idx] + video_result = np.stack((video_action, video_start, video_end, + self.anchors_tmins, self.anchors_tmaxs), + axis=1) + video_results.append((video_name, video_result)) + return video_results + + def generate_labels(self, gt_bbox): + """Generate training labels.""" + match_score_action_list = [] + match_score_start_list = [] + match_score_end_list = [] + for every_gt_bbox in gt_bbox: + gt_tmins = every_gt_bbox[:, 0].cpu().numpy() + gt_tmaxs = every_gt_bbox[:, 1].cpu().numpy() + + gt_lens = gt_tmaxs - gt_tmins + gt_len_pad = np.maximum(1. 
/ self.temporal_dim, + self.boundary_ratio * gt_lens) + + gt_start_bboxs = np.stack( + (gt_tmins - gt_len_pad / 2, gt_tmins + gt_len_pad / 2), axis=1) + gt_end_bboxs = np.stack( + (gt_tmaxs - gt_len_pad / 2, gt_tmaxs + gt_len_pad / 2), axis=1) + + match_score_action = [] + match_score_start = [] + match_score_end = [] + + for anchor_tmin, anchor_tmax in zip(self.anchors_tmins, + self.anchors_tmaxs): + match_score_action.append( + np.max( + temporal_iop(anchor_tmin, anchor_tmax, gt_tmins, + gt_tmaxs))) + match_score_start.append( + np.max( + temporal_iop(anchor_tmin, anchor_tmax, + gt_start_bboxs[:, 0], gt_start_bboxs[:, + 1]))) + match_score_end.append( + np.max( + temporal_iop(anchor_tmin, anchor_tmax, + gt_end_bboxs[:, 0], gt_end_bboxs[:, 1]))) + match_score_action_list.append(match_score_action) + match_score_start_list.append(match_score_start) + match_score_end_list.append(match_score_end) + match_score_action_list = torch.Tensor(match_score_action_list) + match_score_start_list = torch.Tensor(match_score_start_list) + match_score_end_list = torch.Tensor(match_score_end_list) + return (match_score_action_list, match_score_start_list, + match_score_end_list) + + def forward(self, + raw_feature, + gt_bbox=None, + video_meta=None, + return_loss=True): + """Define the computation performed at every call.""" + if return_loss: + label_action, label_start, label_end = ( + self.generate_labels(gt_bbox)) + device = raw_feature.device + label_action = label_action.to(device) + label_start = label_start.to(device) + label_end = label_end.to(device) + return self.forward_train(raw_feature, label_action, label_start, + label_end) + + return self.forward_test(raw_feature, video_meta) + + +@LOCALIZERS.register_module() +class PEM(BaseLocalizer): + """Proposals Evaluation Model for Boundary Sensetive Network. + + Please refer `BSN: Boundary Sensitive Network for Temporal Action + Proposal Generation `_. + + Code reference + https://github.com/wzmsltw/BSN-boundary-sensitive-network + + Args: + pem_feat_dim (int): Feature dimension. + pem_hidden_dim (int): Hidden layer dimension. + pem_u_ratio_m (float): Ratio for medium score proprosals to balance + data. + pem_u_ratio_l (float): Ratio for low score proprosals to balance data. + pem_high_temporal_iou_threshold (float): High IoU threshold. + pem_low_temporal_iou_threshold (float): Low IoU threshold. + soft_nms_alpha (float): Soft NMS alpha. + soft_nms_low_threshold (float): Soft NMS low threshold. + soft_nms_high_threshold (float): Soft NMS high threshold. + post_process_top_k (int): Top k proposals in post process. + feature_extraction_interval (int): + Interval used in feature extraction. Default: 16. + fc1_ratio (float): Ratio for fc1 layer output. Default: 0.1. + fc2_ratio (float): Ratio for fc2 layer output. Default: 0.1. + output_dim (int): Output dimension. Default: 1. 
+ """ + + def __init__(self, + pem_feat_dim, + pem_hidden_dim, + pem_u_ratio_m, + pem_u_ratio_l, + pem_high_temporal_iou_threshold, + pem_low_temporal_iou_threshold, + soft_nms_alpha, + soft_nms_low_threshold, + soft_nms_high_threshold, + post_process_top_k, + feature_extraction_interval=16, + fc1_ratio=0.1, + fc2_ratio=0.1, + output_dim=1): + super(BaseLocalizer, self).__init__() + + self.feat_dim = pem_feat_dim + self.hidden_dim = pem_hidden_dim + self.u_ratio_m = pem_u_ratio_m + self.u_ratio_l = pem_u_ratio_l + self.pem_high_temporal_iou_threshold = pem_high_temporal_iou_threshold + self.pem_low_temporal_iou_threshold = pem_low_temporal_iou_threshold + self.soft_nms_alpha = soft_nms_alpha + self.soft_nms_low_threshold = soft_nms_low_threshold + self.soft_nms_high_threshold = soft_nms_high_threshold + self.post_process_top_k = post_process_top_k + self.feature_extraction_interval = feature_extraction_interval + self.fc1_ratio = fc1_ratio + self.fc2_ratio = fc2_ratio + self.output_dim = output_dim + + self.fc1 = nn.Linear( + in_features=self.feat_dim, out_features=self.hidden_dim, bias=True) + self.fc2 = nn.Linear( + in_features=self.hidden_dim, + out_features=self.output_dim, + bias=True) + + def _forward(self, x): + """Define the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + torch.Tensor: The output of the module. + """ + x = torch.cat(list(x)) + x = F.relu(self.fc1_ratio * self.fc1(x)) + x = torch.sigmoid(self.fc2_ratio * self.fc2(x)) + return x + + def forward_train(self, bsp_feature, reference_temporal_iou): + """Define the computation performed at every call when training.""" + pem_output = self._forward(bsp_feature) + reference_temporal_iou = torch.cat(list(reference_temporal_iou)) + device = pem_output.device + reference_temporal_iou = reference_temporal_iou.to(device) + + anchors_temporal_iou = pem_output.view(-1) + u_hmask = (reference_temporal_iou > + self.pem_high_temporal_iou_threshold).float() + u_mmask = ( + (reference_temporal_iou <= self.pem_high_temporal_iou_threshold) + & (reference_temporal_iou > self.pem_low_temporal_iou_threshold) + ).float() + u_lmask = (reference_temporal_iou <= + self.pem_low_temporal_iou_threshold).float() + + num_h = torch.sum(u_hmask) + num_m = torch.sum(u_mmask) + num_l = torch.sum(u_lmask) + + r_m = self.u_ratio_m * num_h / (num_m) + r_m = torch.min(r_m, torch.Tensor([1.0]).to(device))[0] + u_smmask = torch.rand(u_hmask.size()[0], device=device) + u_smmask = u_smmask * u_mmask + u_smmask = (u_smmask > (1. - r_m)).float() + + r_l = self.u_ratio_l * num_h / (num_l) + r_l = torch.min(r_l, torch.Tensor([1.0]).to(device))[0] + u_slmask = torch.rand(u_hmask.size()[0], device=device) + u_slmask = u_slmask * u_lmask + u_slmask = (u_slmask > (1. 
- r_l)).float() + + temporal_iou_weights = u_hmask + u_smmask + u_slmask + temporal_iou_loss = F.smooth_l1_loss(anchors_temporal_iou, + reference_temporal_iou) + temporal_iou_loss = torch.sum( + temporal_iou_loss * + temporal_iou_weights) / torch.sum(temporal_iou_weights) + loss_dict = dict(temporal_iou_loss=temporal_iou_loss) + + return loss_dict + + def forward_test(self, bsp_feature, tmin, tmax, tmin_score, tmax_score, + video_meta): + """Define the computation performed at every call when testing.""" + pem_output = self._forward(bsp_feature).view(-1).cpu().numpy().reshape( + -1, 1) + + tmin = tmin.view(-1).cpu().numpy().reshape(-1, 1) + tmax = tmax.view(-1).cpu().numpy().reshape(-1, 1) + tmin_score = tmin_score.view(-1).cpu().numpy().reshape(-1, 1) + tmax_score = tmax_score.view(-1).cpu().numpy().reshape(-1, 1) + score = np.array(pem_output * tmin_score * tmax_score).reshape(-1, 1) + result = np.concatenate( + (tmin, tmax, tmin_score, tmax_score, pem_output, score), axis=1) + result = result.reshape(-1, 6) + video_info = dict(video_meta[0]) + proposal_list = post_processing(result, video_info, + self.soft_nms_alpha, + self.soft_nms_low_threshold, + self.soft_nms_high_threshold, + self.post_process_top_k, + self.feature_extraction_interval) + output = [ + dict( + video_name=video_info['video_name'], + proposal_list=proposal_list) + ] + return output + + def forward(self, + bsp_feature, + reference_temporal_iou=None, + tmin=None, + tmax=None, + tmin_score=None, + tmax_score=None, + video_meta=None, + return_loss=True): + """Define the computation performed at every call.""" + if return_loss: + return self.forward_train(bsp_feature, reference_temporal_iou) + + return self.forward_test(bsp_feature, tmin, tmax, tmin_score, + tmax_score, video_meta) diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/ssn.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/ssn.py new file mode 100644 index 0000000..2ab9973 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/ssn.py @@ -0,0 +1,134 @@ +import torch +import torch.nn as nn + +from .. import builder +from ..registry import LOCALIZERS +from .base import BaseLocalizer + + +@LOCALIZERS.register_module() +class SSN(BaseLocalizer): + """Temporal Action Detection with Structured Segment Networks. + + Args: + backbone (dict): Config for building backbone. + cls_head (dict): Config for building classification head. + in_channels (int): Number of channels for input data. + Default: 3. + spatial_type (str): Type of spatial pooling. + Default: 'avg'. + dropout_ratio (float): Ratio of dropout. + Default: 0.5. + loss_cls (dict): Config for building loss. + Default: ``dict(type='SSNLoss')``. + train_cfg (dict | None): Config for training. Default: None. + test_cfg (dict | None): Config for testing. Default: None. 
+ """ + + def __init__(self, + backbone, + cls_head, + in_channels=3, + spatial_type='avg', + dropout_ratio=0.5, + loss_cls=dict(type='SSNLoss'), + train_cfg=None, + test_cfg=None): + + super().__init__(backbone, cls_head, train_cfg, test_cfg) + + self.is_test_prepared = False + self.in_channels = in_channels + + self.spatial_type = spatial_type + if self.spatial_type == 'avg': + self.pool = nn.AvgPool2d((7, 7), stride=1, padding=0) + elif self.spatial_type == 'max': + self.pool = nn.MaxPool2d((7, 7), stride=1, padding=0) + else: + self.pool = None + + self.dropout_ratio = dropout_ratio + if self.dropout_ratio != 0: + self.dropout = nn.Dropout(p=self.dropout_ratio) + else: + self.dropout = None + self.loss_cls = builder.build_loss(loss_cls) + + def forward_train(self, imgs, proposal_scale_factor, proposal_type, + proposal_labels, reg_targets, **kwargs): + """Define the computation performed at every call when training.""" + imgs = imgs.reshape((-1, self.in_channels) + imgs.shape[4:]) + + x = self.extract_feat(imgs) + + if self.pool: + x = self.pool(x) + if self.dropout is not None: + x = self.dropout(x) + + activity_scores, completeness_scores, bbox_preds = self.cls_head( + (x, proposal_scale_factor)) + + loss = self.loss_cls(activity_scores, completeness_scores, bbox_preds, + proposal_type, proposal_labels, reg_targets, + self.train_cfg) + loss_dict = dict(**loss) + + return loss_dict + + def forward_test(self, imgs, relative_proposal_list, scale_factor_list, + proposal_tick_list, reg_norm_consts, **kwargs): + """Define the computation performed at every call when testing.""" + num_crops = imgs.shape[0] + imgs = imgs.reshape((num_crops, -1, self.in_channels) + imgs.shape[3:]) + num_ticks = imgs.shape[1] + + output = [] + minibatch_size = self.test_cfg.ssn.sampler.batch_size + for idx in range(0, num_ticks, minibatch_size): + chunk = imgs[:, idx:idx + + minibatch_size, :, :, :].view((-1, ) + imgs.shape[2:]) + x = self.extract_feat(chunk) + if self.pool: + x = self.pool(x) + # Merge crop to save memory. 
+ x = x.reshape((num_crops, x.size(0) // num_crops, -1)).mean(dim=0) + output.append(x) + output = torch.cat(output, dim=0) + + relative_proposal_list = relative_proposal_list.squeeze(0) + proposal_tick_list = proposal_tick_list.squeeze(0) + scale_factor_list = scale_factor_list.squeeze(0) + reg_norm_consts = reg_norm_consts.squeeze(0) + + if not self.is_test_prepared: + self.is_test_prepared = self.cls_head.prepare_test_fc( + self.cls_head.consensus.num_multipliers) + + (output, activity_scores, completeness_scores, + bbox_preds) = self.cls_head( + (output, proposal_tick_list, scale_factor_list), test_mode=True) + + relative_proposal_list = relative_proposal_list.cpu().numpy() + activity_scores = activity_scores.cpu().numpy() + completeness_scores = completeness_scores.cpu().numpy() + if bbox_preds is not None: + bbox_preds = bbox_preds.view(-1, self.cls_head.num_classes, 2) + bbox_preds[:, :, 0] = ( + bbox_preds[:, :, 0] * reg_norm_consts[1, 0] + + reg_norm_consts[0, 0]) + bbox_preds[:, :, 1] = ( + bbox_preds[:, :, 1] * reg_norm_consts[1, 1] + + reg_norm_consts[0, 1]) + bbox_preds = bbox_preds.cpu().numpy() + + result = [ + dict( + relative_proposal_list=relative_proposal_list, + activity_scores=activity_scores, + completeness_scores=completeness_scores, + bbox_preds=bbox_preds) + ] + + return result diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/utils/__init__.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/utils/__init__.py new file mode 100644 index 0000000..bb2fc92 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/utils/__init__.py @@ -0,0 +1,3 @@ +from .post_processing import post_processing + +__all__ = ['post_processing'] diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/utils/post_processing.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/utils/post_processing.py new file mode 100644 index 0000000..74a6f87 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/localizers/utils/post_processing.py @@ -0,0 +1,44 @@ +from mmaction.localization import soft_nms + + +def post_processing(result, video_info, soft_nms_alpha, soft_nms_low_threshold, + soft_nms_high_threshold, post_process_top_k, + feature_extraction_interval): + """Post process for temporal proposals generation. + + Args: + result (np.ndarray): Proposals generated by network. + video_info (dict): Meta data of video. Required keys are + 'duration_frame', 'duration_second'. + soft_nms_alpha (float): Alpha value of Gaussian decaying function. + soft_nms_low_threshold (float): Low threshold for soft nms. + soft_nms_high_threshold (float): High threshold for soft nms. + post_process_top_k (int): Top k values to be considered. + feature_extraction_interval (int): Interval used in feature extraction. + + Returns: + list[dict]: The updated proposals, e.g. + [{'score': 0.9, 'segment': [0, 1]}, + {'score': 0.8, 'segment': [0, 2]}, + ...]. 
+ """ + if len(result) > 1: + result = soft_nms(result, soft_nms_alpha, soft_nms_low_threshold, + soft_nms_high_threshold, post_process_top_k) + + result = result[result[:, -1].argsort()[::-1]] + video_duration = float( + video_info['duration_frame'] // feature_extraction_interval * + feature_extraction_interval + ) / video_info['duration_frame'] * video_info['duration_second'] + proposal_list = [] + + for j in range(min(post_process_top_k, len(result))): + proposal = {} + proposal['score'] = float(result[j, -1]) + proposal['segment'] = [ + max(0, result[j, 0]) * video_duration, + min(1, result[j, 1]) * video_duration + ] + proposal_list.append(proposal) + return proposal_list diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/__init__.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/__init__.py new file mode 100644 index 0000000..93875af --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/__init__.py @@ -0,0 +1,19 @@ +from .base import BaseWeightedLoss +from .binary_logistic_regression_loss import BinaryLogisticRegressionLoss +from .bmn_loss import BMNLoss +from .cross_entropy_loss import BCELossWithLogits, CrossEntropyLoss +from .bnn_loss import BayesianNNLoss +from .edl_loss import EvidenceLoss +from .hvu_loss import HVULoss +from .nll_loss import NLLLoss +from .ohem_hinge_loss import OHEMHingeLoss +from .ssn_loss import SSNLoss +from .rebias_loss import RebiasLoss +from .rpl_loss import RPLoss +from .gcp_loss import GCPLoss + +__all__ = [ + 'BaseWeightedLoss', 'CrossEntropyLoss', 'NLLLoss', 'BCELossWithLogits', + 'BinaryLogisticRegressionLoss', 'BMNLoss', 'OHEMHingeLoss', 'SSNLoss', + 'HVULoss', "BayesianNNLoss", "EvidenceLoss", "RebiasLoss", 'RPLoss', 'GCPLoss' +] diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/base.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/base.py new file mode 100644 index 0000000..eb1a43f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/base.py @@ -0,0 +1,44 @@ +from abc import ABCMeta, abstractmethod + +import torch.nn as nn + + +class BaseWeightedLoss(nn.Module, metaclass=ABCMeta): + """Base class for loss. + + All subclass should overwrite the ``_forward()`` method which returns the + normal loss without loss weights. + + Args: + loss_weight (float): Factor scalar multiplied on the loss. + Default: 1.0. + """ + + def __init__(self, loss_weight=1.0): + super().__init__() + self.loss_weight = loss_weight + + @abstractmethod + def _forward(self, *args, **kwargs): + pass + + def forward(self, *args, **kwargs): + """Defines the computation performed at every call. + + Args: + *args: The positional arguments for the corresponding + loss. + **kwargs: The keyword arguments for the corresponding + loss. + + Returns: + torch.Tensor: The calculated loss. 
+ """ + ret = self._forward(*args, **kwargs) + if isinstance(ret, dict): + for k in ret: + if 'loss' in k: + ret[k] *= self.loss_weight + else: + ret *= self.loss_weight + return ret diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/binary_logistic_regression_loss.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/binary_logistic_regression_loss.py new file mode 100644 index 0000000..343d1ca --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/binary_logistic_regression_loss.py @@ -0,0 +1,61 @@ +import torch +import torch.nn as nn + +from ..registry import LOSSES + + +def binary_logistic_regression_loss(reg_score, + label, + threshold=0.5, + ratio_range=(1.05, 21), + eps=1e-5): + """Binary Logistic Regression Loss.""" + label = label.view(-1).to(reg_score.device) + reg_score = reg_score.contiguous().view(-1) + + pmask = (label > threshold).float().to(reg_score.device) + num_positive = max(torch.sum(pmask), 1) + num_entries = len(label) + ratio = num_entries / num_positive + # clip ratio value between ratio_range + ratio = min(max(ratio, ratio_range[0]), ratio_range[1]) + + coef_0 = 0.5 * ratio / (ratio - 1) + coef_1 = 0.5 * ratio + loss = coef_1 * pmask * torch.log(reg_score + eps) + coef_0 * ( + 1.0 - pmask) * torch.log(1.0 - reg_score + eps) + loss = -torch.mean(loss) + return loss + + +@LOSSES.register_module() +class BinaryLogisticRegressionLoss(nn.Module): + """Binary Logistic Regression Loss. + + It will calculate binary logistic regression loss given reg_score and + label. + """ + + def forward(self, + reg_score, + label, + threshold=0.5, + ratio_range=(1.05, 21), + eps=1e-5): + """Calculate Binary Logistic Regression Loss. + + Args: + reg_score (torch.Tensor): Predicted score by model. + label (torch.Tensor): Groundtruth labels. + threshold (float): Threshold for positive instances. + Default: 0.5. + ratio_range (tuple): Lower bound and upper bound for ratio. + Default: (1.05, 21) + eps (float): Epsilon for small value. Default: 1e-5. + + Returns: + torch.Tensor: Returned binary logistic loss. + """ + + return binary_logistic_regression_loss(reg_score, label, threshold, + ratio_range, eps) diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/bmn_loss.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/bmn_loss.py new file mode 100644 index 0000000..50e4972 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/bmn_loss.py @@ -0,0 +1,180 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..registry import LOSSES +from .binary_logistic_regression_loss import binary_logistic_regression_loss + + +@LOSSES.register_module() +class BMNLoss(nn.Module): + """BMN Loss. + + From paper https://arxiv.org/abs/1907.09702, + code https://github.com/JJBOY/BMN-Boundary-Matching-Network. + It will calculate loss for BMN Model. This loss is a weighted sum of + + 1) temporal evaluation loss based on confidence score of start and + end positions. + 2) proposal evaluation regression loss based on confidence scores of + candidate proposals. + 3) proposal evaluation classification loss based on classification + results of candidate proposals. + """ + + @staticmethod + def tem_loss(pred_start, pred_end, gt_start, gt_end): + """Calculate Temporal Evaluation Module Loss. + + This function calculate the binary_logistic_regression_loss for start + and end respectively and returns the sum of their losses. 
+ + Args: + pred_start (torch.Tensor): Predicted start score by BMN model. + pred_end (torch.Tensor): Predicted end score by BMN model. + gt_start (torch.Tensor): Groundtruth confidence score for start. + gt_end (torch.Tensor): Groundtruth confidence score for end. + + Returns: + torch.Tensor: Returned binary logistic loss. + """ + loss_start = binary_logistic_regression_loss(pred_start, gt_start) + loss_end = binary_logistic_regression_loss(pred_end, gt_end) + loss = loss_start + loss_end + return loss + + @staticmethod + def pem_reg_loss(pred_score, + gt_iou_map, + mask, + high_temporal_iou_threshold=0.7, + low_temporal_iou_threshold=0.3): + """Calculate Proposal Evaluation Module Regression Loss. + + Args: + pred_score (torch.Tensor): Predicted temporal_iou score by BMN. + gt_iou_map (torch.Tensor): Groundtruth temporal_iou score. + mask (torch.Tensor): Boundary-Matching mask. + high_temporal_iou_threshold (float): Higher threshold of + temporal_iou. Default: 0.7. + low_temporal_iou_threshold (float): Higher threshold of + temporal_iou. Default: 0.3. + + Returns: + torch.Tensor: Proposal evalutaion regression loss. + """ + u_hmask = (gt_iou_map > high_temporal_iou_threshold).float() + u_mmask = ((gt_iou_map <= high_temporal_iou_threshold) & + (gt_iou_map > low_temporal_iou_threshold)).float() + u_lmask = ((gt_iou_map <= low_temporal_iou_threshold) & + (gt_iou_map > 0.)).float() + u_lmask = u_lmask * mask + + num_h = torch.sum(u_hmask) + num_m = torch.sum(u_mmask) + num_l = torch.sum(u_lmask) + + r_m = num_h / num_m + u_smmask = torch.rand_like(gt_iou_map) + u_smmask = u_mmask * u_smmask + u_smmask = (u_smmask > (1. - r_m)).float() + + r_l = num_h / num_l + u_slmask = torch.rand_like(gt_iou_map) + u_slmask = u_lmask * u_slmask + u_slmask = (u_slmask > (1. - r_l)).float() + + weights = u_hmask + u_smmask + u_slmask + + loss = F.mse_loss(pred_score * weights, gt_iou_map * weights) + loss = 0.5 * torch.sum( + loss * torch.ones_like(weights)) / torch.sum(weights) + + return loss + + @staticmethod + def pem_cls_loss(pred_score, + gt_iou_map, + mask, + threshold=0.9, + ratio_range=(1.05, 21), + eps=1e-5): + """Calculate Proposal Evaluation Module Classification Loss. + + Args: + pred_score (torch.Tensor): Predicted temporal_iou score by BMN. + gt_iou_map (torch.Tensor): Groundtruth temporal_iou score. + mask (torch.Tensor): Boundary-Matching mask. + threshold (float): Threshold of temporal_iou for positive + instances. Default: 0.9. + ratio_range (tuple): Lower bound and upper bound for ratio. + Default: (1.05, 21) + eps (float): Epsilon for small value. Default: 1e-5 + + Returns: + torch.Tensor: Proposal evalutaion classification loss. + """ + pmask = (gt_iou_map > threshold).float() + nmask = (gt_iou_map <= threshold).float() + nmask = nmask * mask + + num_positive = max(torch.sum(pmask), 1) + num_entries = num_positive + torch.sum(nmask) + ratio = num_entries / num_positive + ratio = torch.clamp(ratio, ratio_range[0], ratio_range[1]) + + coef_0 = 0.5 * ratio / (ratio - 1) + coef_1 = 0.5 * ratio + + loss_pos = coef_1 * torch.log(pred_score + eps) * pmask + loss_neg = coef_0 * torch.log(1.0 - pred_score + eps) * nmask + loss = -1 * torch.sum(loss_pos + loss_neg) / num_entries + return loss + + def forward(self, + pred_bm, + pred_start, + pred_end, + gt_iou_map, + gt_start, + gt_end, + bm_mask, + weight_tem=1.0, + weight_pem_reg=10.0, + weight_pem_cls=1.0): + """Calculate Boundary Matching Network Loss. + + Args: + pred_bm (torch.Tensor): Predicted confidence score for boundary + matching map. 
+ pred_start (torch.Tensor): Predicted confidence score for start. + pred_end (torch.Tensor): Predicted confidence score for end. + gt_iou_map (torch.Tensor): Groundtruth score for boundary matching + map. + gt_start (torch.Tensor): Groundtruth temporal_iou score for start. + gt_end (torch.Tensor): Groundtruth temporal_iou score for end. + bm_mask (torch.Tensor): Boundary-Matching mask. + weight_tem (float): Weight for tem loss. Default: 1.0. + weight_pem_reg (float): Weight for pem regression loss. + Default: 10.0. + weight_pem_cls (float): Weight for pem classification loss. + Default: 1.0. + + Returns: + tuple([torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]): + (loss, tem_loss, pem_reg_loss, pem_cls_loss). Loss is the bmn + loss, tem_loss is the temporal evaluation loss, pem_reg_loss is + the proposal evaluation regression loss, pem_cls_loss is the + proposal evaluation classification loss. + """ + pred_bm_reg = pred_bm[:, 0].contiguous() + pred_bm_cls = pred_bm[:, 1].contiguous() + gt_iou_map = gt_iou_map * bm_mask + + pem_reg_loss = self.pem_reg_loss(pred_bm_reg, gt_iou_map, bm_mask) + pem_cls_loss = self.pem_cls_loss(pred_bm_cls, gt_iou_map, bm_mask) + tem_loss = self.tem_loss(pred_start, pred_end, gt_start, gt_end) + loss = ( + weight_tem * tem_loss + weight_pem_reg * pem_reg_loss + + weight_pem_cls * pem_cls_loss) + return loss, tem_loss, pem_reg_loss, pem_cls_loss diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/bnn_loss.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/bnn_loss.py new file mode 100644 index 0000000..c524db1 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/bnn_loss.py @@ -0,0 +1,54 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..registry import LOSSES +from ...core import top_k_accuracy + +@LOSSES.register_module() +class BayesianNNLoss(nn.Module): + """Bayesian NN Loss.""" + + def forward(self, cls_score, labels, output_dict, beta=1.0, **kwargs): + """Forward function. + + Args: + cls_score (torch.Tensor): The class score. + label (torch.Tensor): The ground truth label. + prior (torch.Tensor): The log prior + posterior (torch.Tensor): The log variational posterior + kwargs: Any keyword argument to be used to calculate + Bayesian NN loss. + + Returns: + torch.Tensor: The returned Bayesian NN loss. 
+ """ + losses = dict() + if labels.shape == torch.Size([]): + labels = labels.unsqueeze(0) + + # negative log-likelihood (BCE loss) + loss_cls = F.cross_entropy(cls_score, labels, **kwargs) + # parse the output + log_prior = output_dict['log_prior'] + log_posterior = output_dict['log_posterior'] + + # complexity regularizer + loss_complexity = beta * (log_posterior - log_prior) + # total loss + loss = loss_cls + loss_complexity + # accuracy metrics + top_k_acc = top_k_accuracy(cls_score.detach().cpu().numpy(), + labels.detach().cpu().numpy(), (1, 5)) + losses = {'loss_cls': loss_cls, 'loss_complexity': loss_complexity, # items to be backwarded + 'LOSS_total': loss, # items for monitoring + 'log_posterior': beta * log_posterior, + 'log_prior': beta * log_prior, + 'top1_acc': torch.tensor(top_k_acc[0], device=cls_score.device), + 'top5_acc': torch.tensor(top_k_acc[1], device=cls_score.device) + } + if 'aleatoric' in output_dict: losses.update({'aleatoric': output_dict['aleatoric']}) + if 'epistemic' in output_dict: losses.update({'epistemic': output_dict['epistemic']}) + return losses + + diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/cross_entropy_loss.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/cross_entropy_loss.py new file mode 100644 index 0000000..0212b59 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/cross_entropy_loss.py @@ -0,0 +1,45 @@ +import torch.nn.functional as F + +from ..registry import LOSSES +from .base import BaseWeightedLoss + + +@LOSSES.register_module() +class CrossEntropyLoss(BaseWeightedLoss): + """Cross Entropy Loss.""" + + def _forward(self, cls_score, label, **kwargs): + """Forward function. + + Args: + cls_score (torch.Tensor): The class score. + label (torch.Tensor): The ground truth label. + kwargs: Any keyword argument to be used to calculate + CrossEntropy loss. + + Returns: + torch.Tensor: The returned CrossEntropy loss. + """ + loss_cls = F.cross_entropy(cls_score, label, **kwargs) + return loss_cls + + +@LOSSES.register_module() +class BCELossWithLogits(BaseWeightedLoss): + """Binary Cross Entropy Loss with logits.""" + + def _forward(self, cls_score, label, **kwargs): + """Forward function. + + Args: + cls_score (torch.Tensor): The class score. + label (torch.Tensor): The ground truth label. + kwargs: Any keyword argument to be used to calculate + bce loss with logits. + + Returns: + torch.Tensor: The returned bce loss with logits. 
+ """ + loss_cls = F.binary_cross_entropy_with_logits(cls_score, label, + **kwargs) + return loss_cls diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/edl_loss.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/edl_loss.py new file mode 100644 index 0000000..736b09e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/edl_loss.py @@ -0,0 +1,210 @@ +import torch +import torch.nn.functional as F + +from ..registry import LOSSES +from .base import BaseWeightedLoss + +def relu_evidence(y): + return F.relu(y) + +def exp_evidence(y): + return torch.exp(torch.clamp(y, -10, 10)) + +def softplus_evidence(y): + return F.softplus(y) + +@LOSSES.register_module() +class EvidenceLoss(BaseWeightedLoss): + """Evidential MSE Loss.""" + def __init__(self, num_classes, + evidence='relu', + loss_type='mse', + with_kldiv=True, + with_avuloss=False, + disentangle=False, + annealing_method='step', + annealing_start=0.01, + annealing_step=10): + super().__init__() + self.num_classes = num_classes + self.evidence = evidence + self.loss_type = loss_type + self.with_kldiv = with_kldiv + self.with_avuloss = with_avuloss + self.disentangle = disentangle + self.annealing_method = annealing_method + self.annealing_start = annealing_start + self.annealing_step = annealing_step + self.eps = 1e-10 + + def kl_divergence(self, alpha): + beta = torch.ones([1, self.num_classes], dtype=torch.float32).to(alpha.device) + S_alpha = torch.sum(alpha, dim=1, keepdim=True) + S_beta = torch.sum(beta, dim=1, keepdim=True) + lnB = torch.lgamma(S_alpha) - \ + torch.sum(torch.lgamma(alpha), dim=1, keepdim=True) + lnB_uni = torch.sum(torch.lgamma(beta), dim=1, + keepdim=True) - torch.lgamma(S_beta) + + dg0 = torch.digamma(S_alpha) + dg1 = torch.digamma(alpha) + + kl = torch.sum((alpha - beta) * (dg1 - dg0), dim=1, + keepdim=True) + lnB + lnB_uni + return kl + + def loglikelihood_loss(self, y, alpha): + S = torch.sum(alpha, dim=1, keepdim=True) + loglikelihood_err = torch.sum( + (y - (alpha / S)) ** 2, dim=1, keepdim=True) + loglikelihood_var = torch.sum( + alpha * (S - alpha) / (S * S * (S + 1)), dim=1, keepdim=True) + return loglikelihood_err, loglikelihood_var + + def mse_loss(self, y, alpha, annealing_coef): + """Used only for loss_type == 'mse' + y: the one-hot labels (batchsize, num_classes) + alpha: the predictions (batchsize, num_classes) + epoch_num: the current training epoch + """ + losses = {} + loglikelihood_err, loglikelihood_var = self.loglikelihood_loss(y, alpha) + losses.update({'loss_cls': loglikelihood_err, 'loss_var': loglikelihood_var}) + + losses.update({'lambda': annealing_coef}) + if self.with_kldiv: + kl_alpha = (alpha - 1) * (1 - y) + 1 + kl_div = annealing_coef * \ + self.kl_divergence(kl_alpha) + losses.update({'loss_kl': kl_div}) + + if self.with_avuloss: + S = torch.sum(alpha, dim=1, keepdim=True) # Dirichlet strength + pred_score = alpha / S + uncertainty = self.num_classes / S + # avu_loss = annealing_coef * + return losses + + def ce_loss(self, target, y, alpha, annealing_coef): + """Used only for loss_type == 'ce' + target: the scalar labels (batchsize,) + alpha: the predictions (batchsize, num_classes), alpha >= 1 + epoch_num: the current training epoch + """ + losses = {} + # (1) the classification loss term + S = torch.sum(alpha, dim=1, keepdim=True) + pred_score = alpha / S + loss_cls = F.nll_loss(torch.log(pred_score), target, reduction='none') + losses.update({'loss_cls': loss_cls}) + + # (2) the likelihood variance term + 
loglikelihood_var = torch.sum( + alpha * (S - alpha) / (S * S * (S + 1)), dim=1, keepdim=True) + losses.update({'loss_var': loglikelihood_var}) + + # (3) the KL divergence term + kl_alpha = (alpha - 1) * (1 - y) + 1 + kl_div = annealing_coef * \ + self.kl_divergence(kl_alpha) + losses.update({'loss_kl': kl_div, 'lambda': annealing_coef}) + return losses + + def edl_loss(self, func, y, alpha, annealing_coef, target): + """Used for both loss_type == 'log' and loss_type == 'digamma' + func: function handler (torch.log, or torch.digamma) + y: the one-hot labels (batchsize, num_classes) + alpha: the predictions (batchsize, num_classes) + epoch_num: the current training epoch + """ + losses = {} + S = torch.sum(alpha, dim=1, keepdim=True) + A = torch.sum(y * (func(S) - func(alpha)), dim=1, keepdim=True) + losses.update({'loss_cls': A}) + + losses.update({'lambda': annealing_coef}) + if self.with_kldiv: + kl_alpha = (alpha - 1) * (1 - y) + 1 + kl_div = annealing_coef * \ + self.kl_divergence(kl_alpha) + losses.update({'loss_kl': kl_div}) + + if self.with_avuloss: + pred_scores, pred_cls = torch.max(alpha / S, 1, keepdim=True) + uncertainty = self.num_classes / S + acc_match = torch.reshape(torch.eq(pred_cls, target.unsqueeze(1)).float(), (-1, 1)) + if self.disentangle: + acc_uncertain = - torch.log(pred_scores * (1 - uncertainty) + self.eps) + inacc_certain = - torch.log((1 - pred_scores) * uncertainty + self.eps) + else: + acc_uncertain = - pred_scores * torch.log(1 - uncertainty + self.eps) + inacc_certain = - (1 - pred_scores) * torch.log(uncertainty + self.eps) + avu_loss = annealing_coef * acc_match * acc_uncertain + (1 - annealing_coef) * (1 - acc_match) * inacc_certain + losses.update({'loss_avu': avu_loss}) + return losses + + def compute_annealing_coef(self, **kwargs): + assert 'epoch' in kwargs, "epoch number is missing!" + assert 'total_epoch' in kwargs, "total epoch number is missing!" + epoch_num, total_epoch = kwargs['epoch'], kwargs['total_epoch'] + # annealing coefficient + if self.annealing_method == 'step': + annealing_coef = torch.min(torch.tensor( + 1.0, dtype=torch.float32), torch.tensor(epoch_num / self.annealing_step, dtype=torch.float32)) + elif self.annealing_method == 'exp': + annealing_start = torch.tensor(self.annealing_start, dtype=torch.float32) + annealing_coef = annealing_start * torch.exp(-torch.log(annealing_start) / total_epoch * epoch_num) + else: + raise NotImplementedError + return annealing_coef + + def _forward(self, output, target, **kwargs): + """Forward function. + Args: + output (torch.Tensor): The class score (before softmax). + target (torch.Tensor): The ground truth label. + epoch_num: The number of epochs during training. + Returns: + torch.Tensor: The returned EvidenceLoss loss. 
+ """ + # get evidence + if self.evidence == 'relu': + evidence = relu_evidence(output) + elif self.evidence == 'exp': + evidence = exp_evidence(output) + elif self.evidence == 'softplus': + evidence = softplus_evidence(output) + else: + raise NotImplementedError + alpha = evidence + 1 + + # one-hot embedding for the target + y = torch.eye(self.num_classes).to(output.device) + y = y[target] + # compute annealing coefficient + annealing_coef = self.compute_annealing_coef(**kwargs) + + # compute the EDL loss + if self.loss_type == 'mse': + results = self.mse_loss(y, alpha, annealing_coef) + elif self.loss_type == 'log': + results = self.edl_loss(torch.log, y, alpha, annealing_coef, target) + elif self.loss_type == 'digamma': + results = self.edl_loss(torch.digamma, y, alpha, annealing_coef, target) + elif self.loss_type == 'cross_entropy': + results = self.ce_loss(target, y, alpha, annealing_coef) + else: + raise NotImplementedError + + # compute uncertainty and evidence + _, preds = torch.max(output, 1) + match = torch.reshape(torch.eq(preds, target).float(), (-1, 1)) + uncertainty = self.num_classes / torch.sum(alpha, dim=1, keepdim=True) + total_evidence = torch.sum(evidence, 1, keepdim=True) + evidence_succ = torch.sum(total_evidence * match) / torch.sum(match + 1e-20) + evidence_fail = torch.sum(total_evidence * (1 - match)) / (torch.sum(torch.abs(1 - match)) + 1e-20) + results.update({'uncertainty': uncertainty, + 'evidence_succ': evidence_succ, + 'evidence_fail': evidence_fail}) + + return results \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/gcp_loss.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/gcp_loss.py new file mode 100644 index 0000000..ab0a8dc --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/gcp_loss.py @@ -0,0 +1,46 @@ +import torch +import torch.nn.functional as F + +from ..registry import LOSSES +from .base import BaseWeightedLoss +from ...core import top_k_accuracy + + +@LOSSES.register_module() +class GCPLoss(BaseWeightedLoss): + """Reciprocal Point Learning Loss.""" + def __init__(self, temperature=1, weight_pl=0.1, radius_init=1): + super().__init__() + self.temperature = temperature + self.weight_pl = weight_pl + + def _forward(self, head_outs, labels, **kwargs): + """Forward function. + + Args: + head_outs (torch.Tensor): outputs of the RPL head + label (torch.Tensor): The ground truth label. + kwargs: Any keyword argument to be used to calculate + CrossEntropy loss. + + Returns: + torch.Tensor: The returned CrossEntropy loss. 
+ """ + dist, feature, centers = head_outs['dist'], head_outs['feature'], head_outs['centers'] + if labels.shape == torch.Size([]): + labels = labels.unsqueeze(0) + # compute losses + logits = F.softmax(dist, dim=1) + loss_closed = F.cross_entropy(dist / self.temperature, labels, **kwargs) + center_batch = centers[labels, :] + loss_r = F.mse_loss(feature, center_batch) / 2 + # gather losses + losses = {'loss_cls': loss_closed, 'loss_open': self.weight_pl * loss_r} + + # compute top-K accuracy using CPU numpy + top_k_acc = top_k_accuracy(logits.detach().cpu().numpy(), + labels.detach().cpu().numpy(), (1, 5)) + losses.update({'top1_acc': torch.tensor(top_k_acc[0], device=dist.device)}) + losses.update({'top5_acc': torch.tensor(top_k_acc[1], device=dist.device)}) + + return losses \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/hvu_loss.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/hvu_loss.py new file mode 100644 index 0000000..9d9b005 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/hvu_loss.py @@ -0,0 +1,141 @@ +import torch +import torch.nn.functional as F + +from ..registry import LOSSES +from .base import BaseWeightedLoss + + +@LOSSES.register_module() +class HVULoss(BaseWeightedLoss): + """Calculate the BCELoss for HVU. + + Args: + categories (tuple[str]): Names of tag categories, tags are organized in + this order. Default: ['action', 'attribute', 'concept', 'event', + 'object', 'scene']. + category_nums (tuple[int]): Number of tags for each category. Default: + (739, 117, 291, 69, 1678, 248). + category_loss_weights (tuple[float]): Loss weights of categories, it + applies only if `loss_type == 'individual'`. The loss weights will + be normalized so that the sum equals to 1, so that you can give any + positive number as loss weight. Default: (1, 1, 1, 1, 1, 1). + loss_type (str): The loss type we calculate, we can either calculate + the BCELoss for all tags, or calculate the BCELoss for tags in each + category. Choices are 'individual' or 'all'. Default: 'all'. + with_mask (bool): Since some tag categories are missing for some video + clips. If `with_mask == True`, we will not calculate loss for these + missing categories. Otherwise, these missing categories are treated + as negative samples. + reduction (str): Reduction way. Choices are 'mean' or 'sum'. Default: + 'mean'. + loss_weight (float): The loss weight. Default: 1.0. + """ + + def __init__(self, + categories=('action', 'attribute', 'concept', 'event', + 'object', 'scene'), + category_nums=(739, 117, 291, 69, 1678, 248), + category_loss_weights=(1, 1, 1, 1, 1, 1), + loss_type='all', + with_mask=False, + reduction='mean', + loss_weight=1.0): + + super().__init__(loss_weight) + self.categories = categories + self.category_nums = category_nums + self.category_loss_weights = category_loss_weights + assert len(self.category_nums) == len(self.category_loss_weights) + for category_loss_weight in self.category_loss_weights: + assert category_loss_weight >= 0 + self.loss_type = loss_type + self.with_mask = with_mask + self.reduction = reduction + self.category_startidx = [0] + for i in range(len(self.category_nums) - 1): + self.category_startidx.append(self.category_startidx[-1] + + self.category_nums[i]) + assert self.loss_type in ['individual', 'all'] + assert self.reduction in ['mean', 'sum'] + + def _forward(self, cls_score, label, mask, category_mask): + """Forward function. 
+ + Args: + cls_score (torch.Tensor): The class score. + label (torch.Tensor): The ground truth label. + mask (torch.Tensor): The mask of tags. 0 indicates that the + category of this tag is missing in the label of the video. + category_mask (torch.Tensor): The category mask. For each sample, + it's a tensor with length `len(self.categories)`, denotes that + if the category is labeled for this video. + + Returns: + torch.Tensor: The returned CrossEntropy loss. + """ + + if self.loss_type == 'all': + loss_cls = F.binary_cross_entropy_with_logits( + cls_score, label, reduction='none') + if self.with_mask: + w_loss_cls = mask * loss_cls + w_loss_cls = torch.sum(w_loss_cls, dim=1) + if self.reduction == 'mean': + w_loss_cls = w_loss_cls / torch.sum(mask, dim=1) + w_loss_cls = torch.mean(w_loss_cls) + return dict(loss_cls=w_loss_cls) + + if self.reduction == 'sum': + loss_cls = torch.sum(loss_cls, dim=-1) + return dict(loss_cls=torch.mean(loss_cls)) + + if self.loss_type == 'individual': + losses = {} + loss_weights = {} + for name, num, start_idx in zip(self.categories, + self.category_nums, + self.category_startidx): + category_score = cls_score[:, start_idx:start_idx + num] + category_label = label[:, start_idx:start_idx + num] + category_loss = F.binary_cross_entropy_with_logits( + category_score, category_label, reduction='none') + if self.reduction == 'mean': + category_loss = torch.mean(category_loss, dim=1) + elif self.reduction == 'sum': + category_loss = torch.sum(category_loss, dim=1) + + idx = self.categories.index(name) + if self.with_mask: + category_mask_i = category_mask[:, idx].reshape(-1) + # there should be at least one sample which contains tags + # in thie category + if torch.sum(category_mask_i) < 0.5: + losses[f'{name}_LOSS'] = torch.tensor(.0).cuda() + loss_weights[f'{name}_LOSS'] = .0 + continue + category_loss = torch.sum(category_loss * category_mask_i) + category_loss = category_loss / torch.sum(category_mask_i) + else: + category_loss = torch.mean(category_loss) + # We name the loss of each category as 'LOSS', since we only + # want to monitor them, not backward them. We will also provide + # the loss used for backward in the losses dictionary + losses[f'{name}_LOSS'] = category_loss + loss_weights[f'{name}_LOSS'] = self.category_loss_weights[idx] + loss_weight_sum = sum(loss_weights.values()) + loss_weights = { + k: v / loss_weight_sum + for k, v in loss_weights.items() + } + loss_cls = sum([losses[k] * loss_weights[k] for k in losses]) + losses['loss_cls'] = loss_cls + # We also trace the loss weights + losses.update({ + k + '_weight': torch.tensor(v).to(losses[k].device) + for k, v in loss_weights.items() + }) + # Note that the loss weights are just for reference. + return losses + else: + raise ValueError("loss_type should be 'all' or 'individual', " + f'but got {self.loss_type}') diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/nll_loss.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/nll_loss.py new file mode 100644 index 0000000..1c28537 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/nll_loss.py @@ -0,0 +1,26 @@ +import torch.nn.functional as F + +from ..registry import LOSSES +from .base import BaseWeightedLoss + + +@LOSSES.register_module() +class NLLLoss(BaseWeightedLoss): + """NLL Loss. + + It will calculate NLL loss given cls_score and label. + """ + + def _forward(self, cls_score, label, **kwargs): + """Forward function. + + Args: + cls_score (torch.Tensor): The class score. 
+ label (torch.Tensor): The ground truth label. + kwargs: Any keyword argument to be used to calculate nll loss. + + Returns: + torch.Tensor: The returned nll loss. + """ + loss_cls = F.nll_loss(cls_score, label, **kwargs) + return loss_cls diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/ohem_hinge_loss.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/ohem_hinge_loss.py new file mode 100644 index 0000000..75086b2 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/ohem_hinge_loss.py @@ -0,0 +1,64 @@ +import torch + + +class OHEMHingeLoss(torch.autograd.Function): + """This class is the core implementation for the completeness loss in + paper. + + It compute class-wise hinge loss and performs online hard example mining + (OHEM). + """ + + @staticmethod + def forward(ctx, pred, labels, is_positive, ohem_ratio, group_size): + """Calculate OHEM hinge loss. + + Args: + pred (torch.Tensor): Predicted completeness score. + labels (torch.Tensor): Groundtruth class label. + is_positive (int): Set to 1 when proposals are positive and + set to -1 when proposals are incomplete. + ohem_ratio (float): Ratio of hard examples. + group_size (int): Number of proposals sampled per video. + + Returns: + torch.Tensor: Returned class-wise hinge loss. + """ + num_samples = pred.size(0) + if num_samples != len(labels): + raise ValueError(f'Number of samples should be equal to that ' + f'of labels, but got {num_samples} samples and ' + f'{len(labels)} labels.') + + losses = torch.zeros(num_samples, device=pred.device) + slopes = torch.zeros(num_samples, device=pred.device) + for i in range(num_samples): + losses[i] = max(0, 1 - is_positive * pred[i, labels[i] - 1]) + slopes[i] = -is_positive if losses[i] != 0 else 0 + + losses = losses.view(-1, group_size).contiguous() + sorted_losses, indices = torch.sort(losses, dim=1, descending=True) + keep_length = int(group_size * ohem_ratio) + loss = torch.zeros(1, device=pred.device) + for i in range(losses.size(0)): + loss += sorted_losses[i, :keep_length].sum() + ctx.loss_index = indices[:, :keep_length] + ctx.labels = labels + ctx.slopes = slopes + ctx.shape = pred.size() + ctx.group_size = group_size + ctx.num_groups = losses.size(0) + return loss + + @staticmethod + def backward(ctx, grad_output): + labels = ctx.labels + slopes = ctx.slopes + + grad_in = torch.zeros(ctx.shape, device=ctx.slopes.device) + for group in range(ctx.num_groups): + for idx in ctx.loss_index[group]: + loc = idx + group * ctx.group_size + grad_in[loc, labels[loc] - 1] = ( + slopes[loc] * grad_output.data[0]) + return torch.autograd.Variable(grad_in), None, None, None, None diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/rebias_loss.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/rebias_loss.py new file mode 100644 index 0000000..206a7e2 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/rebias_loss.py @@ -0,0 +1,92 @@ +import torch +import torch.nn.functional as F +import numpy as np + +from ..registry import LOSSES +from .base import BaseWeightedLoss + + +@LOSSES.register_module() +class RebiasLoss(BaseWeightedLoss): + """Rebias Loss.""" + def __init__(self, lambda_g=1.0, criteria='hsic'): + super().__init__() + self.lambda_g = lambda_g + self.criteria = criteria + + def _kernel(self, X, sigma): + X = X.view(len(X), -1) + XX = X @ X.t() + X_sqnorms = torch.diag(XX) + X_L2 = -2 * XX + X_sqnorms.unsqueeze(1) + X_sqnorms.unsqueeze(0) + gamma = 1 
/ (2 * sigma ** 2) + + kernel_XX = torch.exp(-gamma * X_L2) + return kernel_XX + + + def hsic_loss(self, input1, input2, unbiased=False): + N = len(input1) + if N < 4: + return torch.tensor(0.0).to(input1.device) + # we simply use the squared dimension of feature as the sigma for RBF kernel + sigma_x = np.sqrt(input1.size()[1]) + sigma_y = np.sqrt(input2.size()[1]) + + # compute the kernels + kernel_XX = self._kernel(input1, sigma_x) + kernel_YY = self._kernel(input2, sigma_y) + + if unbiased: + """Unbiased estimator of Hilbert-Schmidt Independence Criterion + Song, Le, et al. "Feature selection via dependence maximization." 2012. + """ + tK = kernel_XX - torch.diag(kernel_XX) + tL = kernel_YY - torch.diag(kernel_YY) + hsic = ( + torch.trace(tK @ tL) + + (torch.sum(tK) * torch.sum(tL) / (N - 1) / (N - 2)) + - (2 * torch.sum(tK, 0).dot(torch.sum(tL, 0)) / (N - 2)) + ) + loss = hsic / (N * (N - 3)) + else: + """Biased estimator of Hilbert-Schmidt Independence Criterion + Gretton, Arthur, et al. "Measuring statistical dependence with Hilbert-Schmidt norms." 2005. + """ + KH = kernel_XX - kernel_XX.mean(0, keepdim=True) + LH = kernel_YY - kernel_YY.mean(0, keepdim=True) + loss = torch.trace(KH @ LH / (N - 1) ** 2) + return loss + + def cosine_loss(self, input1, input2): + # normalize the inputs with L2-norm + norm1 = F.normalize(input1, dim=1, p=2) + norm2 = F.normalize(input2, dim=1, p=2) + # cosine distance + cos_batch = torch.bmm(norm1.unsqueeze(1), norm2.unsqueeze(2)).squeeze(-1).squeeze(-1) + loss = torch.mean(torch.abs(cos_batch)) + return loss + + + def _forward(self, x, xs, y, ys, label, **kwargs): + """Forward function. + Returns: + torch.Tensor: The returned Rebias loss. + """ + # L(f) + loss_f = F.cross_entropy(y, label, **kwargs) + # L(g) + loss_g = self.lambda_g * F.cross_entropy(ys, label, **kwargs) + losses = {'loss_f': loss_f, 'loss_g': loss_g} + # all losses + if self.criteria == 'hsic': + # negative HSIC loss + loss_hsic = - self.hsic_loss(x, xs) # small returned value means high dependency + losses.update({'loss_hsic': loss_hsic}) + elif self.criteria == 'cosine': + loss_cos = self.cosine_loss(x, xs) # large returned value means high dependency + losses.update({'loss_cos': loss_cos}) + else: + raise NotImplementedError + + return losses \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/rpl_loss.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/rpl_loss.py new file mode 100644 index 0000000..b5a6172 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/rpl_loss.py @@ -0,0 +1,49 @@ +import torch +import torch.nn.functional as F + +from ..registry import LOSSES +from .base import BaseWeightedLoss +from ...core import top_k_accuracy + + +@LOSSES.register_module() +class RPLoss(BaseWeightedLoss): + """Reciprocal Point Learning Loss.""" + def __init__(self, temperature=1, weight_pl=0.1, radius_init=1): + super().__init__() + self.temperature = temperature + self.weight_pl = weight_pl + self.radius = torch.nn.Parameter(torch.Tensor(radius_init)) + self.radius.data.fill_(0) + + def _forward(self, head_outs, labels, **kwargs): + """Forward function. + + Args: + head_outs (torch.Tensor): outputs of the RPL head + label (torch.Tensor): The ground truth label. + kwargs: Any keyword argument to be used to calculate + CrossEntropy loss. + + Returns: + torch.Tensor: The returned CrossEntropy loss. 
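+
+        Note:
+            The returned dict holds a closed-set term ('loss_cls', the
+            temperature-scaled cross-entropy over ``dist``) and an open-set
+            term ('loss_open', ``weight_pl`` times an MSE between the features
+            and the learnable radius). An illustrative head config would set
+            ``loss_cls=dict(type='RPLoss', temperature=1, weight_pl=0.1)``;
+            the values here are placeholders.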
+ """ + dist, feature, centers = head_outs['dist'], head_outs['feature'], head_outs['centers'] + if labels.shape == torch.Size([]): + labels = labels.unsqueeze(0) + # compute losses + logits = F.softmax(dist, dim=1) + loss_closed = F.cross_entropy(dist / self.temperature, labels, **kwargs) + center_batch = centers[labels, :] + _dis = (feature - center_batch).pow(2).mean(1) + loss_r = F.mse_loss(feature, self.radius) / 2 + # gather losses + losses = {'loss_cls': loss_closed, 'loss_open': self.weight_pl * loss_r} + + # compute top-K accuracy using CPU numpy + top_k_acc = top_k_accuracy(logits.detach().cpu().numpy(), + labels.detach().cpu().numpy(), (1, 5)) + losses.update({'top1_acc': torch.tensor(top_k_acc[0], device=dist.device)}) + losses.update({'top5_acc': torch.tensor(top_k_acc[1], device=dist.device)}) + + return losses \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/ssn_loss.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/ssn_loss.py new file mode 100644 index 0000000..492dc8d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/losses/ssn_loss.py @@ -0,0 +1,179 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..registry import LOSSES +from .ohem_hinge_loss import OHEMHingeLoss + + +@LOSSES.register_module() +class SSNLoss(nn.Module): + + @staticmethod + def activity_loss(activity_score, labels, activity_indexer): + """Activity Loss. + + It will calculate activity loss given activity_score and label. + + Args: + activity_score (torch.Tensor): Predicted activity score. + labels (torch.Tensor): Groundtruth class label. + activity_indexer (torch.Tensor): Index slices of proposals. + + Returns: + torch.Tensor: Returned cross entropy loss. + """ + pred = activity_score[activity_indexer, :] + gt = labels[activity_indexer] + return F.cross_entropy(pred, gt) + + @staticmethod + def completeness_loss(completeness_score, + labels, + completeness_indexer, + positive_per_video, + incomplete_per_video, + ohem_ratio=0.17): + """Completeness Loss. + + It will calculate completeness loss given completeness_score and label. + + Args: + completeness_score (torch.Tensor): Predicted completeness score. + labels (torch.Tensor): Groundtruth class label. + completeness_indexer (torch.Tensor): Index slices of positive and + incomplete proposals. + positive_per_video (int): Number of positive proposals sampled + per video. + incomplete_per_video (int): Number of incomplete proposals sampled + pre video. + ohem_ratio (float): Ratio of online hard example mining. + Default: 0.17. + + Returns: + torch.Tensor: Returned class-wise completeness loss. 
+ """ + pred = completeness_score[completeness_indexer, :] + gt = labels[completeness_indexer] + + pred_dim = pred.size(1) + pred = pred.view(-1, positive_per_video + incomplete_per_video, + pred_dim) + gt = gt.view(-1, positive_per_video + incomplete_per_video) + + # yapf:disable + positive_pred = pred[:, :positive_per_video, :].contiguous().view(-1, pred_dim) # noqa:E501 + incomplete_pred = pred[:, positive_per_video:, :].contiguous().view(-1, pred_dim) # noqa:E501 + # yapf:enable + + positive_loss = OHEMHingeLoss.apply( + positive_pred, gt[:, :positive_per_video].contiguous().view(-1), 1, + 1.0, positive_per_video) + incomplete_loss = OHEMHingeLoss.apply( + incomplete_pred, gt[:, positive_per_video:].contiguous().view(-1), + -1, ohem_ratio, incomplete_per_video) + num_positives = positive_pred.size(0) + num_incompletes = int(incomplete_pred.size(0) * ohem_ratio) + + return ((positive_loss + incomplete_loss) / + float(num_positives + num_incompletes)) + + @staticmethod + def classwise_regression_loss(bbox_pred, labels, bbox_targets, + regression_indexer): + """Classwise Regression Loss. + + It will calculate classwise_regression loss given + class_reg_pred and targets. + + Args: + bbox_pred (torch.Tensor): Predicted interval center and span + of positive proposals. + labels (torch.Tensor): Groundtruth class label. + bbox_targets (torch.Tensor): Groundtruth center and span + of positive proposals. + regression_indexer (torch.Tensor): Index slices of + positive proposals. + + Returns: + torch.Tensor: Returned class-wise regression loss. + """ + pred = bbox_pred[regression_indexer, :, :] + gt = labels[regression_indexer] + reg_target = bbox_targets[regression_indexer, :] + + class_idx = gt.data - 1 + classwise_pred = pred[:, class_idx, :] + classwise_reg_pred = torch.cat( + (torch.diag(classwise_pred[:, :, 0]).view( + -1, 1), torch.diag(classwise_pred[:, :, 1]).view(-1, 1)), + dim=1) + loss = F.smooth_l1_loss( + classwise_reg_pred.view(-1), reg_target.view(-1)) * 2 + return loss + + def forward(self, activity_score, completeness_score, bbox_pred, + proposal_type, labels, bbox_targets, train_cfg): + """Calculate Boundary Matching Network Loss. + + Args: + activity_score (torch.Tensor): Predicted activity score. + completeness_score (torch.Tensor): Predicted completeness score. + bbox_pred (torch.Tensor): Predicted interval center and span + of positive proposals. + proposal_type (torch.Tensor): Type index slices of proposals. + labels (torch.Tensor): Groundtruth class label. + bbox_targets (torch.Tensor): Groundtruth center and span + of positive proposals. + train_cfg (dict): Config for training. + + Returns: + dict([torch.Tensor, torch.Tensor, torch.Tensor]): + (loss_activity, loss_completeness, loss_reg). + Loss_activity is the activity loss, loss_completeness is + the class-wise completeness loss, + loss_reg is the class-wise regression loss. 
+ """ + self.sampler = train_cfg.ssn.sampler + self.loss_weight = train_cfg.ssn.loss_weight + losses = dict() + + proposal_type = proposal_type.view(-1) + labels = labels.view(-1) + activity_indexer = ((proposal_type == 0) + + (proposal_type == 2)).nonzero().squeeze(1) + completeness_indexer = ((proposal_type == 0) + + (proposal_type == 1)).nonzero().squeeze(1) + + total_ratio = ( + self.sampler.positive_ratio + self.sampler.background_ratio + + self.sampler.incomplete_ratio) + positive_per_video = int(self.sampler.num_per_video * + (self.sampler.positive_ratio / total_ratio)) + background_per_video = int( + self.sampler.num_per_video * + (self.sampler.background_ratio / total_ratio)) + incomplete_per_video = ( + self.sampler.num_per_video - positive_per_video - + background_per_video) + + losses['loss_activity'] = self.activity_loss(activity_score, labels, + activity_indexer) + + losses['loss_completeness'] = self.completeness_loss( + completeness_score, + labels, + completeness_indexer, + positive_per_video, + incomplete_per_video, + ohem_ratio=positive_per_video / incomplete_per_video) + losses['loss_completeness'] *= self.loss_weight.comp_loss_weight + + if bbox_pred is not None: + regression_indexer = (proposal_type == 0).nonzero().squeeze(1) + bbox_targets = bbox_targets.view(-1, 2) + losses['loss_reg'] = self.classwise_regression_loss( + bbox_pred, labels, bbox_targets, regression_indexer) + losses['loss_reg'] *= self.loss_weight.reg_loss_weight + + return losses diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/necks/__init__.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/necks/__init__.py new file mode 100644 index 0000000..76535d5 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/necks/__init__.py @@ -0,0 +1,3 @@ +from .tpn import TPN + +__all__ = ['TPN'] diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/necks/tpn.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/necks/tpn.py new file mode 100644 index 0000000..f1bfea5 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/necks/tpn.py @@ -0,0 +1,400 @@ +import numpy as np +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, constant_init, normal_init, xavier_init +from ..heads import AuxHead, RebiasHead +from ..registry import NECKS + + +class Identity(nn.Module): + """Identity mapping.""" + + def forward(self, x): + return x + + +class DownSample(nn.Module): + """DownSample modules. + + It uses convolution and maxpooling to downsample the input feature, + and specifies downsample position to determine `pool-conv` or `conv-pool`. + + Args: + in_channels (int): Channel number of input features. + out_channels (int): Channel number of output feature. + kernel_size (int | tuple[int]): Same as :class:`ConvModule`. + Default: (3, 1, 1). + stride (int | tuple[int]): Same as :class:`ConvModule`. + Default: (1, 1, 1). + padding (int | tuple[int]): Same as :class:`ConvModule`. + Default: (1, 0, 0). + groups (int): Same as :class:`ConvModule`. Default: 1. + bias (bool | str): Same as :class:`ConvModule`. Default: False. + conv_cfg (dict | None): Same as :class:`ConvModule`. + Default: dict(type='Conv3d'). + norm_cfg (dict | None): Same as :class:`ConvModule`. Default: None. + act_cfg (dict | None): Same as :class:`ConvModule`. Default: None. + downsample_position (str): Type of downsample position. Options are + 'before' and 'after'. Default: 'after'. 
+ downsample_scale (int | tuple[int]): downsample scale for maxpooling. + It will be used for kernel size and stride of maxpooling. + Default: (1, 2, 2). + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size=(3, 1, 1), + stride=(1, 1, 1), + padding=(1, 0, 0), + groups=1, + bias=False, + conv_cfg=dict(type='Conv3d'), + norm_cfg=None, + act_cfg=None, + downsample_position='after', + downsample_scale=(1, 2, 2)): + super().__init__() + self.conv = ConvModule( + in_channels, + out_channels, + kernel_size, + stride, + padding, + groups=groups, + bias=bias, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + assert downsample_position in ['before', 'after'] + self.downsample_position = downsample_position + self.pool = nn.MaxPool3d( + downsample_scale, downsample_scale, (0, 0, 0), ceil_mode=True) + + def forward(self, x): + if self.downsample_position == 'before': + x = self.pool(x) + x = self.conv(x) + else: + x = self.conv(x) + x = self.pool(x) + return x + + +class LevelFusion(nn.Module): + """Level Fusion module. + + This module is used to aggregate the hierarchical features dynamic in + visual tempos and consistent in spatial semantics. The top/bottom features + for top-down/bottom-up flow would be combined to achieve two additional + options, namely 'Cascade Flow' or 'Parallel Flow'. While applying a + bottom-up flow after a top-down flow will lead to the cascade flow, + applying them simultaneously will result in the parallel flow. + + Args: + in_channels (tuple[int]): Channel numbers of input features tuple. + mid_channels (tuple[int]): Channel numbers of middle features tuple. + out_channels (int): Channel numbers of output features. + downsample_scales (tuple[int | tuple[int]]): downsample scales for + each :class:`DownSample` module. Default: ((1, 1, 1), (1, 1, 1)). + """ + + def __init__(self, + in_channels, + mid_channels, + out_channels, + downsample_scales=((1, 1, 1), (1, 1, 1))): + super().__init__() + num_stages = len(in_channels) + + self.downsamples = nn.ModuleList() + for i in range(num_stages): + downsample = DownSample( + in_channels[i], + mid_channels[i], + kernel_size=(1, 1, 1), + stride=(1, 1, 1), + bias=False, + padding=(0, 0, 0), + groups=32, + norm_cfg=dict(type='BN3d', requires_grad=True), + act_cfg=dict(type='ReLU', inplace=True), + downsample_position='before', + downsample_scale=downsample_scales[i]) + self.downsamples.append(downsample) + + self.fusion_conv = ConvModule( + sum(mid_channels), + out_channels, + 1, + stride=1, + padding=0, + bias=False, + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d', requires_grad=True), + act_cfg=dict(type='ReLU', inplace=True)) + + def forward(self, x): + out = [self.downsamples[i](feature) for i, feature in enumerate(x)] + out = torch.cat(out, 1) + out = self.fusion_conv(out) + + return out + + +class SpatialModulation(nn.Module): + """Spatial Semantic Modulation. + + This module is used to align spatial semantics of features in the + multi-depth pyramid. For each but the top-level feature, a stack + of convolutions with level-specific stride are applied to it, matching + its spatial shape and receptive field with the top one. + + Args: + in_channels (tuple[int]): Channel numbers of input features tuple. + out_channels (int): Channel numbers of output features tuple. 
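+
+    Example (illustrative, with ``in_channels=(1024, 2048)`` and
+        ``out_channels=2048`` as in a typical TPN setting): the 1024-d stage is
+        processed by a single Conv3d (1024 -> 2048, spatial stride 2), while
+        the 2048-d top stage goes through an Identity mapping, so both levels
+        end up with matching channel and spatial dimensions.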
+ """ + + def __init__(self, in_channels, out_channels): + super().__init__() + + self.spatial_modulation = nn.ModuleList() + for channel in in_channels: + downsample_scale = out_channels // channel + downsample_factor = int(np.log2(downsample_scale)) + op = nn.ModuleList() + if downsample_factor < 1: + op = Identity() + else: + for factor in range(downsample_factor): + in_factor = 2**factor + out_factor = 2**(factor + 1) + op.append( + ConvModule( + channel * in_factor, + channel * out_factor, (1, 3, 3), + stride=(1, 2, 2), + padding=(0, 1, 1), + bias=False, + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d', requires_grad=True), + act_cfg=dict(type='ReLU', inplace=True))) + self.spatial_modulation.append(op) + + def forward(self, x): + out = [] + for i, _ in enumerate(x): + if isinstance(self.spatial_modulation[i], nn.ModuleList): + out_ = x[i] + for op in self.spatial_modulation[i]: + out_ = op(out_) + out.append(out_) + else: + out.append(self.spatial_modulation[i](x[i])) + return out + + +class TemporalModulation(nn.Module): + """Temporal Rate Modulation. + + The module is used to equip TPN with a similar flexibility for temporal + tempo modulation as in the input-level frame pyramid. + + Args: + in_channels (int): Channel number of input features. + out_channels (int): Channel number of output features. + downsample_scale (int): Downsample scale for maxpooling. Default: 8. + """ + + def __init__(self, in_channels, out_channels, downsample_scale=8): + super().__init__() + + self.conv = ConvModule( + in_channels, + out_channels, (3, 1, 1), + stride=(1, 1, 1), + padding=(1, 0, 0), + bias=False, + groups=32, + conv_cfg=dict(type='Conv3d'), + act_cfg=None) + self.pool = nn.MaxPool3d((downsample_scale, 1, 1), + (downsample_scale, 1, 1), (0, 0, 0), + ceil_mode=True) + + def forward(self, x): + x = self.conv(x) + x = self.pool(x) + return x + + +@NECKS.register_module() +class TPN(nn.Module): + """TPN neck. + + This module is proposed in `Temporal Pyramid Network for Action Recognition + `_ + + Args: + in_channels (tuple[int]): Channel numbers of input features tuple. + out_channels (int): Channel number of output feature. + spatial_modulation_cfg (dict | None): Config for spatial modulation + layers. Required keys are `in_channels` and `out_channels`. + Default: None. + temporal_modulation_cfg (dict | None): Config for temporal modulation + layers. Default: None. + upsample_cfg (dict | None): Config for upsample layers. The keys are + same as that in :class:``nn.Upsample``. Default: None. + downsample_cfg (dict | None): Config for downsample layers. + Default: None. + level_fusion_cfg (dict | None): Config for level fusion layers. + Required keys are 'in_channels', 'mid_channels', 'out_channels'. + Default: None. + aux_head_cfg (dict | None): Config for aux head layers. + Required keys are 'out_channels'. Default: None. + flow_type (str): Flow type to combine the features. Options are + 'cascade' and 'parallel'. Default: 'cascade'. 
+ """ + + def __init__(self, + in_channels, + out_channels, + spatial_modulation_cfg=None, + temporal_modulation_cfg=None, + upsample_cfg=None, + downsample_cfg=None, + level_fusion_cfg=None, + aux_head_cfg=None, + rebias_head_cfg=None, + flow_type='cascade'): + super().__init__() + assert isinstance(in_channels, tuple) + assert isinstance(out_channels, int) + self.in_channels = in_channels + self.out_channels = out_channels + self.num_tpn_stages = len(in_channels) + + assert spatial_modulation_cfg is None or isinstance( + spatial_modulation_cfg, dict) + assert temporal_modulation_cfg is None or isinstance( + temporal_modulation_cfg, dict) + assert upsample_cfg is None or isinstance(upsample_cfg, dict) + assert downsample_cfg is None or isinstance(downsample_cfg, dict) + assert aux_head_cfg is None or isinstance(aux_head_cfg, dict) + assert rebias_head_cfg is None or isinstance(rebias_head_cfg, dict) + assert level_fusion_cfg is None or isinstance(level_fusion_cfg, dict) + + if flow_type not in ['cascade', 'parallel']: + raise ValueError( + f"flow type in TPN should be 'cascade' or 'parallel', " + f'but got {flow_type} instead.') + self.flow_type = flow_type + + self.temporal_modulation_ops = nn.ModuleList() + self.upsample_ops = nn.ModuleList() + self.downsample_ops = nn.ModuleList() + + self.level_fusion_1 = LevelFusion(**level_fusion_cfg) + self.spatial_modulation = SpatialModulation(**spatial_modulation_cfg) + + for i in range(self.num_tpn_stages): + + if temporal_modulation_cfg is not None: + downsample_scale = temporal_modulation_cfg[ + 'downsample_scales'][i] + temporal_modulation = TemporalModulation( + in_channels[-1], out_channels, downsample_scale) + self.temporal_modulation_ops.append(temporal_modulation) + + if i < self.num_tpn_stages - 1: + if upsample_cfg is not None: + upsample = nn.Upsample(**upsample_cfg) + self.upsample_ops.append(upsample) + + if downsample_cfg is not None: + downsample = DownSample(out_channels, out_channels, + **downsample_cfg) + self.downsample_ops.append(downsample) + + out_dims = level_fusion_cfg['out_channels'] + + # two pyramids + self.level_fusion_2 = LevelFusion(**level_fusion_cfg) + + self.pyramid_fusion = ConvModule( + out_dims * 2, + 2048, + 1, + stride=1, + padding=0, + bias=False, + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d', requires_grad=True)) + + if aux_head_cfg is not None: + self.aux_head = AuxHead(self.in_channels[-2], **aux_head_cfg) + else: + self.aux_head = None + if rebias_head_cfg is not None: + self.rebias_head = RebiasHead(self.in_channels[-2], **rebias_head_cfg) + else: + self.rebias_head = None + self.init_weights() + + # default init_weights for conv(msra) and norm in ConvModule + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv3d): + xavier_init(m, distribution='uniform') + if isinstance(m, nn.BatchNorm3d): + constant_init(m, 1) + + if self.aux_head is not None: + self.aux_head.init_weights() + + if self.rebias_head is not None: + self.rebias_head.init_weights() + + + def forward(self, x, target=None): + loss_aux = dict() + + # Auxiliary loss + if self.aux_head is not None: + loss_aux = self.aux_head(x[-2], target) + if self.rebias_head is not None: + loss_rebias = self.rebias_head(x[-2], target) + loss_aux.update(loss_rebias) + + # Spatial Modulation + spatial_modulation_outs = self.spatial_modulation(x) + + # Temporal Modulation + temporal_modulation_outs = [] + for i, temporal_modulation in enumerate(self.temporal_modulation_ops): + temporal_modulation_outs.append( + 
temporal_modulation(spatial_modulation_outs[i])) + + outs = [out.clone() for out in temporal_modulation_outs] + if len(self.upsample_ops) != 0: + for i in range(self.num_tpn_stages - 1, 0, -1): + outs[i - 1] = outs[i - 1] + self.upsample_ops[i - 1](outs[i]) + + # Get top-down outs + top_down_outs = self.level_fusion_1(outs) + + # Build bottom-up flow using downsample operation + if self.flow_type == 'parallel': + outs = [out.clone() for out in temporal_modulation_outs] + if len(self.downsample_ops) != 0: + for i in range(self.num_tpn_stages - 1): + outs[i + 1] = outs[i + 1] + self.downsample_ops[i](outs[i]) + + # Get bottom-up outs + botton_up_outs = self.level_fusion_2(outs) + + # fuse two pyramid outs + outs = self.pyramid_fusion( + torch.cat([top_down_outs, botton_up_outs], 1)) + + return outs, loss_aux diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/__init__.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/__init__.py new file mode 100644 index 0000000..4245461 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/__init__.py @@ -0,0 +1,10 @@ +from .audio_recognizer import AudioRecognizer +from .base import BaseRecognizer +from .recognizer2d import Recognizer2D +from .recognizer3d import Recognizer3D +from .recognizer2d_bnn import Recognizer2DBNN +from .recognizer3d_bnn import Recognizer3DBNN +from .recognizer2d_rpl import Recognizer2DRPL +from .recognizer3d_rpl import Recognizer3DRPL + +__all__ = ['BaseRecognizer', 'Recognizer2D', 'Recognizer3D', 'Recognizer2DBNN', 'Recognizer3DBNN', 'Recognizer2DRPL', 'Recognizer3DRPL', 'AudioRecognizer'] diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/audio_recognizer.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/audio_recognizer.py new file mode 100644 index 0000000..a9b431c --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/audio_recognizer.py @@ -0,0 +1,101 @@ +from ..registry import RECOGNIZERS +from .base import BaseRecognizer + + +@RECOGNIZERS.register_module() +class AudioRecognizer(BaseRecognizer): + """Audio recognizer model framework.""" + + def forward(self, audios, label=None, return_loss=True): + """Define the computation performed at every call.""" + if return_loss: + if label is None: + raise ValueError('Label should not be None.') + return self.forward_train(audios, label) + + return self.forward_test(audios) + + def forward_train(self, audios, labels): + """Defines the computation performed at every call when training.""" + audios = audios.reshape((-1, ) + audios.shape[2:]) + x = self.extract_feat(audios) + cls_score = self.cls_head(x) + gt_labels = labels.squeeze() + loss = self.cls_head.loss(cls_score, gt_labels) + + return loss + + def forward_test(self, audios): + """Defines the computation performed at every call when evaluation and + testing.""" + num_segs = audios.shape[1] + audios = audios.reshape((-1, ) + audios.shape[2:]) + x = self.extract_feat(audios) + cls_score = self.cls_head(x) + cls_score = self.average_clip(cls_score, num_segs) + + return cls_score.cpu().numpy() + + def forward_gradcam(self, audios): + raise NotImplementedError + + def train_step(self, data_batch, optimizer, **kwargs): + """The iteration step during training. + + This method defines an iteration step during training, except for the + back propagation and optimizer updating, which are done in an optimizer + hook. 
Note that in some complicated cases or models, the whole process + including back propagation and optimizer updating is also defined in + this method, such as GAN. + + Args: + data_batch (dict): The output of dataloader. + optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of + runner is passed to ``train_step()``. This argument is unused + and reserved. + + Returns: + dict: It should contain at least 3 keys: ``loss``, ``log_vars``, + ``num_samples``. + ``loss`` is a tensor for back propagation, which can be a + weighted sum of multiple losses. + ``log_vars`` contains all the variables to be sent to the + logger. + ``num_samples`` indicates the batch size (when the model is + DDP, it means the batch size on each GPU), which is used for + averaging the logs. + """ + audios = data_batch['audios'] + label = data_batch['label'] + + losses = self(audios, label) + + loss, log_vars = self._parse_losses(losses) + + outputs = dict( + loss=loss, + log_vars=log_vars, + num_samples=len(next(iter(data_batch.values())))) + + return outputs + + def val_step(self, data_batch, optimizer, **kwargs): + """The iteration step during validation. + + This method shares the same signature as :func:`train_step`, but used + during val epochs. Note that the evaluation after training epochs is + not implemented with this method, but an evaluation hook. + """ + audios = data_batch['audios'] + label = data_batch['label'] + + losses = self(audios, label) + + loss, log_vars = self._parse_losses(losses) + + outputs = dict( + loss=loss, + log_vars=log_vars, + num_samples=len(next(iter(data_batch.values())))) + + return outputs diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/base.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/base.py new file mode 100644 index 0000000..17e4f0d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/base.py @@ -0,0 +1,267 @@ +from abc import ABCMeta, abstractmethod +from collections import OrderedDict + +import torch +import torch.distributed as dist +import torch.nn as nn +import torch.nn.functional as F +from mmcv.runner import auto_fp16 + +from .. import builder + + +class BaseRecognizer(nn.Module, metaclass=ABCMeta): + """Base class for recognizers. + + All recognizers should subclass it. + All subclass should overwrite: + + - Methods:``forward_train``, supporting to forward when training. + - Methods:``forward_test``, supporting to forward when testing. + + Args: + backbone (dict): Backbone modules to extract feature. + cls_head (dict): Classification head to process feature. + train_cfg (dict | None): Config for training. Default: None. + test_cfg (dict | None): Config for testing. Default: None. 
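+
+    Example (an illustrative model dict; the concrete types and numbers are
+        placeholders, chosen to exercise the evidential test-time averaging
+        implemented below):
+
+        >>> model = dict(
+        ...     type='Recognizer3D',
+        ...     backbone=dict(type='ResNet3dSlowOnly', depth=50),
+        ...     cls_head=dict(type='I3DHead', num_classes=101, in_channels=2048),
+        ...     test_cfg=dict(average_clips='evidence', evidence_type='exp'))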
+ """ + + def __init__(self, + backbone, + cls_head, + neck=None, + debias_head=None, + train_cfg=None, + test_cfg=None): + super().__init__() + self.backbone = builder.build_backbone(backbone) + if neck is not None: + self.neck = builder.build_neck(neck) + self.cls_head = builder.build_head(cls_head) + if debias_head is not None: + self.debias_head = builder.build_head(debias_head) + + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + # aux_info is the list of tensor names beyond 'imgs' and 'label' which + # will be used in train_step and val_step, data_batch should contain + # these tensors + self.aux_info = [] + if train_cfg is not None and 'aux_info' in train_cfg: + self.aux_info = train_cfg['aux_info'] + + self.init_weights() + + self.fp16_enabled = False + + def init_weights(self): + """Initialize the model network weights.""" + self.backbone.init_weights() + self.cls_head.init_weights() + if hasattr(self, 'neck'): + self.neck.init_weights() + if hasattr(self, 'debias_head'): + self.debias_head.init_weights() + + @auto_fp16() + def extract_feat(self, imgs): + """Extract features through a backbone. + + Args: + imgs (torch.Tensor): The input images. + + Returns: + torch.tensor: The extracted features. + """ + x = self.backbone(imgs) + return x + + def evidence_to_prob(self, output, evidence_type): + if evidence_type == 'relu': + from ..losses.edl_loss import relu_evidence as evidence + elif evidence_type == 'exp': + from ..losses.edl_loss import exp_evidence as evidence + elif evidence_type == 'softplus': + from ..losses.edl_loss import softplus_evidence as evidence + alpha = evidence(output) + 1 + S = torch.sum(alpha, dim=-1, keepdim=True) + prob = alpha / S + return prob + + + def average_clip(self, cls_score, num_segs=1): + """Averaging class score over multiple clips. + + Using different averaging types ('score' or 'prob' or None, + which defined in test_cfg) to computed the final averaged + class score. Only called in test mode. + + Args: + cls_score (torch.Tensor): Class score to be averaged. + num_segs (int): Number of clips for each input sample. + + Returns: + torch.Tensor: Averaged class score. + """ + if 'average_clips' not in self.test_cfg.keys(): + raise KeyError('"average_clips" must defined in test_cfg\'s keys') + + average_clips = self.test_cfg['average_clips'] + if average_clips not in ['score', 'prob', 'evidence', None]: + raise ValueError(f'{average_clips} is not supported. 
' + f'Currently supported ones are ' + f'["score", "prob", "evidence", None]') + + if average_clips is None: + return cls_score + + batch_size = cls_score.shape[0] + cls_score = cls_score.view(batch_size // num_segs, num_segs, -1) + + if average_clips == 'prob': + cls_score = F.softmax(cls_score, dim=2).mean(dim=1) + elif average_clips == 'score': + cls_score = cls_score.mean(dim=1) + elif average_clips == 'evidence': + assert 'evidence_type' in self.test_cfg.keys() + cls_score = self.evidence_to_prob(cls_score, self.test_cfg['evidence_type']) + cls_score = cls_score.mean(dim=1) + + return cls_score + + @abstractmethod + def forward_train(self, imgs, labels, **kwargs): + """Defines the computation performed at every call when training.""" + + @abstractmethod + def forward_test(self, imgs): + """Defines the computation performed at every call when evaluation and + testing.""" + + @abstractmethod + def forward_gradcam(self, imgs): + """Defines the computation performed at every all when using gradcam + utils.""" + + @staticmethod + def _parse_losses(losses): + """Parse the raw outputs (losses) of the network. + + Args: + losses (dict): Raw output of the network, which usually contain + losses and other necessary information. + + Returns: + tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor + which may be a weighted sum of all losses, log_vars contains + all the variables to be sent to the logger. + """ + log_vars = OrderedDict() + for loss_name, loss_value in losses.items(): + if isinstance(loss_value, torch.Tensor): + log_vars[loss_name] = loss_value.mean() + elif isinstance(loss_value, list): + log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value) + else: + raise TypeError( + f'{loss_name} is not a tensor or list of tensors') + + loss = sum(_value for _key, _value in log_vars.items() + if 'loss' in _key) + + log_vars['loss'] = loss + for loss_name, loss_value in log_vars.items(): + # reduce loss when distributed training + if dist.is_available() and dist.is_initialized(): + loss_value = loss_value.data.clone().to(log_vars['loss'].device) + dist.all_reduce(loss_value.div_(dist.get_world_size())) + log_vars[loss_name] = loss_value.item() + + return loss, log_vars + + def forward(self, imgs, label=None, return_loss=True, **kwargs): + """Define the computation performed at every call.""" + if kwargs.get('gradcam', False): + del kwargs['gradcam'] + return self.forward_gradcam(imgs, **kwargs) + if kwargs.get('get_feat', False): + del kwargs['get_feat'] + return self.get_feat(imgs, **kwargs) + if return_loss: + if label is None: + raise ValueError('Label should not be None.') + return self.forward_train(imgs, label, **kwargs) + + return self.forward_test(imgs, **kwargs) + + def train_step(self, data_batch, optimizer, **kwargs): + """The iteration step during training. + + This method defines an iteration step during training, except for the + back propagation and optimizer updating, which are done in an optimizer + hook. Note that in some complicated cases or models, the whole process + including back propagation and optimizer updating is also defined in + this method, such as GAN. + + Args: + data_batch (dict): The output of dataloader. + optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of + runner is passed to ``train_step()``. This argument is unused + and reserved. + + Returns: + dict: It should contain at least 3 keys: ``loss``, ``log_vars``, + ``num_samples``. + ``loss`` is a tensor for back propagation, which can be a + weighted sum of multiple losses. 
+ ``log_vars`` contains all the variables to be sent to the + logger. + ``num_samples`` indicates the batch size (when the model is + DDP, it means the batch size on each GPU), which is used for + averaging the logs. + """ + imgs = data_batch['imgs'] + label = data_batch['label'] + + aux_info = {} + for item in self.aux_info: + assert item in data_batch + aux_info[item] = data_batch[item] + aux_info.update(kwargs) + + losses = self(imgs, label, return_loss=True, **aux_info) + + loss, log_vars = self._parse_losses(losses) + + outputs = dict( + loss=loss, + log_vars=log_vars, + num_samples=len(next(iter(data_batch.values())))) + + return outputs + + def val_step(self, data_batch, optimizer, **kwargs): + """The iteration step during validation. + + This method shares the same signature as :func:`train_step`, but used + during val epochs. Note that the evaluation after training epochs is + not implemented with this method, but an evaluation hook. + """ + imgs = data_batch['imgs'] + label = data_batch['label'] + + aux_info = {} + for item in self.aux_info: + aux_info[item] = data_batch[item] + + losses = self(imgs, label, return_loss=True, **aux_info) + + loss, log_vars = self._parse_losses(losses) + + outputs = dict( + loss=loss, + log_vars=log_vars, + num_samples=len(next(iter(data_batch.values())))) + + return outputs diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/recognizer2d.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/recognizer2d.py new file mode 100644 index 0000000..f6ead28 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/recognizer2d.py @@ -0,0 +1,124 @@ +from ..registry import RECOGNIZERS +from .base import BaseRecognizer + + +@RECOGNIZERS.register_module() +class Recognizer2D(BaseRecognizer): + """2D recognizer model framework.""" + + def forward_train(self, imgs, labels, **kwargs): + """Defines the computation performed at every call when training.""" + batches = imgs.shape[0] + imgs = imgs.reshape((-1, ) + imgs.shape[2:]) + num_segs = imgs.shape[0] // batches + + losses = dict() + + x = self.extract_feat(imgs) + + if hasattr(self, 'debias_head'): + loss_debias = self.debias_head(x, num_segs=num_segs, target=labels.squeeze(), **kwargs) + losses.update(loss_debias) + + if hasattr(self, 'neck'): + x = [ + each.reshape((-1, num_segs) + + each.shape[1:]).transpose(1, 2).contiguous() + for each in x + ] + x, _ = self.neck(x, labels.squeeze()) + x = x.squeeze(2) + num_segs = 1 + + cls_score = self.cls_head(x, num_segs) + gt_labels = labels.squeeze() + loss_cls = self.cls_head.loss(cls_score, gt_labels, **kwargs) + losses.update(loss_cls) + + return losses + + def _do_test(self, imgs): + """Defines the computation performed at every call when evaluation, + testing and gradcam.""" + batches = imgs.shape[0] + + imgs = imgs.reshape((-1, ) + imgs.shape[2:]) + num_segs = imgs.shape[0] // batches + + losses = dict() + + x = self.extract_feat(imgs) + if hasattr(self, 'neck'): + x = [ + each.reshape((-1, num_segs) + + each.shape[1:]).transpose(1, 2).contiguous() + for each in x + ] + x, loss_aux = self.neck(x) + x = x.squeeze(2) + losses.update(loss_aux) + num_segs = 1 + + # When using `TSNHead` or `TPNHead`, shape is [batch_size, num_classes] + # When using `TSMHead`, shape is [batch_size * num_crops, num_classes] + # `num_crops` is calculated by: + # 1) `twice_sample` in `SampleFrames` + # 2) `num_sample_positions` in `DenseSampleFrames` + # 3) `ThreeCrop/TenCrop/MultiGroupCrop` in `test_pipeline` 
+ # 4) `num_clips` in `SampleFrames` or its subclass if `clip_len != 1` + cls_score = self.cls_head(x, num_segs) + + assert cls_score.size()[0] % batches == 0 + # calculate num_crops automatically + cls_score = self.average_clip(cls_score, + cls_score.size()[0] // batches) + + return cls_score + + def forward_test(self, imgs): + """Defines the computation performed at every call when evaluation and + testing.""" + return self._do_test(imgs).cpu().numpy() + + def forward_dummy(self, imgs): + """Used for computing network FLOPs. + + See ``tools/analysis/get_flops.py``. + + Args: + imgs (torch.Tensor): Input images. + + Returns: + Tensor: Class score. + """ + batches = imgs.shape[0] + imgs = imgs.reshape((-1, ) + imgs.shape[2:]) + num_segs = imgs.shape[0] // batches + + x = self.extract_feat(imgs) + outs = (self.cls_head(x, num_segs), ) + return outs + + def forward_gradcam(self, imgs): + """Defines the computation performed at every call when using gradcam + utils.""" + return self._do_test(imgs) + + def get_feat(self, imgs, return_score=False): + """Defines the computation performed at every call when using get_feat + utils.""" + batches = imgs.shape[0] + imgs = imgs.reshape((-1, ) + imgs.shape[2:]) + num_segs = imgs.shape[0] // batches + + x = self.extract_feat(imgs) + if hasattr(self, 'neck'): + x, _ = self.neck(x) + + if return_score: + cls_score = self.cls_head(x, num_segs) + assert cls_score.size()[0] % batches == 0 + cls_score = self.average_clip(cls_score, + cls_score.size()[0] // batches) + return x, cls_score + return x \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/recognizer2d_bnn.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/recognizer2d_bnn.py new file mode 100644 index 0000000..a32fb10 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/recognizer2d_bnn.py @@ -0,0 +1,103 @@ +from ..registry import RECOGNIZERS +from .recognizer2d import Recognizer2D + + +@RECOGNIZERS.register_module() +class Recognizer2DBNN(Recognizer2D): + """2D recognizer model framework.""" + + def forward_train(self, imgs, labels, **kwargs): + """Defines the computation performed at every call when training.""" + batches = imgs.shape[0] + imgs = imgs.reshape((-1, ) + imgs.shape[2:]) + num_segs = imgs.shape[0] // batches + + losses = dict() + + x = self.extract_feat(imgs) + + if hasattr(self, 'debias_head'): + loss_debias = self.debias_head(x, num_segs=num_segs, target=labels.squeeze(), **kwargs) + losses.update(loss_debias) + + if hasattr(self, 'neck'): + x = [ + each.reshape((-1, num_segs) + + each.shape[1:]).transpose(1, 2).contiguous() + for each in x + ] + x, _ = self.neck(x, labels.squeeze()) + x = x.squeeze(2) + num_segs = 1 + + outputs = self.cls_head(x, num_segs, npass=self.train_cfg['npass'], testing=False) + # parse the outputs + cls_score = outputs['pred_mean'] + gt_labels = labels.squeeze() + loss_dict = self.cls_head.bnn_loss(cls_score, gt_labels, outputs, beta=self.train_cfg['loss_weight'], **kwargs) + losses.update(loss_dict) + + return losses + + def _do_test(self, imgs): + """Defines the computation performed at every call when evaluation, + testing and gradcam.""" + batches = imgs.shape[0] + + imgs = imgs.reshape((-1, ) + imgs.shape[2:]) + num_segs = imgs.shape[0] // batches + + losses = dict() + + x = self.extract_feat(imgs) + if hasattr(self, 'neck'): + x = [ + each.reshape((-1, num_segs) + + each.shape[1:]).transpose(1, 2).contiguous() + for each in x + ] + x, 
loss_aux = self.neck(x) + x = x.squeeze(2) + losses.update(loss_aux) + num_segs = 1 + + # When using `TSNHead` or `TPNHead`, shape is [batch_size, num_classes] + # When using `TSMHead`, shape is [batch_size * num_crops, num_classes] + # `num_crops` is calculated by: + # 1) `twice_sample` in `SampleFrames` + # 2) `num_sample_positions` in `DenseSampleFrames` + # 3) `ThreeCrop/TenCrop/MultiGroupCrop` in `test_pipeline` + # 4) `num_clips` in `SampleFrames` or its subclass if `clip_len != 1` + outputs = self.cls_head(x, num_segs, npass=self.test_cfg['npass'], testing=True) + cls_score = outputs['pred_mean'] + assert cls_score.size()[0] % batches == 0 + # calculate num_crops automatically + cls_score = self.average_clip(cls_score, + cls_score.size()[0] // batches) + + return cls_score + + def forward_test(self, imgs): + """Defines the computation performed at every call when evaluation and + testing.""" + return self._do_test(imgs).cpu().numpy() + + def forward_dummy(self, imgs): + """Used for computing network FLOPs. + + See ``tools/analysis/get_flops.py``. + + Args: + imgs (torch.Tensor): Input images. + + Returns: + Tensor: Class score. + """ + batches = imgs.shape[0] + imgs = imgs.reshape((-1, ) + imgs.shape[2:]) + num_segs = imgs.shape[0] // batches + + x = self.extract_feat(imgs) + outputs = (self.cls_head(x, num_segs, npass=self.test_cfg['npass'], testing=True), ) + outs = (outputs['pred_mean'], ) + return outs diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/recognizer2d_rpl.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/recognizer2d_rpl.py new file mode 100644 index 0000000..9712fbf --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/recognizer2d_rpl.py @@ -0,0 +1,97 @@ +from ..registry import RECOGNIZERS +from .recognizer2d import Recognizer2D + + +@RECOGNIZERS.register_module() +class Recognizer2DRPL(Recognizer2D): + """2D recognizer model framework.""" + + def forward_train(self, imgs, labels, **kwargs): + """Defines the computation performed at every call when training.""" + batches = imgs.shape[0] + imgs = imgs.reshape((-1, ) + imgs.shape[2:]) + num_segs = imgs.shape[0] // batches + + losses = dict() + + x = self.extract_feat(imgs) + + if hasattr(self, 'neck'): + x = [ + each.reshape((-1, num_segs) + + each.shape[1:]).transpose(1, 2).contiguous() + for each in x + ] + x, _ = self.neck(x, labels.squeeze()) + x = x.squeeze(2) + num_segs = 1 + + outputs = self.cls_head(x, num_segs) + gt_labels = labels.squeeze() + loss_dict = self.cls_head.loss_cls(outputs, gt_labels, **kwargs) + losses.update(loss_dict) + + return losses + + def _do_test(self, imgs): + """Defines the computation performed at every call when evaluation, + testing and gradcam.""" + batches = imgs.shape[0] + + imgs = imgs.reshape((-1, ) + imgs.shape[2:]) + num_segs = imgs.shape[0] // batches + + losses = dict() + + x = self.extract_feat(imgs) + if hasattr(self, 'neck'): + x = [ + each.reshape((-1, num_segs) + + each.shape[1:]).transpose(1, 2).contiguous() + for each in x + ] + x, loss_aux = self.neck(x) + x = x.squeeze(2) + losses.update(loss_aux) + num_segs = 1 + + # When using `TSNHead` or `TPNHead`, shape is [batch_size, num_classes] + # When using `TSMHead`, shape is [batch_size * num_crops, num_classes] + # `num_crops` is calculated by: + # 1) `twice_sample` in `SampleFrames` + # 2) `num_sample_positions` in `DenseSampleFrames` + # 3) `ThreeCrop/TenCrop/MultiGroupCrop` in `test_pipeline` + # 4) `num_clips` in `SampleFrames` 
or its subclass if `clip_len != 1` + outputs = self.cls_head(x, num_segs) + cls_score = outputs['dist'] + assert cls_score.size()[0] % batches == 0 + # calculate num_crops automatically + cls_score = self.average_clip(cls_score, + cls_score.size()[0] // batches) + + return cls_score + + def forward_test(self, imgs): + """Defines the computation performed at every call when evaluation and + testing.""" + return self._do_test(imgs).cpu().numpy() + + def forward_dummy(self, imgs): + """Used for computing network FLOPs. + + See ``tools/analysis/get_flops.py``. + + Args: + imgs (torch.Tensor): Input images. + + Returns: + Tensor: Class score. + """ + batches = imgs.shape[0] + imgs = imgs.reshape((-1, ) + imgs.shape[2:]) + num_segs = imgs.shape[0] // batches + + x = self.extract_feat(imgs) + outputs = self.cls_head(x, num_segs) + outs = (outputs['dist'], ) + return outs diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/recognizer3d.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/recognizer3d.py new file mode 100644 index 0000000..15d65f6 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/recognizer3d.py @@ -0,0 +1,89 @@ +from ..registry import RECOGNIZERS +from .base import BaseRecognizer +import pdb + + +@RECOGNIZERS.register_module() +class Recognizer3D(BaseRecognizer): + """3D recognizer model framework.""" + + def forward_train(self, imgs, labels, **kwargs): + """Defines the computation performed at every call when training.""" + imgs = imgs.reshape((-1, ) + imgs.shape[2:]) + losses = dict() + + x = self.extract_feat(imgs) + + if hasattr(self, 'debias_head'): + loss_debias = self.debias_head(x, target=labels.squeeze(), **kwargs) + losses.update(loss_debias) + + if hasattr(self, 'neck'): + x, loss_aux = self.neck(x, labels.squeeze()) + losses.update(loss_aux) + + cls_score = self.cls_head(x) + gt_labels = labels.squeeze() + loss_cls = self.cls_head.loss(cls_score, gt_labels, **kwargs) + losses.update(loss_cls) + + return losses + + def _do_test(self, imgs): + """Defines the computation performed at every call when evaluation, + testing and gradcam.""" + num_segs = imgs.shape[1] + imgs = imgs.reshape((-1, ) + imgs.shape[2:]) + + x = self.extract_feat(imgs) + if hasattr(self, 'neck'): + x, _ = self.neck(x) + + cls_score = self.cls_head(x) + cls_score = self.average_clip(cls_score, num_segs) + + return cls_score + + def forward_test(self, imgs): + """Defines the computation performed at every call when evaluation and + testing.""" + # pdb.set_trace() + return self._do_test(imgs).cpu().numpy() + + def forward_dummy(self, imgs): + """Used for computing network FLOPs. + + See ``tools/analysis/get_flops.py``. + + Args: + imgs (torch.Tensor): Input images. + + Returns: + Tensor: Class score. 
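In the `_do_test` methods above, the head returns one score row per clip/crop and `self.average_clip(cls_score, cls_score.size()[0] // batches)` folds these back to one row per video, with `num_crops` inferred exactly as the comment block describes. A minimal plain-PyTorch sketch of that reduction (sizes are hypothetical; the real `average_clip` is inherited from the recognizer base class and its averaging mode depends on `test_cfg`):

```python
# Illustrative sketch only: averaging per-crop class scores back to per-video
# scores, mirroring the average_clip call above. All sizes are made up.
import torch

batches, num_crops, num_classes = 2, 3, 4
cls_score = torch.randn(batches * num_crops, num_classes)    # [batch * num_crops, num_classes]

num_crops_inferred = cls_score.size(0) // batches            # "calculate num_crops automatically"
video_score = cls_score.view(batches, num_crops_inferred, num_classes).mean(dim=1)
print(video_score.shape)                                     # torch.Size([2, 4]) -> one row per video
```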
+ """ + imgs = imgs.reshape((-1, ) + imgs.shape[2:]) + + x = self.extract_feat(imgs) + outs = (self.cls_head(x), ) + return outs + + def forward_gradcam(self, imgs): + """Defines the computation performed at every call when using gradcam + utils.""" + return self._do_test(imgs) + + def get_feat(self, imgs, return_score=False): + """Defines the computation performed at every call when using get_feat + utils.""" + num_segs = imgs.shape[1] + imgs = imgs.reshape((-1, ) + imgs.shape[2:]) + + x = self.extract_feat(imgs) + if hasattr(self, 'neck'): + x, _ = self.neck(x) # (num_clips * num_crops, 2048, 1, 8, 8) + + if return_score: + cls_score = self.cls_head(x) + cls_score = self.average_clip(cls_score, num_segs) + return x, cls_score + return x \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/recognizer3d_bnn.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/recognizer3d_bnn.py new file mode 100644 index 0000000..64150a3 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/recognizer3d_bnn.py @@ -0,0 +1,61 @@ +from ..registry import RECOGNIZERS +from .recognizer3d import Recognizer3D + + +@RECOGNIZERS.register_module() +class Recognizer3DBNN(Recognizer3D): + """3D recognizer model framework.""" + + def forward_train(self, imgs, labels, **kwargs): + """Defines the computation performed at every call when training.""" + imgs = imgs.reshape((-1, ) + imgs.shape[2:]) + losses = dict() + + x = self.extract_feat(imgs) + if hasattr(self, 'neck'): + x, loss_aux = self.neck(x, labels.squeeze()) + losses.update(loss_aux) + + outputs = self.cls_head(x, npass=self.train_cfg['npass'], testing=False) + # parse the outputs + cls_score = outputs['pred_mean'] + gt_labels = labels.squeeze() + loss_dict = self.cls_head.bnn_loss(cls_score, gt_labels, outputs, beta=self.train_cfg['loss_weight'], **kwargs) + losses.update(loss_dict) + + return losses + + def _do_test(self, imgs): + """Defines the computation performed at every call when evaluation, + testing and gradcam.""" + num_segs = imgs.shape[1] + imgs = imgs.reshape((-1, ) + imgs.shape[2:]) + + x = self.extract_feat(imgs) + if hasattr(self, 'neck'): + x, _ = self.neck(x) + + outputs = self.cls_head(x, npass=self.test_cfg['npass'], testing=True) + cls_score = outputs['pred_mean'] + cls_score = self.average_clip(cls_score, num_segs) + + return cls_score + + + def forward_dummy(self, imgs): + """Used for computing network FLOPs. + + See ``tools/analysis/get_flops.py``. + + Args: + imgs (torch.Tensor): Input images. + + Returns: + Tensor: Class score. 
+ """ + imgs = imgs.reshape((-1, ) + imgs.shape[2:]) + + x = self.extract_feat(imgs) + outputs = self.cls_head(x, npass=self.test_cfg['npass'], testing=True) + outs = (outputs['pred_mean'], ) + return outs diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/recognizer3d_rpl.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/recognizer3d_rpl.py new file mode 100644 index 0000000..a15a758 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/recognizers/recognizer3d_rpl.py @@ -0,0 +1,59 @@ +from ..registry import RECOGNIZERS +from .recognizer3d import Recognizer3D + + +@RECOGNIZERS.register_module() +class Recognizer3DRPL(Recognizer3D): + """3D recognizer model framework.""" + + def forward_train(self, imgs, labels, **kwargs): + """Defines the computation performed at every call when training.""" + imgs = imgs.reshape((-1, ) + imgs.shape[2:]) + losses = dict() + + x = self.extract_feat(imgs) + if hasattr(self, 'neck'): + x, loss_aux = self.neck(x, labels.squeeze()) + losses.update(loss_aux) + + outputs = self.cls_head(x) + gt_labels = labels.squeeze() + loss_dict = self.cls_head.loss_cls(outputs, gt_labels, **kwargs) + losses.update(loss_dict) + + return losses + + def _do_test(self, imgs): + """Defines the computation performed at every call when evaluation, + testing and gradcam.""" + num_segs = imgs.shape[1] + imgs = imgs.reshape((-1, ) + imgs.shape[2:]) + + x = self.extract_feat(imgs) + if hasattr(self, 'neck'): + x, _ = self.neck(x) + + outputs = self.cls_head(x) + cls_score = outputs['dist'] # the negative distance is equivalent to the cls_score before softmax + cls_score = self.average_clip(cls_score, num_segs) + + return cls_score + + + def forward_dummy(self, imgs): + """Used for computing network FLOPs. + + See ``tools/analysis/get_flops.py``. + + Args: + imgs (torch.Tensor): Input images. + + Returns: + Tensor: Class score. 
+ """ + imgs = imgs.reshape((-1, ) + imgs.shape[2:]) + + x = self.extract_feat(imgs) + outputs = self.cls_head(x) + outs = (outputs['dist'], ) + return outs diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/models/registry.py b/Downstream/Open-Set-Action-Recognition/mmaction/models/registry.py new file mode 100644 index 0000000..61dc40c --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/models/registry.py @@ -0,0 +1,8 @@ +from mmcv.utils import Registry + +BACKBONES = Registry('backbone') +NECKS = Registry('neck') +HEADS = Registry('head') +RECOGNIZERS = Registry('recognizer') +LOSSES = Registry('loss') +LOCALIZERS = Registry('localizer') diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/utils/__init__.py b/Downstream/Open-Set-Action-Recognition/mmaction/utils/__init__.py new file mode 100644 index 0000000..02236c0 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/utils/__init__.py @@ -0,0 +1,9 @@ +from .collect_env import collect_env +from .gradcam_utils import GradCAM +from .logger import get_root_logger +from .misc import get_random_string, get_shm_dir, get_thread_id + +__all__ = [ + 'get_root_logger', 'collect_env', 'get_random_string', 'get_thread_id', + 'get_shm_dir', 'GradCAM' +] diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/utils/collect_env.py b/Downstream/Open-Set-Action-Recognition/mmaction/utils/collect_env.py new file mode 100644 index 0000000..b2e2a6c --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/utils/collect_env.py @@ -0,0 +1,16 @@ +from mmcv.utils import collect_env as collect_basic_env +from mmcv.utils import get_git_hash + +import mmaction + + +def collect_env(): + env_info = collect_basic_env() + env_info['MMAction2'] = ( + mmaction.__version__ + '+' + get_git_hash(digits=7)) + return env_info + + +if __name__ == '__main__': + for name, val in collect_env().items(): + print(f'{name}: {val}') diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/utils/gradcam_utils.py b/Downstream/Open-Set-Action-Recognition/mmaction/utils/gradcam_utils.py new file mode 100644 index 0000000..d504842 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/utils/gradcam_utils.py @@ -0,0 +1,231 @@ +import torch +import torch.nn.functional as F + + +class GradCAM: + """GradCAM class helps create visualization results. + + Visualization results are blended by heatmaps and input images. + This class is modified from + https://github.com/facebookresearch/SlowFast/blob/master/slowfast/visualization/gradcam_utils.py # noqa + For more information about GradCAM, please visit: + https://arxiv.org/pdf/1610.02391.pdf + """ + + def __init__(self, model, target_layer_name, colormap='viridis'): + """Create GradCAM class with recognizer, target layername & colormap. + + Args: + model (nn.Module): the recognizer model to be used. + target_layer_name (str): name of convolutional layer to + be used to get gradients and feature maps from for creating + localization maps. + colormap (Optional[str]): matplotlib colormap used to create + heatmap. Default: 'viridis'. 
For more information, please visit + https://matplotlib.org/3.3.0/tutorials/colors/colormaps.html + """ + from ..models.recognizers import Recognizer2D, Recognizer3D + if isinstance(model, Recognizer2D): + self.is_recognizer2d = True + elif isinstance(model, Recognizer3D): + self.is_recognizer2d = False + else: + raise ValueError( + 'GradCAM utils only support Recognizer2D & Recognizer3D.') + + self.model = model + self.model.eval() + self.target_gradients = None + self.target_activations = None + + import matplotlib.pyplot as plt + self.colormap = plt.get_cmap(colormap) + self.data_mean = torch.tensor(model.cfg.img_norm_cfg['mean']) + self.data_std = torch.tensor(model.cfg.img_norm_cfg['std']) + self._register_hooks(target_layer_name) + + def _register_hooks(self, layer_name): + """Register forward and backward hook to a layer, given layer_name, to + obtain gradients and activations. + + Args: + layer_name (str): name of the layer. + """ + + def get_gradients(module, grad_input, grad_output): + self.target_gradients = grad_output[0].detach() + + def get_activations(module, input, output): + self.target_activations = output.clone().detach() + + layer_ls = layer_name.split('/') + prev_module = self.model + for layer in layer_ls: + prev_module = prev_module._modules[layer] + + target_layer = prev_module + target_layer.register_forward_hook(get_activations) + target_layer.register_backward_hook(get_gradients) + + def _calculate_localization_map(self, inputs, use_labels, delta=1e-20): + """Calculate localization map for all inputs with Grad-CAM. + + Args: + inputs (dict): model inputs, generated by test pipeline, + at least including two keys, ``imgs`` and ``label``. + use_labels (bool): Whether to use given labels to generate + localization map. Labels are in ``inputs['label']``. + delta (float): used in localization map normalization, + must be small enough. Please make sure + `localization_map_max - localization_map_min >> delta` + Returns: + tuple[torch.Tensor, torch.Tensor]: (localization_map, preds) + localization_map (torch.Tensor): the localization map for + input imgs. + preds (torch.Tensor): Model predictions for `inputs` with + shape (batch_size, num_classes). 
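For reference, `_calculate_localization_map` just below implements the standard Grad-CAM formulation from the paper linked in the class docstring: the channel weights are the average of the gradients over all spatial (and, for video, temporal) positions, and the map is the ReLU of the weighted sum of activations, which the code then resizes to the input resolution with trilinear interpolation. With $A^{k}$ the $k$-th channel of the target layer's activations, $y^{c}$ the score for class $c$, and $Z$ the number of averaged positions:

$$
\alpha^{c}_{k} \;=\; \frac{1}{Z}\sum_{i,j}\frac{\partial y^{c}}{\partial A^{k}_{ij}},
\qquad
L^{c}_{\mathrm{Grad\text{-}CAM}} \;=\; \mathrm{ReLU}\!\left(\sum_{k}\alpha^{c}_{k}\,A^{k}\right)
$$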
+ """ + inputs['imgs'] = inputs['imgs'].clone() + + # model forward & backward + preds = self.model(gradcam=True, **inputs) + if use_labels: + labels = inputs['label'] + if labels.ndim == 1: + labels = labels.unsqueeze(-1) + score = torch.gather(preds, dim=1, index=labels) + else: + score = torch.max(preds, dim=-1)[0] + self.model.zero_grad() + score = torch.sum(score) + score.backward() + + if self.is_recognizer2d: + # [batch_size, num_segments, 3, H, W] + b, t, _, h, w = inputs['imgs'].size() + else: + # [batch_size, num_crops*num_clips, 3, clip_len, H, W] + b1, b2, _, t, h, w = inputs['imgs'].size() + b = b1 * b2 + + gradients = self.target_gradients + activations = self.target_activations + if self.is_recognizer2d: + # [B*Tg, C', H', W'] + b_tg, c, _, _ = gradients.size() + tg = b_tg // b + else: + # source shape: [B, C', Tg, H', W'] + _, c, tg, _, _ = gradients.size() + # target shape: [B, Tg, C', H', W'] + gradients = gradients.permute(0, 2, 1, 3, 4) + activations = activations.permute(0, 2, 1, 3, 4) + + # calculate & resize to [B, 1, T, H, W] + weights = torch.mean(gradients.view(b, tg, c, -1), dim=3) + weights = weights.view(b, tg, c, 1, 1) + activations = activations.view([b, tg, c] + + list(activations.size()[-2:])) + localization_map = torch.sum( + weights * activations, dim=2, keepdim=True) + localization_map = F.relu(localization_map) + localization_map = localization_map.permute(0, 2, 1, 3, 4) + localization_map = F.interpolate( + localization_map, + size=(t, h, w), + mode='trilinear', + align_corners=False) + + # Normalize the localization map. + localization_map_min, localization_map_max = ( + torch.min(localization_map.view(b, -1), dim=-1, keepdim=True)[0], + torch.max(localization_map.view(b, -1), dim=-1, keepdim=True)[0]) + localization_map_min = torch.reshape( + localization_map_min, shape=(b, 1, 1, 1, 1)) + localization_map_max = torch.reshape( + localization_map_max, shape=(b, 1, 1, 1, 1)) + localization_map = (localization_map - localization_map_min) / ( + localization_map_max - localization_map_min + delta) + localization_map = localization_map.data + + return localization_map.squeeze(dim=1), preds + + def _alpha_blending(self, localization_map, input_imgs, alpha): + """Blend heatmaps and model input images and get visulization results. + + Args: + localization_map (torch.Tensor): localization map for all inputs, + generated with Grad-CAM + input_imgs (torch.Tensor): model inputs, normed images. + alpha (float): transparency level of the heatmap, + in the range [0, 1]. + Returns: + torch.Tensor: blending results for localization map and input + images, with shape [B, T, H, W, 3] and pixel values in + RGB order within range [0, 1]. + """ + # localization_map shape [B, T, H, W] + localization_map = localization_map.cpu() + + # heatmap shape [B, T, H, W, 3] in RGB order + heatmap = self.colormap(localization_map.detach().numpy()) + heatmap = heatmap[:, :, :, :, :3] + heatmap = torch.from_numpy(heatmap) + + # Permute input imgs to [B, T, H, W, 3], like heatmap + if self.is_recognizer2d: + # Recognizer2D input (B, T, C, H, W) + curr_inp = input_imgs.permute(0, 1, 3, 4, 2) + else: + # Recognizer3D input (B', num_clips*num_crops, C, T, H, W) + # B = B' * num_clips * num_crops + curr_inp = input_imgs.view([-1] + list(input_imgs.size()[2:])) + curr_inp = curr_inp.permute(0, 2, 3, 4, 1) + + # renormalize input imgs to [0, 1] + curr_inp = curr_inp.cpu() + curr_inp *= self.data_std + curr_inp += self.data_mean + curr_inp /= 255. 
+ + # alpha blending + blended_imgs = alpha * heatmap + (1 - alpha) * curr_inp + + return blended_imgs + + def __call__(self, inputs, use_labels=False, alpha=0.5): + """Visualize the localization maps on their corresponding inputs as + heatmap, using Grad-CAM. + + Generate visualization results for **ALL CROPS**. + For example, for I3D model, if `clip_len=32, num_clips=10` and + use `ThreeCrop` in test pipeline, then for every model inputs, + there are 960(32*10*3) images generated. + + Args: + inputs (dict): model inputs, generated by test pipeline, + at least including two keys, ``imgs`` and ``label``. + use_labels (bool): Whether to use given labels to generate + localization map. Labels are in ``inputs['label']``. + alpha (float): transparency level of the heatmap, + in the range [0, 1]. + Returns: + blended_imgs (torch.Tensor): Visualization results, blended by + localization maps and model inputs. + preds (torch.Tensor): Model predictions for inputs. + """ + + # localization_map shape [B, T, H, W] + # preds shape [batch_size, num_classes] + localization_map, preds = self._calculate_localization_map( + inputs, use_labels=use_labels) + + # blended_imgs shape [B, T, H, W, 3] + blended_imgs = self._alpha_blending(localization_map, inputs['imgs'], + alpha) + + # blended_imgs shape [B, T, H, W, 3] + # preds shape [batch_size, num_classes] + # Recognizer2D: B = batch_size, T = num_segments + # Recognizer3D: B = batch_size * num_crops * num_clips, T = clip_len + return blended_imgs, preds diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/utils/logger.py b/Downstream/Open-Set-Action-Recognition/mmaction/utils/logger.py new file mode 100644 index 0000000..7950860 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/utils/logger.py @@ -0,0 +1,24 @@ +import logging + +from mmcv.utils import get_logger + + +def get_root_logger(log_file=None, log_level=logging.INFO): + """Use ``get_logger`` method in mmcv to get the root logger. + + The logger will be initialized if it has not been initialized. By default a + StreamHandler will be added. If ``log_file`` is specified, a FileHandler + will also be added. The name of the root logger is the top-level package + name, e.g., "mmaction". + + Args: + log_file (str | None): The log filename. If specified, a FileHandler + will be added to the root logger. + log_level (int): The root logger level. Note that only the process of + rank 0 is affected, while other processes will set the level to + "Error" and be silent most of the time. + + Returns: + :obj:`logging.Logger`: The root logger. + """ + return get_logger(__name__.split('.')[0], log_file, log_level) diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/utils/misc.py b/Downstream/Open-Set-Action-Recognition/mmaction/utils/misc.py new file mode 100644 index 0000000..5ec9550 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/utils/misc.py @@ -0,0 +1,26 @@ +import ctypes +import random +import string + + +def get_random_string(length=15): + """Get random string with letters and digits. + + Args: + length (int): Length of random string. Default: 15. 
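A minimal usage sketch for the `GradCAM` helper above. The target layer name, the input shapes, and the way the recognizer is obtained are assumptions for illustration only; the class itself just needs a built `Recognizer2D`/`Recognizer3D` with `model.cfg.img_norm_cfg` attached, as its constructor shows.

```python
# Hypothetical usage of mmaction.utils.GradCAM. `build_recognizer()` is a
# stand-in for however the recognizer is constructed in this repo; the layer
# name and tensor shapes are illustrative, not taken from a real config.
import torch
from mmaction.utils import GradCAM

model = build_recognizer()            # assumed: returns a Recognizer2D with model.cfg set
gradcam = GradCAM(model, target_layer_name='backbone/layer4')

inputs = {
    'imgs': torch.randn(1, 8, 3, 224, 224),   # [batch, num_segments, C, H, W] for a 2D recognizer
    'label': torch.tensor([[0]]),
}
blended_imgs, preds = gradcam(inputs, use_labels=True, alpha=0.5)
# blended_imgs: [B, T, H, W, 3] heatmap overlays in [0, 1]; preds: [batch, num_classes]
```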
+ """ + return ''.join( + random.choice(string.ascii_letters + string.digits) + for _ in range(length)) + + +def get_thread_id(): + """Get current thread id.""" + # use ctype to find thread id + thread_id = ctypes.CDLL('libc.so.6').syscall(186) + return thread_id + + +def get_shm_dir(): + """Get shm dir for temporary usage.""" + return '/dev/shm' diff --git a/Downstream/Open-Set-Action-Recognition/mmaction/version.py b/Downstream/Open-Set-Action-Recognition/mmaction/version.py new file mode 100644 index 0000000..48c1ac9 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/mmaction/version.py @@ -0,0 +1,18 @@ +# Copyright (c) Open-MMLab. All rights reserved. + +__version__ = '0.9.0' + + +def parse_version_info(version_str): + version_info = [] + for x in version_str.split('.'): + if x.isdigit(): + version_info.append(int(x)) + elif x.find('rc') != -1: + patch_version = x.split('rc') + version_info.append(int(patch_version[0])) + version_info.append(f'rc{patch_version[1]}') + return tuple(version_info) + + +version_info = parse_version_info(__version__) diff --git a/Downstream/Open-Set-Action-Recognition/requirements.txt b/Downstream/Open-Set-Action-Recognition/requirements.txt new file mode 100644 index 0000000..6981bd7 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/requirements.txt @@ -0,0 +1,4 @@ +-r requirements/build.txt +-r requirements/optional.txt +-r requirements/runtime.txt +-r requirements/tests.txt diff --git a/Downstream/Open-Set-Action-Recognition/requirements/build.txt b/Downstream/Open-Set-Action-Recognition/requirements/build.txt new file mode 100644 index 0000000..71888da --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/requirements/build.txt @@ -0,0 +1,3 @@ +# These must be installed before building mmaction2 +numpy +torch>=1.3 diff --git a/Downstream/Open-Set-Action-Recognition/requirements/docs.txt b/Downstream/Open-Set-Action-Recognition/requirements/docs.txt new file mode 100644 index 0000000..89fbf86 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/requirements/docs.txt @@ -0,0 +1,4 @@ +recommonmark +sphinx +sphinx_markdown_tables +sphinx_rtd_theme diff --git a/Downstream/Open-Set-Action-Recognition/requirements/optional.txt b/Downstream/Open-Set-Action-Recognition/requirements/optional.txt new file mode 100644 index 0000000..5d81d94 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/requirements/optional.txt @@ -0,0 +1,6 @@ +av +decord >= 0.4.1 +moviepy +onnx +onnxruntime +PyTurboJPEG diff --git a/Downstream/Open-Set-Action-Recognition/requirements/readthedocs.txt b/Downstream/Open-Set-Action-Recognition/requirements/readthedocs.txt new file mode 100644 index 0000000..0542bfc --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/requirements/readthedocs.txt @@ -0,0 +1,3 @@ +mmcv +torch +torchvision diff --git a/Downstream/Open-Set-Action-Recognition/requirements/runtime.txt b/Downstream/Open-Set-Action-Recognition/requirements/runtime.txt new file mode 100644 index 0000000..e9fee58 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/requirements/runtime.txt @@ -0,0 +1,4 @@ +matplotlib +numpy +opencv-contrib-python +Pillow diff --git a/Downstream/Open-Set-Action-Recognition/requirements/tests.txt b/Downstream/Open-Set-Action-Recognition/requirements/tests.txt new file mode 100644 index 0000000..29d87ad --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/requirements/tests.txt @@ -0,0 +1,9 @@ +coverage +flake8 +interrogate +isort==4.3.21 +pytest +pytest-runner +xdoctest >= 0.10.0 +yapf 
+terminaltables==3.1.0 \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/setup.cfg b/Downstream/Open-Set-Action-Recognition/setup.cfg new file mode 100644 index 0000000..e720d52 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/setup.cfg @@ -0,0 +1,24 @@ +[bdist_wheel] +universal=1 + +[aliases] +test=pytest + +[tool:pytest] +addopts=tests/ + +[yapf] +based_on_style = pep8 +blank_line_before_nested_class_or_def = true +split_before_expression_after_opening_paren = true +split_penalty_import_names=0 +SPLIT_PENALTY_AFTER_OPENING_BRACKET=800 + +[isort] +line_length = 79 +multi_line_output = 0 +known_standard_library = pkg_resources,setuptools +known_first_party = mmaction +known_third_party = cv2,joblib,matplotlib,mmcv,numpy,pandas,pytest,scipy,seaborn,torch +no_lines_before = STDLIB,LOCALFOLDER +default_section = THIRDPARTY diff --git a/Downstream/Open-Set-Action-Recognition/setup.py b/Downstream/Open-Set-Action-Recognition/setup.py new file mode 100644 index 0000000..e648563 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/setup.py @@ -0,0 +1,128 @@ +from setuptools import find_packages, setup + + +def readme(): + with open('README.md', encoding='utf-8') as f: + content = f.read() + return content + + +version_file = 'mmaction/version.py' + + +def get_version(): + with open(version_file, 'r') as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +def parse_requirements(fname='requirements.txt', with_version=True): + """Parse the package dependencies listed in a requirements file but strips + specific versioning information. + + Args: + fname (str): path to requirements file + with_version (bool, default=False): if True include version specs + + Returns: + List[str]: list of requirements items + + CommandLine: + python -c "import setup; print(setup.parse_requirements())" + """ + import re + import sys + from os.path import exists + require_fpath = fname + + def parse_line(line): + """Parse information from a line in a requirements text file.""" + if line.startswith('-r '): + # Allow specifying requirements in other files + target = line.split(' ')[1] + for info in parse_require_file(target): + yield info + else: + info = {'line': line} + if line.startswith('-e '): + info['package'] = line.split('#egg=')[1] + elif '@git+' in line: + info['package'] = line + else: + # Remove versioning from the package + pat = '(' + '|'.join(['>=', '==', '>']) + ')' + parts = re.split(pat, line, maxsplit=1) + parts = [p.strip() for p in parts] + + info['package'] = parts[0] + if len(parts) > 1: + op, rest = parts[1:] + if ';' in rest: + # Handle platform specific dependencies + # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies + version, platform_deps = map(str.strip, + rest.split(';')) + info['platform_deps'] = platform_deps + else: + version = rest # NOQA + info['version'] = (op, version) + yield info + + def parse_require_file(fpath): + with open(fpath, 'r') as f: + for line in f.readlines(): + line = line.strip() + if line and not line.startswith('#'): + for info in parse_line(line): + yield info + + def gen_packages_items(): + if exists(require_fpath): + for info in parse_require_file(require_fpath): + parts = [info['package']] + if with_version and 'version' in info: + parts.extend(info['version']) + if not sys.version.startswith('3.4'): + # apparently package_deps are broken in 3.4 + platform_deps = info.get('platform_deps') + if platform_deps is not None: + 
parts.append(';' + platform_deps) + item = ''.join(parts) + yield item + + packages = list(gen_packages_items()) + return packages + + +if __name__ == '__main__': + setup( + name='mmaction2', + version=get_version(), + description='OpenMMLab Action Understanding Toolbox and Benchmark', + long_description=readme(), + long_description_content_type='text/markdown', + maintainer='MMAction2 Authors', + maintainer_email='openmmlab@gmail.com', + packages=find_packages(exclude=('configs', 'tools', 'demo')), + keywords='computer vision, action understanding', + classifiers=[ + 'Development Status :: 4 - Beta', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + ], + url='https://github.com/open-mmlab/mmaction2', + license='Apache License 2.0', + setup_requires=parse_requirements('requirements/build.txt'), + tests_require=parse_requirements('requirements/tests.txt'), + install_requires=parse_requirements('requirements/runtime.txt'), + extras_require={ + 'all': parse_requirements('requirements.txt'), + 'tests': parse_requirements('requirements/tests.txt'), + 'build': parse_requirements('requirements/build.txt'), + 'optional': parse_requirements('requirements/optional.txt'), + }, + zip_safe=False) diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/action_test_anno.json b/Downstream/Open-Set-Action-Recognition/tests/data/action_test_anno.json new file mode 100644 index 0000000..e19fc76 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/data/action_test_anno.json @@ -0,0 +1,34 @@ + { + "v_test1": { + "duration_second": 1, + "duration_frame": 30, + "annotations": [ + { + "segment": [ + 0.3, + 0.6 + ], + "label": "Rock climbing" + } + ], + "feature_frame": 30, + "fps": 30.0, + "rfps": 30 + }, + "v_test2": { + "duration_second": 2, + "duration_frame": 48, + "annotations": [ + { + "segment": [ + 1.0, + 2.0 + ], + "label": "Drinking beer" + } + ], + "feature_frame": 48, + "fps": 24.0, + "rfps": 24.0 + } + } diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/audio_feature_test_list.txt b/Downstream/Open-Set-Action-Recognition/tests/data/audio_feature_test_list.txt new file mode 100644 index 0000000..98a4ea9 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/data/audio_feature_test_list.txt @@ -0,0 +1,2 @@ +test 100 127 +test 100 127 diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/audio_test_list.txt b/Downstream/Open-Set-Action-Recognition/tests/data/audio_test_list.txt new file mode 100644 index 0000000..57935b0 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/data/audio_test_list.txt @@ -0,0 +1,2 @@ +test.wav 100 127 +test.wav 100 127 diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/frame_test_list.txt b/Downstream/Open-Set-Action-Recognition/tests/data/frame_test_list.txt new file mode 100644 index 0000000..178fb0a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/data/frame_test_list.txt @@ -0,0 +1,2 @@ +test_imgs 5 127 +test_imgs 5 127 diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/frame_test_list_multi_label.txt b/Downstream/Open-Set-Action-Recognition/tests/data/frame_test_list_multi_label.txt new file mode 100644 index 0000000..ddd9dff --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/data/frame_test_list_multi_label.txt @@ -0,0 +1,2 @@ +test_imgs 5 
1 +test_imgs 5 3 5 diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/frame_test_list_with_offset.txt b/Downstream/Open-Set-Action-Recognition/tests/data/frame_test_list_with_offset.txt new file mode 100644 index 0000000..bd44d27 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/data/frame_test_list_with_offset.txt @@ -0,0 +1,2 @@ +test_imgs 2 5 127 +test_imgs 2 5 127 diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/hvu_frame_test_anno.json b/Downstream/Open-Set-Action-Recognition/tests/data/hvu_frame_test_anno.json new file mode 100644 index 0000000..3b3c7a8 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/data/hvu_frame_test_anno.json @@ -0,0 +1,24 @@ +[ + { + "frame_dir":"test_imgs", + "total_frames":5, + "label":{ + "concept":[250, 131, 42, 51, 57, 155, 122], + "object":[1570, 508], + "event":[16], + "action":[180], + "scene":[206] + } + }, + { + "frame_dir":"test_imgs", + "total_frames":5, + "label":{ + "concept":[250, 131, 42, 51, 57, 155, 122], + "object":[1570, 508], + "event":[16], + "action":[180], + "scene":[206] + } + } +] diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/hvu_video_eval_test_anno.json b/Downstream/Open-Set-Action-Recognition/tests/data/hvu_video_eval_test_anno.json new file mode 100644 index 0000000..f0f98dd --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/data/hvu_video_eval_test_anno.json @@ -0,0 +1,18 @@ +[ + { + "filename":"test.mp4", + "label":{ + "action": [2], + "scene": [2], + "object": [1] + } + }, + { + "filename":"test.avi", + "label":{ + "action": [1], + "scene": [1], + "object": [2] + } + } +] diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/hvu_video_test_anno.json b/Downstream/Open-Set-Action-Recognition/tests/data/hvu_video_test_anno.json new file mode 100644 index 0000000..ae20d24 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/data/hvu_video_test_anno.json @@ -0,0 +1,22 @@ +[ + { + "filename":"tmp.mp4", + "label":{ + "concept":[250, 131, 42, 51, 57, 155, 122], + "object":[1570, 508], + "event":[16], + "action":[180], + "scene":[206] + } + }, + { + "filename":"tmp.mp4", + "label":{ + "concept":[250, 131, 42, 51, 57, 155, 122], + "object":[1570, 508], + "event":[16], + "action":[180], + "scene":[206] + } + } +] diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/proposal_normalized_list.txt b/Downstream/Open-Set-Action-Recognition/tests/data/proposal_normalized_list.txt new file mode 100644 index 0000000..18914f4 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/data/proposal_normalized_list.txt @@ -0,0 +1,18 @@ +# 0 +test_imgs +5 +1 +2 +3 0.2000 0.4000 +3 0.6000 1.0000 +10 +3 1.0000 1.0000 0.2000 0.4000 +3 0.5000 0.5000 0.2000 0.6000 +3 0.3333 0.3333 0.2000 0.8000 +3 0.5000 0.5000 0.2000 1.0000 +3 0.0000 0.0000 0.4000 0.6000 +3 0.3333 0.5000 0.4000 0.8000 +3 0.6666 0.6666 0.4000 1.0000 +3 0.5000 1.0000 0.6000 0.8000 +3 1.0000 1.0000 0.6000 1.0000 +3 0.5000 1.0000 0.8000 1.0000 diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/proposal_test_list.txt b/Downstream/Open-Set-Action-Recognition/tests/data/proposal_test_list.txt new file mode 100644 index 0000000..8dc8dea --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/data/proposal_test_list.txt @@ -0,0 +1,18 @@ +# 0 +test_imgs +5 +1 +2 +3 1 2 +3 3 5 +10 +3 1.0000 1.0000 1 2 +3 0.5000 0.5000 1 3 +3 0.3333 0.3333 1 4 +3 0.5000 0.5000 1 5 +3 0.0000 0.0000 2 3 +3 0.3333 0.5000 2 4 +3 0.6666 0.6666 2 5 +3 0.5000 1.0000 3 4 +3 1.0000 
1.0000 3 5 +3 0.5000 1.0000 4 5 diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/rawvideo_test_anno.json b/Downstream/Open-Set-Action-Recognition/tests/data/rawvideo_test_anno.json new file mode 100644 index 0000000..821b6dd --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/data/rawvideo_test_anno.json @@ -0,0 +1,8 @@ +[ + { + "video_dir":"test_rawvideo_dataset", + "label":1, + "num_clips":2, + "positive_clip_inds":[0] + } +] diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/rawvideo_test_anno.txt b/Downstream/Open-Set-Action-Recognition/tests/data/rawvideo_test_anno.txt new file mode 100644 index 0000000..b65a54e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/data/rawvideo_test_anno.txt @@ -0,0 +1 @@ +test_rawvideo_dataset 1 2 0 diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test.jpg b/Downstream/Open-Set-Action-Recognition/tests/data/test.jpg new file mode 100644 index 0000000..d88aea0 Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/tests/data/test.jpg differ diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test.mp4 b/Downstream/Open-Set-Action-Recognition/tests/data/test.mp4 new file mode 100644 index 0000000..ef46c79 Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/tests/data/test.mp4 differ diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test.wav b/Downstream/Open-Set-Action-Recognition/tests/data/test.wav new file mode 100644 index 0000000..c66741b Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/tests/data/test.wav differ diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_activitynet_features/v_test1.csv b/Downstream/Open-Set-Action-Recognition/tests/data/test_activitynet_features/v_test1.csv new file mode 100644 index 0000000..5e713e7 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/data/test_activitynet_features/v_test1.csv @@ -0,0 +1,6 @@ 
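The `v_test1.csv` fixture whose hunk starts here is a plain CSV of ActivityNet snippet features: a header row `f0`…`f399` followed by five 400-dimensional feature rows (six added lines in total, matching the hunk header). A minimal sketch of loading it, assuming pandas is available; the path is the repo-relative one from the diff:

```python
# Sketch of reading the test fixture added below; pandas is assumed to be installed.
import pandas as pd

csv_path = ('Downstream/Open-Set-Action-Recognition/tests/data/'
            'test_activitynet_features/v_test1.csv')
feat = pd.read_csv(csv_path)
print(feat.shape)                     # (5, 400): five temporal snippets, 400 feature channels each
print(list(feat.columns[:3]))         # ['f0', 'f1', 'f2']
```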
+f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,f19,f20,f21,f22,f23,f24,f25,f26,f27,f28,f29,f30,f31,f32,f33,f34,f35,f36,f37,f38,f39,f40,f41,f42,f43,f44,f45,f46,f47,f48,f49,f50,f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61,f62,f63,f64,f65,f66,f67,f68,f69,f70,f71,f72,f73,f74,f75,f76,f77,f78,f79,f80,f81,f82,f83,f84,f85,f86,f87,f88,f89,f90,f91,f92,f93,f94,f95,f96,f97,f98,f99,f100,f101,f102,f103,f104,f105,f106,f107,f108,f109,f110,f111,f112,f113,f114,f115,f116,f117,f118,f119,f120,f121,f122,f123,f124,f125,f126,f127,f128,f129,f130,f131,f132,f133,f134,f135,f136,f137,f138,f139,f140,f141,f142,f143,f144,f145,f146,f147,f148,f149,f150,f151,f152,f153,f154,f155,f156,f157,f158,f159,f160,f161,f162,f163,f164,f165,f166,f167,f168,f169,f170,f171,f172,f173,f174,f175,f176,f177,f178,f179,f180,f181,f182,f183,f184,f185,f186,f187,f188,f189,f190,f191,f192,f193,f194,f195,f196,f197,f198,f199,f200,f201,f202,f203,f204,f205,f206,f207,f208,f209,f210,f211,f212,f213,f214,f215,f216,f217,f218,f219,f220,f221,f222,f223,f224,f225,f226,f227,f228,f229,f230,f231,f232,f233,f234,f235,f236,f237,f238,f239,f240,f241,f242,f243,f244,f245,f246,f247,f248,f249,f250,f251,f252,f253,f254,f255,f256,f257,f258,f259,f260,f261,f262,f263,f264,f265,f266,f267,f268,f269,f270,f271,f272,f273,f274,f275,f276,f277,f278,f279,f280,f281,f282,f283,f284,f285,f286,f287,f288,f289,f290,f291,f292,f293,f294,f295,f296,f297,f298,f299,f300,f301,f302,f303,f304,f305,f306,f307,f308,f309,f310,f311,f312,f313,f314,f315,f316,f317,f318,f319,f320,f321,f322,f323,f324,f325,f326,f327,f328,f329,f330,f331,f332,f333,f334,f335,f336,f337,f338,f339,f340,f341,f342,f343,f344,f345,f346,f347,f348,f349,f350,f351,f352,f353,f354,f355,f356,f357,f358,f359,f360,f361,f362,f363,f364,f365,f366,f367,f368,f369,f370,f371,f372,f373,f374,f375,f376,f377,f378,f379,f380,f381,f382,f383,f384,f385,f386,f387,f388,f389,f390,f391,f392,f393,f394,f395,f396,f397,f398,f399 
+-2.52400826749,0.0481050342173,-0.727137195971,2.75537272315,3.09127621822,-1.57007092339,-0.418208286763,0.0913230466118,-0.536148328353,-0.527615223662,1.09348152733,-0.740857539139,1.03076939449,0.947990020203,-0.00932133916349,0.546988826083,-0.737920381243,0.823520260094,-1.44379751155,1.67705288164,1.85386635752,0.62453161102,1.13374109944,-0.161873651211,1.40335457467,0.267813141882,1.40327533282,0.143771933515,-0.29447495679,0.779869429758,-1.38585145822,-0.361671252653,-1.46679541523,0.0859254586217,0.266080879981,-0.680839165484,-0.774731957742,-0.618207527285,1.57201336054,0.875829197772,-0.896498123858,-2.55398872891,-0.796735937603,-0.338483746318,0.511324636391,-1.21437529424,-0.0488620607446,0.253289302886,2.71006785221,-0.573161459164,-0.341657902954,-0.854258292083,0.562081610284,-0.828878082845,2.00327134909,1.29068322546,-0.418051389774,1.14570354001,1.39098484308,-1.13415579068,-1.01751984858,-0.823485884605,0.354335798556,1.79059040272,0.609877418462,-1.01807533199,1.56390048495,1.00308338848,0.226345738051,-0.145077751076,0.0986133282503,-0.0274079232177,0.0618308794267,2.33058959297,0.0527062771437,-1.11440070055,-2.85928208684,2.15750540841,0.866524370256,-0.999664886812,0.65322760642,-1.01907039308,-0.827862563442,0.702348045951,-0.266591888881,-0.51787754913,-0.87550654118,-1.08840756221,-0.330164993751,-0.885034718769,-1.09602854198,-1.90739000514,-1.41201400125,3.55564525741,2.24864990051,1.85192671744,-0.886962869481,-0.706411036437,0.962288821262,-1.30219301658,0.0603706527015,-0.672105670826,-0.147220359933,-1.00931681574,-1.34130794644,-0.0213488208036,-0.965187689045,0.427090878957,-0.922304333641,-1.13947635577,0.637382086489,-1.706998011,0.00132625548269,0.663770250584,1.58249601114,-1.04340366269,0.375227416108,-0.0870821477482,0.551722806776,0.588611513848,-0.477017772079,-1.51536188044,0.237936462599,0.31261506067,-0.198127712396,-0.318572429209,-1.18890325315,0.035582087437,2.67528950232,-0.197889107378,1.55762961412,0.104639883842,-1.66993450781,0.702282006582,1.36717389178,0.634535223722,2.85315937821,-1.27367064913,0.483830422936,-0.869812565212,0.641265734616,-0.11914733068,1.0239396073,-3.92902142357,0.694317328488,1.34085481986,-0.135329176331,0.0261293066915,-0.303456270416,0.909167548313,-2.04735304332,-0.285427697695,-1.03457319064,-2.77420531572,0.197031497599,-0.520362589547,-1.37924786457,-0.418569629841,1.54322130788,1.83725603097,3.35605137842,-0.117215889143,-0.970470848036,-0.339063598965,1.57921290781,0.196319119013,-1.22568776573,-0.448961007657,0.609897182756,-0.168152849526,0.254480323573,-0.51589471003,-0.253088873187,-0.716572365129,-1.56268640697,-3.33835895995,-0.679914745818,0.107016925667,-1.61204098026,-0.387739681651,2.40210230323,-1.0956975287,-1.72501473746,-0.766200882827,0.752211827669,1.55532805525,0.113983938016,4.54239864121,-1.36827292666,-1.88835217549,1.40817465219,0.708602657522,1.31514883588,0.0314930005956,-0.79571607963,0.75615035674,1.14977174081,-1.72166323668,0.565034879125,-1.41448308724,-1.57710396359,-1.17078288789,1.1485206762,0.393694747107,1.20387821507,0.699366232003,1.80047030851,1.42655580688,-1.41627641805,-0.0899006423315,1.0611155262,-1.131250839,2.23898952868,-3.58230877813,-0.889216990584,1.40956827182,-1.46751403757,-0.691296854089,-1.54265676827,2.65262625498,2.19788404633,-2.01697903653,0.611521417417,0.359316692791,4.6816105414,0.862952723244,0.167491980372,2.6932665368,-3.00625465314,-0.351348050268,-0.89827277051,1.1813078626,-0.683418750015,0.612255702038,1.80744153164,0.0561640557506,-
1.55411351133,0.711329718813,-3.72017506799,0.381065155569,-0.414420442519,-1.60570235569,-0.599320146458,1.05618929973,-1.47036342112,1.14814616981,-0.245414197276,-1.86036272008,2.96957122081,-1.61679375941,-0.50189343957,3.2102935297,3.52676818145,3.37559696234,1.65133903096,1.07003903059,0.246458431642,-2.86996585644,2.9472088513,0.156860758686,2.65348488352,-1.65249707957,-1.10731408448,1.62994935577,-1.96909845304,-1.9090510476,2.51069158859,-1.65984114813,0.148115664273,1.10611308391,1.18241718985,-4.85953441229,-1.0049765752,3.88280249662,-1.75265659238,0.372608524032,-2.22002927662,1.18168715581,-2.87508345833,-0.676288569625,-2.44675108062,-1.55716385372,-1.62059798953,0.724381881496,-0.960783561886,-0.552230426264,0.121615798579,1.04462357852,0.118085120237,1.26606201262,-0.380661477003,-2.58578204132,4.03374155601,-2.25326988394,-2.88061044978,3.26819336615,1.91267201179,-0.19674664532,2.05710699236,-3.54867236793,-0.326269919106,0.752888089223,0.132116086772,-1.54644230279,-2.836589684,0.141382075407,-1.44156945706,1.19807019893,1.68431397116,0.438746488152,-2.06834516275,-0.842738093366,0.465043608979,-0.629041527666,-0.0120976683258,-3.00099798249,-1.73881566772,0.881273090875,-0.540746588847,-0.38645376593,-2.43880278615,-0.563591295604,1.477140512,-1.75295748363,1.76406287775,2.66264589914,0.484454554128,0.273973214982,-2.05206947308,-0.369256326252,-0.689306857174,1.66270560488,-0.131857610115,0.955091272134,-1.60116198558,-2.28544168464,2.11164102397,-4.18991734267,0.173959671197,-0.0354114097397,-1.4089728089,-0.311132524,1.89336391541,2.43192427419,1.01858890895,2.03606205304,1.62452822335,3.64225894583,2.28056802496,5.64531833088,-1.1566376147,2.07540663589,0.620578413989,0.750977221371,0.0162535885321,-2.16207619048,-0.105952032448,-0.117025236938,-2.50755272675,1.48142693144,-0.430885550216,2.23543980132,-0.326485130108,0.0243268507167,2.06152002688,-1.02234084951,-2.0303752323,0.561301589735,2.3433107876,-0.925805005171,2.80904484078,-1.94807647011,0.329007639042,0.397634451785,1.47111085828,-2.50084066219,1.09999789629,-2.99330297808,-0.0599839422321,-1.9690194292,0.960052060426,-2.19808352939,-2.01816409011,-5.65800942077,-0.0169289777679,1.16420775694,0.723551353918,0.643957264021,-0.140148446853,-0.056547111384,1.91572655252,-1.37543404733,0.484043939791,2.79265339713,-1.17311209973,-0.371278463653,0.469582405128,-2.31444814128,1.41635027072,-1.07100369346 
+-4.16998558362,2.12975610028,-2.56134395649,7.28089529038,5.71112143199,-1.43967841108,-2.27770995537,-0.621412650546,-1.44766437213,-2.65973161459,1.36775091092,-0.475116016803,-0.587382383942,4.81157625596,0.770176066954,0.363275742132,-0.0876347057022,-0.475521533538,-0.0547252563637,4.64327842236,3.68908154567,2.63090462903,4.96261648734,-2.3996240147,0.249490239721,1.12136919369,2.95945439398,-1.5711039712,2.68638911406,0.584886546134,-2.50314228614,-2.72285134157,0.61815967679,-1.74822253416,-0.311564020118,-2.74809125702,1.47346679886,-3.40588476142,1.47545339028,3.02455658674,-3.94506848613,-4.14376579285,-1.73336583535,-2.40840473334,-2.22219073812,-5.15251653036,0.988312865494,1.78566960146,6.54388860067,-1.45725802938,0.214708279868,-2.72405630668,2.83319289843,-1.85521226009,3.58616267999,3.34310981591,1.02165599783,3.42570413748,0.846149519881,-2.93276470105,-1.80281494916,-4.22263733625,-1.52749340316,3.2283666563,4.42827975909,-1.44139790932,1.73660321256,1.17811784268,4.59021838108,1.89355262021,0.455512814919,1.27808425168,1.62865997315,6.70429563522,-0.847455751549,-5.35004572391,-5.12095170339,6.48116056124,0.300556570692,-5.01764505545,-0.875816748044,-1.82039844963,-1.25923923691,0.632047503791,2.15801657677,-2.92180285851,0.511598025958,-2.96027669827,0.547309962512,-2.98510901829,-0.335630682309,-4.73974208434,-2.01421547413,3.362338895,5.79285810471,9.42033552887,-2.91738398632,1.82035643975,1.98379708379,-2.70420178073,-1.48058941424,1.56434452216,-0.992579338154,2.37859466165,-3.72032371362,1.26282515267,-3.50253353516,0.00376921892301,1.18962185065,-1.0557041204,0.54337829232,-1.99295026461,2.62920855999,3.76263545752,1.2841622142,-2.72069926341,-1.80479015474,1.58534218073,2.60577425917,-0.440677909057,2.20203198473,-3.39447330793,2.79975073894,2.23906295717,0.677189537287,1.39489221702,-0.518861652811,-1.19545238594,5.21395279209,2.14497482498,3.99990809123,-1.70296090881,-2.09669830044,-0.502894639969,3.01051452478,1.25882732471,-1.28701953888,-3.64675308704,0.679585470159,-3.88040889422,0.100971349178,-3.87473366777,8.57528485777,-7.33635827383,0.620873548189,4.256256452,-3.20197622975,0.181273331641,-1.08387027582,4.7040402782,-4.30957582315,-3.2032131656,-3.55255149682,-5.39665594737,-1.43142532587,-1.0020887959,0.310152183772,-4.9755616792,0.544686280489,3.23141360442,3.48532564084,-2.27912784214,-5.4400074927,-2.9422715648,5.55690115452,1.07856818487,-2.60423706293,0.296417542696,0.018438497484,-1.6693427813,-1.97826829297,-0.649584023059,1.0299335142,-1.30126957735,-1.49028243661,-7.05598390897,1.53666977635,2.47103852113,0.548410004575,-2.33345104297,1.05941242347,-2.22456861824,-0.833920312524,0.616063261429,1.08299628615,4.64962686857,-0.85913300693,8.38019424758,-3.35722782453,-5.88692650636,2.48297270139,-1.82296590428,-1.72441059232,-3.50540684352,-4.86662904103,1.4669711864,4.01910547892,-0.666310483219,1.94299481273,-1.65633018176,-0.233463008008,2.92032059917,-3.11237916489,1.65681514025,-5.82044394652,-0.84150699973,5.2420919474,1.65209466338,5.1169664971,2.8554833293,2.7991078945,1.85252228816,-1.80552712282,0.913601561388,0.441482040088,-0.160765804846,1.5659571287,-5.15831661542,1.85946914524,4.30885611724,2.5515617756,4.66296468178,6.40177754471,0.323659792742,2.79168056408,-2.54396620949,2.11927359978,3.5409553499,0.143619238635,0.247531717618,-3.67236700398,0.0737643596032,6.4369303449,-4.20339368939,1.39238156477,-0.479590680996,1.23359161367,1.11356295109,-0.530017747878,2.8127275755,1.67139578978,-0.648806054595,-3.56483347257,-0.00777
567660002,-4.97657731056,2.76010027647,2.79106523007,-2.92366722226,-0.381967118582,-8.20272569498,-1.22538543622,-0.975923561257,-1.2079847001,5.68413191756,-0.519274702668,1.34021991417,0.46834429979,-0.752738639987,-4.23064642449,-6.19847359916,1.9824349012,-5.77588344375,-6.11922142108,3.66428396702,7.66924429814,-0.776042481264,7.10654588699,-0.732527501781,2.01595049262,-0.872191261451,2.67919575771,2.4503210032,-2.90921337763,8.53517298381,0.212812230588,0.476091645162,0.748127258619,-0.886277671655,-2.89118565341,0.142637886207,-3.79416944186,1.11709731897,-1.30126662016,0.359220613638,-2.86900741637,-4.63997180067,1.53915568789,-4.55603598674,-2.03369594216,-1.81275931041,-2.69728669763,-2.77373948296,-0.780138870872,-0.710413366953,-1.87378830453,2.78039755662,-3.32990742207,3.18837203344,-1.00930721204,-4.34471332073,-2.7804454573,1.49880246004,1.22752761165,-1.44689382633,1.45333088478,4.27367163022,1.721656696,-3.6055589668,3.01899054011,7.5569880708,-2.61906720797,-2.57271003584,2.80881048858,-0.415334333976,-3.0628209281,-3.63716221015,-0.194801000356,2.79870586514,2.79689924727,0.0788984746723,-1.96187414487,-2.75171196282,-2.28218094111,0.444554739001,4.8369281887,0.373838265736,-3.15276482065,4.03460666657,-1.86244435867,0.253326237999,-0.800799566707,-1.74990467469,-2.74444140275,-5.73288337012,-4.91918236891,-0.418412837584,-2.99338801781,-1.38950726748,1.11461923277,5.90281201998,-0.707580384415,-2.67438790878,4.21448961059,0.828290172268,1.15630444248,2.80011883676,2.65575761526,0.483185992143,1.03626998862,0.131995103361,-2.91395613949,-1.43565141161,-2.69984012683,-0.626701895692,3.98586324195,-2.19652486801,-2.48867563566,-1.19348388483,2.79217995802,-0.750475711823,-0.945274029968,-0.126381392279,-3.6633948501,-1.54844618718,1.36196402073,0.468697243529,1.29018088311,0.94496485432,0.257892522415,-5.15796130657,-1.53281098127,0.595785883914,-0.833150585492,2.10806567272,5.13338648002,0.01430302143,1.24969169378,0.00611201127369,1.25787633081,-0.926280161539,2.16456234137,2.116730539,4.47622630279,2.12537882169,0.520683592956,-1.542467405,6.23520137549,-1.31958263814,0.309113717082,-1.16410690943,2.81666246732,1.45756631712,-5.58640872558,-0.689133227666,-1.21494281928,-2.40350431559,-2.07186533292,-4.34414368868,-0.898425387144,-2.84011162599 
+-2.85525532881,4.14924573302,-1.27022984872,4.43080223083,1.04979521433,-1.7563615183,-1.1571517543,0.443647010723,-0.840120493175,-0.564384366473,-0.631840480766,0.532262438599,0.584832645258,3.23352189611,3.05675490737,2.79432141225,-1.4358461082,0.0141486930853,0.928806241353,4.37966580232,2.8490308106,0.783738804857,3.78208962361,-2.80982620994,2.02718123476,0.447202665606,2.01867037753,0.748949680329,0.626896452109,-0.226885780966,-2.62637141645,-4.79518300573,0.517160896062,-0.495881884893,0.551008209387,-1.1999056525,1.58518931756,0.092337232629,-1.19481320501,2.92050409516,0.70208245794,-1.14886969738,0.497751923401,-0.698487961093,-1.87117256582,-1.65841737827,3.39620117505,3.17374242703,3.50091727654,0.480773175558,1.40684746265,-3.40429907004,0.423096078237,-1.25402658423,1.40384977142,2.23528889895,-0.70792874376,3.44265838623,-0.298643459876,-2.92092214823,-0.387096325756,-3.39548440655,-2.21305868606,4.01884763082,2.1962247467,0.178924582303,-0.175330102443,-1.81287087758,4.0013677895,0.506375047565,0.164289975565,2.65211846734,1.90428843131,4.45052925507,0.60681405703,-2.01008831143,-1.829990381,3.47248803615,-1.04316819509,-2.40825766305,-2.3010283341,1.26562317558,1.44828870733,0.254433333177,-0.294035871825,-2.39190562248,1.16849062324,-2.10750372112,-0.213768513898,-4.53380696336,-2.05353827099,-6.3679600064,-3.59502876282,-0.357480708757,3.44140817722,7.012797233,-1.16484250784,-0.17219096899,-1.65201326678,-3.91428116242,-1.39317485134,-1.78935467323,-2.13693570018,1.49206449827,-1.47030715466,0.326555347044,-2.8691151468,0.987859331371,-0.0670162276435,-1.38699082017,1.38502636115,-0.891648494402,1.63707906797,0.654039901097,0.315870566068,-1.13308484296,-1.63928325141,-0.569100450525,1.42651925405,-0.627428011101,0.216225209237,-1.25899307927,0.828946293494,0.974174125592,-0.332280605535,2.90402588169,-1.104502304,-0.644741526048,-1.07491079171,0.416999756893,1.47221087893,-3.26141314586,-2.26964950522,-0.0280790646872,3.24086038212,1.20009862085,-1.75527016382,-0.539535063108,3.23909044464,-2.99914438327,-0.492613923551,-2.91626054168,7.31597944102,-1.64774904013,-0.73017560184,0.442671738662,0.283633226553,0.714817404846,-1.79878552278,5.11262804588,-4.30506066322,-3.61411044379,-3.82477523089,-2.89008922736,-1.73692337195,-0.71265813748,0.314715143045,-3.16757190545,3.47336832523,1.5834569327,-0.637929768363,-1.56214804153,-2.64970105807,-1.12900751829,3.98810140292,1.87983502666,-2.51413838069,-0.909131198054,2.46703845749,-1.16912671606,-0.352692016586,2.6085906589,0.711290110747,1.82539761384,0.137608984311,-4.09530947288,1.01127222915,1.98808420658,0.725776154994,0.456542024016,2.36024162223,-1.51671710104,0.909857604951,1.3748901693,1.41866263221,2.22546428785,-0.842200076581,3.517446268,-1.94564609289,-2.96543750087,3.66959119841,-4.30324907561,-3.19456482887,-2.38057807227,-4.43179172357,-0.982803171277,-0.41006461223,0.280178608544,1.95349114498,0.637461675009,0.711961734593,1.80234276384,-1.78083568494,0.520603844326,-2.37248194615,0.146621232829,1.95268532594,1.55047165434,0.825010337035,2.16551250696,0.958925328056,-1.03714228699,0.654975053468,-3.01727262656,0.247705178956,-0.0690905296781,-0.235510739784,-3.40891237398,1.3884248968,1.15451488764,2.64650440057,0.807570249241,2.08921063463,0.508586264452,2.52009829918,-1.11128878554,1.39935349762,1.06951609214,-0.485668144226,-0.460008237761,-1.70877252301,0.942621914198,3.41737226328,-2.40122259855,1.40087889274,1.62360543887,-1.58665239096,1.05352225239,-1.45161462784,0.468765456079,1.15845116933,-0.2
69039389293,-1.64486767074,1.02112615665,-3.15314137697,1.83668091496,-0.21584566613,-3.70026185195,-0.418916064101,-3.95508877378,-2.58916404287,-0.282405416965,0.0237940859794,1.56997692525,1.15945299725,1.77722654502,2.98457802137,-1.70026101914,-1.18428363204,-3.13462997307,2.47967257818,-3.06139141003,-3.33533022483,1.78348285884,3.65876099269,-0.542083423932,4.338555275,0.646300950845,2.75761772871,-1.33789882819,3.41355988423,-2.1038232104,-1.58832200845,4.30315493663,-0.497908014457,1.43125514845,1.23661852837,1.89458917022,-1.27604429007,-0.118665337562,-2.98061999162,0.96282290379,-0.317447299958,0.177331671019,0.190233225426,-2.25885749382,0.633996060689,-0.931709454854,-0.453512817619,-1.06709086379,-0.45003234585,-2.11921728969,0.742342797123,-1.2796056505,-3.18736832539,1.89475087484,0.647759524982,3.05645425161,1.20850815674,-2.71339397748,-0.888974133234,2.6798757871,0.973526877165,-3.10087224166,0.282148707707,-0.588648343086,1.1617284895,-0.947238893711,1.91763001402,2.77221791545,0.242102493444,-2.7309236304,1.19404949462,-2.29922574123,-0.496662088036,-1.43388394435,-0.541529648303,0.914798926115,-1.00208673149,0.693878029583,-1.63149386843,-1.92279982587,-2.83413622906,1.15527868609,1.48624739955,0.0722957122324,-2.01015367587,2.79194158167,-1.34159947316,0.350978424549,-0.150799014967,0.594457630018,-0.702615435521,-2.49834770679,-3.44722706755,0.724352367323,-1.91413194974,-1.50618719021,0.208274304816,2.56051458041,-3.38282206297,-2.67611726205,4.30181331436,2.60196872592,0.980345343721,4.28195017179,2.45016477822,-0.720569800933,0.134198579739,-0.29681619644,-0.620866637628,0.0668065834062,-0.820043117604,0.427079674204,1.07770038346,-1.89850125671,-0.367198590239,0.309245206813,1.49165853987,-1.93249949853,-0.770264412958,0.697864535651,-1.92503979524,0.36664308548,0.6772959585,-0.407557226819,-0.110297719638,0.0780190831417,1.13796422362,-2.93108891884,-0.108831601143,0.0333983715381,-0.582767866453,1.68451089442,1.07477574031,0.759609896341,1.02592154245,-1.07680930615,0.977406439981,-2.15689084132,0.897650267382,0.871076323589,0.485362575054,-0.271094031335,0.392738024197,-1.50007651523,4.16120113373,-0.87542103827,0.770962069035,-0.193105610213,2.63168554207,-0.0860587771735,-1.02318051895,1.64206330359,-1.97631421804,-0.459768193164,-0.987577437561,-3.05661367973,0.700944906869,-2.85832208077 
+-2.40668356418,3.32200128635,-0.583146995504,5.17893602371,0.543722619215,-3.61351331234,-2.15219051798,0.154239282607,-1.86185589939,1.86499222438,0.546306239763,0.173791361054,4.68988918622,1.45787520011,4.61635592778,4.10645994823,-3.2520207723,2.82534058571,1.75578262289,1.28921755393,-2.56118538936,-0.681506864627,1.08718702157,-1.73322505633,1.85559087117,2.59411209822,4.86438429197,0.952494197489,-2.00043742815,1.56013310157,-2.24776257197,-3.37023128669,-4.3081034263,2.49645762126,0.0613088496522,1.5614004375,0.196160220802,6.71882646243,-0.515890210072,4.46806035837,6.49843154748,-1.07791967916,3.66291252851,0.340969046157,-0.717211693128,0.893422653279,4.23518612067,1.59024640679,4.00953623931,7.01554282506,3.3829888622,-5.28307714462,-2.56433442275,-1.21852455298,0.420509056251,3.97645592849,-3.46140729904,2.4203199927,0.499145697951,-3.22149805546,-0.210846113366,-1.82363392035,-0.608880066672,6.9203904438,-1.60331305107,-0.572833641767,-0.809020875096,-3.67446678479,-0.751598751347,-4.0169324255,-2.54423304001,3.43391434272,2.22814426263,0.720494257411,2.44403583368,0.126800663272,1.21261574904,2.80068611622,-1.46503902833,-1.02387386938,-2.22691595475,2.92893217762,4.35140001932,2.05282717824,-1.44687641621,-1.2482182169,-2.92161394775,-1.7117234171,-0.664106516638,-4.8541015784,-5.77170533816,-4.39334596634,-3.39425205867,2.10928462108,1.63525372922,2.20211301041,1.6979695034,-3.62859933059,-5.0955384318,-3.70584682147,0.913468626738,-7.92930506388,-5.18711395264,-2.14751714547,0.553891262807,-1.69585991979,-3.80843970299,5.93398868561,-2.32868751923,-1.18235898415,2.63725592931,-1.31388532559,0.924713171173,-3.68923300982,1.09287478288,0.447131590248,-1.02456968466,-1.82614021699,-1.27993409872,1.58124616583,-3.71338141124,2.08220694741,-2.52321253032,-1.8201927487,-0.489585822324,2.26087673823,-3.07679171085,-2.40032638788,-2.84321398576,-1.48280228813,-0.933238696854,-4.71049805482,-2.02947084586,3.95902432919,4.56408443928,2.77234577179,1.14790276547,3.24662017902,8.24014697393,-2.22661842028,2.16570036093,-1.47694238861,-1.56150964896,3.00861291885,-1.58600352287,-2.14261006952,3.36371217092,-0.277815688848,-2.55071312587,3.11163931847,-3.03255870501,-5.94063932737,-4.34915611903,-1.83065024058,0.344852973223,1.66785877029,-2.92896215598,1.02625600656,3.99294057846,-0.764026923974,-1.21331283232,1.14239682655,2.6062800312,0.555238639911,4.5995118173,4.17675596714,-4.47169959545,0.607188218708,4.99268372536,-1.10778329849,0.359094379742,2.3692166694,0.923014166752,1.39561937173,0.489449826081,-3.64099951267,1.49465563099,-0.864940508206,-3.8856684494,3.41578993161,3.80568179767,-3.16751228809,-1.90362671534,-1.0676062123,-0.827274825275,0.810656501699,-2.94211922248,3.80980886777,-0.505323204397,-2.70784498771,5.20668672403,-4.93021412532,-4.60470018069,-0.988903661569,-3.12164619764,-0.759834496776,-1.40789370815,-1.30719569206,3.67482577324,1.25514381965,0.729897277155,-0.221074349482,0.727831269502,-0.159013110398,2.35894515037,-1.60380238533,2.4536198473,-0.0437957082188,-2.46773814758,1.21704642216,-0.603128572703,-1.80407706489,3.83205666224,-6.8485059007,-0.767830495338,3.48311652978,-1.5156415077,-0.384740158121,-2.00051572681,1.33816781203,2.74709336281,-4.9876317811,-0.8754006657,1.1287828366,4.12337694327,-0.0656415704896,0.988705775539,1.21024437666,-5.10868624687,-0.0440934690972,0.0288263560326,-0.0765196786313,-2.51989612102,0.279547793863,3.3720527331,0.871332397062,-1.06302194118,3.50864712556,-5.51388967832,-2.0657237343,1.25920955737,-0.851355524064,0.
682309628327,1.77832262437,0.827240066528,2.64016712666,-1.44682307978,-1.31921160618,3.49327129126,-0.484558734299,0.692844864529,1.00374541759,2.69691859166,0.154326318701,3.57687735557,2.06113112768,0.991898488825,-2.44635528803,3.95126618067,0.472989312112,2.33190206448,-1.30573364337,-0.437735764186,-0.251160595852,-4.47043835958,-1.51135720338,0.506121761999,-2.44358267824,0.0295987832554,-0.774288076561,1.33123704235,-9.26131312053,-1.16106868347,2.29522511721,-0.143810934227,1.58851175785,-0.934488321146,2.50735031327,-2.19833483537,0.610350404581,0.244342085619,-0.716844118736,2.41659238497,-1.20272970358,-0.134129219055,1.19137221933,1.4639560167,2.79779875596,0.0395902216937,-1.13805999756,-1.18215333223,-4.94711904526,2.09147545735,-2.13449596683,-5.07175304095,3.36638139486,3.70780602773,-0.945894616344,2.34982962509,-3.65934572061,1.50665946653,-1.83905771414,0.419523326158,-3.01953722636,-2.5896670572,-3.02772776922,-0.675756167273,2.18817773163,0.581919515134,-2.15692337871,-0.136594539186,-0.149565262596,-0.947531465589,-2.10921741764,2.44600348274,-0.959342634677,-1.03096477588,-0.498095233439,-2.70281470935,0.375763909419,-1.34648666104,-1.03886758149,0.246556117833,1.06395082176,1.52048031847,4.41094911337,1.58565980355,-0.538471474896,-3.59832179228,-1.84744771719,-1.98041345438,0.181751922071,-1.86992271225,2.09672110558,-3.00351278146,-2.34073953231,1.90364366372,-3.77574122826,-1.82476956447,-1.66754270395,-4.17944864114,-1.643569568,3.2956170102,4.84715448697,1.54389404712,0.413878052236,2.01489253759,3.26832122485,0.128817051054,5.05713614782,1.29822279056,3.8207182916,1.3051289777,2.15857723474,-1.16148341576,-2.10272764564,2.65485213935,3.33767395735,-0.225942747493,-0.0608929246157,0.386773107847,3.04139202913,0.880819526515,3.79432223876,1.34475161622,-1.15084494869,-2.72890689214,-2.20355211159,4.0270291551,0.831315397334,3.15832736333,-1.64833269834,-1.15337079207,4.42843692621,3.73798665524,1.77370616277,-0.414466093183,-5.21718411287,2.14873480677,-3.09902131875,-0.431480846305,-2.21315110326,-2.32947000265,-7.03267769655,0.620159295995,0.669400061817,-1.29065409263,0.639066412349,0.412046761511,1.52948790789,1.63768410901,-3.5861120669,1.49905408064,3.24001261135,-1.20556717555,-1.63470236778,-0.0621758023897,-2.13516124328,1.88267453392,-0.0397303390498 
+-1.94168348829,1.77759615302,0.00324969291651,2.76537520647,0.356809294373,-3.04903903445,-0.571212081513,0.542071000835,-0.3627079765,1.24325743755,-0.427730951508,0.239423566062,3.11484637578,0.816348610718,2.79456279387,3.34600726088,-2.36868370374,0.960648590526,0.492966024081,1.63726032575,-0.520594614346,-0.710762829333,0.599766778151,-1.0725793888,1.89727054477,1.25175032437,2.9051876543,1.70391878923,-1.64619573633,0.92607907027,-1.19849523346,-2.36430278246,-2.74358758171,2.20087053259,0.111789479652,-0.408449259351,-0.328728172382,4.23223049204,-0.694623126908,2.42802311579,2.87807498376,-0.740068741241,1.62616318464,0.592944381834,0.159329541226,0.917487897477,1.93800289412,0.471566647292,2.55488344431,3.85311472585,1.20064109365,-2.55722387075,-1.1082152708,-1.02037551522,0.175513311128,2.44115464489,-1.72615523438,0.765665462018,0.977433196902,-1.83432733496,0.349625592828,-1.36312687636,0.715892488958,4.3416105775,-1.06046443701,-0.509649908741,0.497787223061,-1.32805623293,-0.711287250494,-3.12120837728,-0.91976089676,0.324390023948,1.0570640707,1.21327393075,0.919257143736,0.591331963142,0.457342879871,1.42952108284,-0.507037276824,-0.131746374767,-0.758843362331,0.87595297138,1.89250633915,1.27972093304,-1.20407567422,-1.03513803601,-1.98328871648,-0.134607525474,-1.06089170476,-3.55350990852,-3.87543780128,-3.44827607234,-1.73182748934,0.474614856841,0.146985151768,1.38470371882,1.86053468158,-2.31340465764,-2.91458182531,-2.94684989293,0.104040072957,-5.12927719911,-2.74529750084,-1.73663758914,-0.050694579085,-1.07352878233,-2.26959511399,3.31798489332,-2.0237891674,-0.758091919621,1.11872776558,-0.914398193459,0.528536718686,-2.51926944176,1.82730301281,-0.9518930234,-0.139356404841,-1.27573636502,-1.00687736809,0.303327493866,-2.61500696495,0.686027350427,-1.85459803333,-0.927233275571,-0.823916974465,1.41521901513,-2.20459855944,-1.11158712417,-2.42684030851,-0.775827880999,-0.958329697748,-2.08249924024,-1.46892851353,2.33831092993,2.1452542154,1.52739960074,2.11092267672,1.58193236212,4.76442153255,-0.500175990462,1.07725728969,-0.0380358799299,-0.134679699142,1.48794374386,-0.768634611766,-0.826269167265,3.30978691737,0.666516949734,-0.977266769807,2.49315859,-2.23554114183,-2.87566772004,-2.56910360535,-2.05376130993,-0.0498415172097,0.0825265093635,-2.29967758298,1.79486357128,2.97849754334,0.294754260181,-0.787193464239,1.07911070456,2.28606698573,1.0229565537,3.31828083058,2.20088501116,-2.9493214941,0.495515686273,3.67637633016,-0.658510478187,1.48509448349,0.636143160462,-0.197983719906,1.11500002464,0.257854927381,-2.22390707294,-0.292455268702,-0.33748634686,-2.67893269607,1.85805808067,3.25056247751,-1.99736208757,-0.936352628816,-1.63319216132,0.0197323211029,0.691710200409,-1.60304873069,2.2222357738,-0.494030532042,-1.08713753581,3.57224936565,-3.01036009913,-1.89224094709,0.0352522730817,-1.16673329006,-0.490160132845,-0.675803392779,-1.24046576689,1.81585133443,0.898491098881,0.944541202783,-1.58925671836,0.205119132002,0.531537915468,0.253908309937,-0.676644065382,2.24614178936,1.33602100372,-0.244497090975,1.76761640933,-1.35158142765,-0.414446250596,3.73523249467,-6.43518327713,1.29597712395,4.63066692571,-0.613321131865,0.561347877184,0.0711209956796,-0.127557443778,3.31162866752,-3.6926500086,-0.285006345312,0.5099318854,4.93547654788,0.868819684585,0.137249038219,1.1507523783,-4.40680729866,-0.0998956763242,0.600819382666,-0.423278113404,-2.2000334398,1.6212370952,3.27790774345,1.55507115324,-1.22907078028,-0.029405062197,-3.98268408457,-0.9904
95022685,3.63038349777,0.218821062246,-0.752823298723,-0.248150258065,1.06529252927,0.178199325207,1.01655516048,-1.81574172656,3.30965251942,-1.68384102901,-1.26371297797,1.5262250487,3.47741630872,-0.265562386513,-0.801813617449,2.82347845296,0.909660657868,-3.02272153417,1.71411415577,0.936149520477,2.0352847523,-2.75044326146,-2.03834780852,1.15331309106,-1.52446875255,0.960695721459,0.943234353662,-0.174043610891,-1.19030439148,-2.10528520733,-0.142430415601,-8.60760105293,-1.17194890261,0.969270079944,-0.40029415071,2.89137278279,2.72696355025,2.00195770899,-3.36749429703,-0.503019749323,-0.34973009636,1.07590580434,1.94033220887,0.0662941793603,-0.844664263724,-0.205224726594,-0.927714525858,1.82151385903,0.716765152912,-0.505078462759,-1.39169801553,-3.75972258488,3.03527408441,-3.67419142961,-4.80039191882,3.66278994322,2.01150090774,1.10328300466,1.56799778958,-1.6706875356,2.33891484579,-2.23406077822,-0.0790168158239,-2.29835296949,-3.19061029037,-3.09279531479,-1.82349063297,0.689713494777,2.95120071332,-0.454457020759,-1.06216011772,1.86404781302,0.412750553488,-0.312192496856,-0.901166524788,-1.71619956255,-0.00137017687125,-0.982375823656,-2.71353883425,-2.3097029229,-1.10547592401,0.557556620639,-0.718295222919,0.482262715501,0.333214447946,6.6798358191,1.22103029927,-0.80201618895,-4.47059836705,-1.35593414923,-2.29788345019,0.258600590626,-0.521844846309,0.437594954967,-2.18392724584,-0.493631593385,1.37908267339,-2.26255252202,-0.30756078084,-0.831326435408,-3.69798319499,-0.223937482238,4.03011242072,3.48706426779,0.441070608024,0.828923836351,1.9435341994,1.40788590272,0.878239062827,4.71550399621,-1.09901936968,3.4838750726,0.68982342432,2.16981327931,-1.96828734874,-2.21202177366,0.926186291775,1.88568594058,-1.7648316586,0.236547902774,2.64866965254,1.89112312635,2.27105943719,2.17706474463,0.199846277238,-0.520975260338,-4.22671780745,1.23446348428,2.73591025611,-0.260378292885,2.32260232011,-1.45908730944,-2.6201878802,3.38336368958,3.02514527659,0.979315823712,-1.99782266637,-1.60707169851,0.0483122205721,-4.34822138787,-0.213511068026,-2.52483056227,-1.12644841035,-6.88962921143,0.44326542365,0.096239015261,-0.0212235212322,-0.688477359512,-0.351519578299,2.47742046833,1.44010951281,-1.97519741376,2.6616740036,2.26513570666,-0.766266692481,-2.78300611377,-0.376965727806,-2.54099842787,2.187827818,1.03102740248 diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_activitynet_features/v_test2.csv b/Downstream/Open-Set-Action-Recognition/tests/data/test_activitynet_features/v_test2.csv new file mode 100644 index 0000000..95ab472 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/data/test_activitynet_features/v_test2.csv @@ -0,0 +1,6 @@ 
+f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,f19,f20,f21,f22,f23,f24,f25,f26,f27,f28,f29,f30,f31,f32,f33,f34,f35,f36,f37,f38,f39,f40,f41,f42,f43,f44,f45,f46,f47,f48,f49,f50,f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61,f62,f63,f64,f65,f66,f67,f68,f69,f70,f71,f72,f73,f74,f75,f76,f77,f78,f79,f80,f81,f82,f83,f84,f85,f86,f87,f88,f89,f90,f91,f92,f93,f94,f95,f96,f97,f98,f99,f100,f101,f102,f103,f104,f105,f106,f107,f108,f109,f110,f111,f112,f113,f114,f115,f116,f117,f118,f119,f120,f121,f122,f123,f124,f125,f126,f127,f128,f129,f130,f131,f132,f133,f134,f135,f136,f137,f138,f139,f140,f141,f142,f143,f144,f145,f146,f147,f148,f149,f150,f151,f152,f153,f154,f155,f156,f157,f158,f159,f160,f161,f162,f163,f164,f165,f166,f167,f168,f169,f170,f171,f172,f173,f174,f175,f176,f177,f178,f179,f180,f181,f182,f183,f184,f185,f186,f187,f188,f189,f190,f191,f192,f193,f194,f195,f196,f197,f198,f199,f200,f201,f202,f203,f204,f205,f206,f207,f208,f209,f210,f211,f212,f213,f214,f215,f216,f217,f218,f219,f220,f221,f222,f223,f224,f225,f226,f227,f228,f229,f230,f231,f232,f233,f234,f235,f236,f237,f238,f239,f240,f241,f242,f243,f244,f245,f246,f247,f248,f249,f250,f251,f252,f253,f254,f255,f256,f257,f258,f259,f260,f261,f262,f263,f264,f265,f266,f267,f268,f269,f270,f271,f272,f273,f274,f275,f276,f277,f278,f279,f280,f281,f282,f283,f284,f285,f286,f287,f288,f289,f290,f291,f292,f293,f294,f295,f296,f297,f298,f299,f300,f301,f302,f303,f304,f305,f306,f307,f308,f309,f310,f311,f312,f313,f314,f315,f316,f317,f318,f319,f320,f321,f322,f323,f324,f325,f326,f327,f328,f329,f330,f331,f332,f333,f334,f335,f336,f337,f338,f339,f340,f341,f342,f343,f344,f345,f346,f347,f348,f349,f350,f351,f352,f353,f354,f355,f356,f357,f358,f359,f360,f361,f362,f363,f364,f365,f366,f367,f368,f369,f370,f371,f372,f373,f374,f375,f376,f377,f378,f379,f380,f381,f382,f383,f384,f385,f386,f387,f388,f389,f390,f391,f392,f393,f394,f395,f396,f397,f398,f399 
+-2.50391422427,1.68599787994,-6.01226188664,-0.125473405835,-4.05747392075,6.31113406836,3.125083399,-1.28819161128,-0.594363160034,-4.04687042561,3.33266554158,2.05021273438,5.06569788016,-1.51135614382,-1.75754686884,-0.330255823582,2.89510802927,-0.73977406509,-7.89353751824,-3.45772308633,1.17079686934,-4.14460512795,-1.39475490187,3.86253584502,0.447348279778,3.92883117367,-4.46848521844,-3.76229701362,1.69349113829,-3.27463325871,0.924009592578,2.12999677853,2.85659594768,-4.17102590297,5.99293164916,10.2884632288,1.83231558377,1.4478797998,-4.38947245616,3.90167659309,-1.85908630842,-3.78404481822,-4.00131390917,-5.05896560394,-5.12547527286,-1.43005141799,-0.799648821025,-3.57910595264,-2.2926393485,5.31605148185,-4.44407908701,2.30758203368,4.12896344555,-2.10192899924,-1.57365770347,1.46184540219,1.02006796352,0.693975594963,-0.882507590565,-0.268305251769,-1.78810432009,-1.44049936972,-1.30807676828,-2.54602796889,1.91918086343,-1.87330246853,-1.19116743588,-4.94173944111,3.41346881759,1.04477840726,-3.87883468949,-1.6401990057,-3.11963649974,3.10739194639,2.00107403406,-3.01992488162,-2.17734208151,1.18544464156,-3.26027744456,-1.38117784752,-1.12807281493,1.23731617227,-4.22769494609,-2.31104123998,-2.73342858264,-2.60609814517,-3.91516964902,-1.43564934755,5.86923505644,10.8698481406,-0.0644558026284,1.29974983175,11.9821762355,2.63645925008,-0.800439528532,0.305979802689,10.4448009584,3.89998507623,10.3629906773,0.987935663397,1.06111665476,1.15934493999,4.74597180691,-0.53357543254,-5.53819862455,-1.08892905758,-2.84128587559,2.54403880204,3.08628575869,2.26009004126,2.77060999349,-0.582374569877,-1.77802346002,-0.2937931835,1.02838354244,3.37142584142,-6.2468072647,2.20336157741,4.02669576097,7.7139954797,-2.62292807265,-1.63856477894,5.24209850422,-5.95689444574,10.9237309757,5.56173629091,-0.06239338509,-0.11586309122,10.5260359799,0.0455641002992,-0.143587274683,6.85981490484,1.30256727268,0.099060309792,-0.99507694974,-2.39523977029,0.646837872527,-0.549287130061,0.528060432284,0.478981495421,-2.87669151504,-1.24631201746,-2.76280551886,-4.99648601327,1.56782352093,1.72098800023,-0.0553381940814,-5.35496277362,-1.12433242997,-0.526286978024,4.84426140262,-1.67891876845,-0.0265676538691,-3.17656040053,0.26415708479,4.03517758548,1.4993594204,3.83278299704,-2.77651900406,-0.861125229206,11.2030357751,-3.15313750697,-2.50459314309,1.78739187732,-7.82420403389,0.904809594012,-4.18456179152,0.60995901817,-1.44564015234,3.83168430104,-0.00437937539048,-2.3451228437,5.58568740368,2.97791145801,4.32271502614,-1.54512997459,0.536759116431,-1.1815032758,-3.14896126398,-6.86535051022,-2.70346348657,0.0113500145858,-2.77794979296,2.35137890776,-2.64285167165,-3.95364762035,-5.22867795339,6.15572625407,-6.91736113212,-1.52054794698,-2.80880080933,0.30321730122,-5.91560237718,-7.42976562356,-1.07937648743,-3.26394725639,5.0495641506,-0.553299233738,3.96384933141,-2.30659410078,-1.92410211898,-0.0740623548288,-0.741995456365,1.25729537246,3.06146581722,2.64592689772,-0.768545938319,-0.368544330909,-4.14440217226,1.39461226592,0.549227126659,-2.66866894906,2.50084337145,-6.41121511041,0.753405646177,0.280067476256,0.0344201303652,1.11097541213,-0.756136736626,-0.134220118965,5.6025168238,-2.69538654726,-1.20349766834,-2.90915489789,-3.07136878235,5.78831844318,4.79880530822,-1.54153241949,-4.93687499883,-1.02846407186,2.11793406884,1.81036372992,0.928447236083,-1.67445344365,5.93752378918,5.25534441684,-1.32955752029,5.02874157984,-8.32498580794,1.22665544488,0.729978278127,3.76998885
216,1.18933444305,-4.01561953996,-1.91036380149,-2.01600540918,-2.19074894269,-6.06838036269,1.91566910093,3.16219263298,-5.36112836713,-3.03646755643,2.60723549671,-4.73392456058,-1.27864055974,1.65558185437,0.35871136493,-1.97445669054,2.00282359886,0.766041404302,0.935142604145,0.146960995005,0.90301123882,0.584378651645,2.43738964301,2.14986027277,2.13076803503,3.4849176696,3.37372560032,1.19906408345,-3.25606738189,-7.18101082565,-1.28755031363,0.930275378818,0.638566405974,4.33632120663,3.7835789624,3.41258601273,-0.279865853117,-0.651737863704,-4.7223025058,5.75545690528,-0.820105519292,-4.00676441302,2.11396374954,2.60952237005,-0.820631582523,-0.546553676079,5.33481172893,1.34852465273,2.93794032376,-1.33422280837,0.00903616898423,-2.36627310158,-4.99107783527,4.48972757256,3.85615534734,0.528791357535,5.58767522678,0.127227772965,0.973913995567,-1.8062694088,2.32322553868,-0.442473914737,-0.123751487135,-1.67863033336,0.0891421785383,2.82212784306,-0.478511586228,-3.3537200428,-0.522387139102,-4.25974474021,2.87018204241,-0.111813521457,3.94839403804,3.74490500576,-2.30623158975,1.49538655047,0.530469396242,5.1296629385,-0.453469798231,0.306027388129,0.35104102143,-2.34272025863,2.87870763106,0.212640115016,0.719817214469,-0.20345939615,-0.506974699062,5.3592568385,-2.28140813929,2.88992723737,1.65410613199,4.48693866632,-0.09672872709,-1.87582435405,-2.46928755752,-3.56278716312,1.74785164057,2.74009034813,-7.29490411233,-3.16100976408,0.847520336401,2.92602454656,-0.0986801903656,-2.16201799224,-3.39690165524,1.53765563161,-1.41997380147,2.71161737728,-0.0167333157083,1.75945290337,2.10004583364,0.765974609689,1.79493778887,3.43569638106,1.49552039321,1.90617850633,-0.592973705882,4.00305455331,0.0335191789012,1.05186070161,2.48385107847,4.89055257951,2.06091725733,-0.18432842804,-4.0123498625,-1.32194922277,2.87064841629,-2.07818711219,0.695646315956,-2.8474977249,-0.372025591391,0.277543174562,0.348284025789,-0.54074715731,2.48928393808,-5.685446576,-1.66416304574,-7.02726226008,-4.88155203391,-5.57406386037,-4.91916411608,-7.94337537982,-3.65389317081,-2.97659988583,-5.97952768511,-0.575712613136,-3.38044490327,1.89594224776,-0.106777342905,-1.21814931744,2.66339186237,2.37583883107,-2.34277046832,0.0847875222918,2.1196259109,-2.034442402,0.994460807731,-5.99126604669 
+-3.61196599602,1.54396823943,-7.05199570656,0.70936037898,-4.42450754642,5.79873381853,4.79998759627,-1.51375595927,0.041889913378,-5.36947724223,3.11711617708,1.87290850281,5.37537143231,-0.140440261367,-1.07927534082,-0.8091666732,4.91609726548,-1.47799203396,-8.695467484,-4.09717354178,-1.04496299029,-3.85961924196,-2.10038466751,3.32289713025,-0.286860848963,3.96218072772,-4.39675701856,-4.40787660479,3.73622534722,-2.87716412544,0.454319910706,2.42820411325,3.82069679498,-2.79692421705,4.38538633883,10.2156878471,3.4358463645,2.12645539939,-4.04702971578,3.87549848557,-3.44834155142,-4.70891635418,-3.76960349679,-4.85522414446,-4.31793097854,-1.22963698059,0.447048375012,-2.53883199245,-3.42271156311,4.74730663896,-3.28625443876,1.15255518705,4.48008643985,-2.00973020792,0.25715895891,2.01633035838,1.72455749959,2.46865062863,-2.55920924097,-0.941734179414,-1.01115750857,-1.55530408025,-1.35561941266,-1.23846808225,4.0139059037,-2.82922329605,-1.54500077367,-4.14823132754,3.46829478144,1.42298098058,-3.60501238108,-0.478655001521,-2.27799000442,3.80441823602,0.555091810227,-4.56343603134,-3.86684781313,2.51266635656,-2.34452754557,-3.54211790189,-1.63034411222,-1.93864814639,-3.73451783657,-1.60328631774,-2.4672175467,-3.80095796585,-4.04769252539,-1.72506986559,5.59767432213,11.0820033073,-0.191732565476,1.90799899697,11.6760043621,4.55487689376,-0.31670263633,0.824923895671,8.5647937417,6.5042055428,11.780738759,1.50271001905,-0.0258838802575,0.435441556572,3.30290358961,0.377896644174,-6.5453125,-1.00815881342,-4.10386363864,1.63551698476,3.23607475758,1.42431855202,2.55384192467,-0.456127088517,-1.94804133773,0.550055715443,0.636448504358,2.32128318697,-6.70778397321,2.73787901104,3.27784690857,8.87038059237,-3.74099546671,-1.75985428691,4.34281664491,-6.43530688286,12.9979223013,6.78234988451,-0.806176937745,-0.697792875396,12.720209074,1.51877520681,0.540385435523,6.74378789664,0.843219137377,-0.0813938416541,0.253477528694,-0.220510208608,-0.133373232186,0.959342181682,1.10231779218,0.231312006339,-1.99769770503,-2.40456032157,-2.95679311156,-5.95258055926,1.98243983686,2.28856839836,-0.382299264148,-5.90337668657,-2.26504155695,-2.81989197582,5.54886015653,-2.23119397462,0.655153363942,-3.77459974289,1.65176175833,5.3708147645,0.977352631095,1.60295453668,-4.00599938631,-1.69029248208,10.0866486311,-3.23101823926,-3.1206391573,-0.391065824031,-6.68118602037,2.16630054861,-4.7760153234,0.383674836252,-2.48520847857,2.07149813026,-1.99720753431,-1.20698849112,6.08765767813,2.54862617255,4.67334094047,-2.9711537391,0.948479612171,-1.01456621587,-3.11699818373,-6.72917854786,-2.92183075547,0.496130555124,-1.61810724959,4.37298168838,-1.93378492743,-1.86215627491,-4.90786517859,8.62715418338,-7.5756526351,-3.27301322818,-1.76513157338,0.75444869213,-6.96635819673,-8.78930687905,-1.7524562791,-2.41629351974,3.68741244673,-1.43222312816,3.23068808318,-1.59724262357,-3.27234983742,1.24265492261,-0.0109941303718,2.80159805715,2.48849355877,3.07970299125,-0.557770807296,0.432648000119,-3.69374324679,0.0467125833038,0.424763832987,-3.38139162659,3.42404463887,-4.51077946425,2.03796033263,0.507232870907,-0.506469908358,1.50909484178,-1.27529992908,-0.255473581143,6.49730739594,-3.27221466898,0.583703720573,-2.57865453363,-2.25019647181,5.4004673481,4.42697024941,-0.0842542231125,-3.7730645895,-0.905618444086,2.8413999021,1.14175421931,0.425801990927,-0.551772788169,4.81836385727,2.67149700224,-1.60633691549,3.67677226961,-7.09939215183,3.07843704373,-0.603567731382,1.07058879137,-0.284542
271494,-2.65182375908,-0.966910338403,-2.21251030267,-1.5918459788,-6.73685925007,2.16504070461,3.16708334088,-5.73397156,-0.0308346152315,3.96178902388,-4.34651784301,-0.626209998878,2.96317673624,1.55037861467,-1.6240209043,-0.916502046583,2.22772178277,1.73989147246,0.425792780239,2.44748416841,1.27179720402,3.01824558973,0.45870998502,1.6810954839,4.9340551734,4.52931187153,1.22776987255,-4.30461632609,-8.0007350564,0.293104887008,2.59760651291,-2.09017359019,2.84267843664,3.92640956045,4.39850687385,0.263943502309,-2.52996243984,-4.9456074357,3.01140740514,0.060671949388,-3.45182769299,3.45659797787,-0.717935073377,-1.70038859993,-0.159526935219,4.78994245529,1.73284136951,3.39466386437,-3.02896884084,0.745040215552,-2.42295794487,-5.48635936975,5.81924671531,4.81498251557,0.588836860656,5.34480842352,-1.69491340667,-0.931661537289,-1.47670565099,1.95115838945,4.33551876547,-2.35900820047,-2.03742983938,-2.51175971031,2.00818323493,-1.02861073502,-2.83876619935,-1.42532885447,-3.22665929496,3.24723680019,2.50910392105,1.66940991878,1.98924016655,-2.976414603,2.39372268021,0.0301794916395,2.93753557801,-2.53472368196,-0.224031038582,2.22086050436,-4.60367997885,0.344105190041,0.892087735609,-0.732750460502,-0.0278959076854,-2.04538312331,4.39118845462,-1.92525613308,2.48760456741,2.12224386633,4.20933679342,-0.160378366895,-0.847533833979,-2.68713091612,-2.85529101193,1.45633238703,3.13940095305,-6.84778351784,-3.07674325108,2.9240462061,1.66283178181,0.366562292727,-0.474471753836,-2.22659401149,2.12781591714,-0.698044653983,3.11203145981,-0.0878812848356,2.08509909212,2.37360790372,-0.383632448313,2.85876693129,1.43884898126,2.44588458538,1.13197429609,0.669784083962,2.82567384094,-0.303028093278,0.0804680705045,1.01148720384,3.96722738147,3.78676999509,0.484674140066,-5.0017509222,0.154588726159,2.53468632102,-2.48899200261,0.211847947538,-2.28771493435,-0.277051561698,1.01623694403,0.347248692065,-1.88412645785,0.431219244007,-5.62209599018,-2.32514169514,-6.17786878348,-4.5459565401,-5.45559768676,-5.25804600716,-7.30329209566,-4.18787643314,-1.41929989755,-6.36565381289,0.691979244352,-5.4266118586,0.243365764617,-0.33372869622,-1.60025772154,2.65902011394,1.72226278037,-3.51518207789,0.837280854209,2.64499332011,-0.451456475259,4.05596930012,-4.51415959 
+-4.72683149606,1.45348708808,-8.07086817742,1.63604789376,-4.73549800873,5.20675960303,6.51230325818,-1.76839387298,0.728590119478,-6.74178866983,2.8130164218,1.58456622004,5.62148933888,1.37578496694,-0.371593541978,-1.41557620727,7.0383985126,-2.29083102226,-9.45700079202,-4.80206114411,-3.43986400128,-3.55278479934,-2.83554306328,2.7735268724,-1.13780232042,3.92281681627,-4.25488941192,-5.10927104115,5.96552311688,-2.43940485954,-0.0862283119556,2.73895709873,4.89024929762,-1.2763541922,2.57022780523,9.9613841939,5.07362765074,2.82582543075,-3.62501172424,3.7390643692,-5.19941673696,-5.66170942306,-3.52688271404,-4.6018137145,-3.43782470346,-0.992310488373,1.76652944327,-1.43113125652,-4.60094419718,3.99586562991,-2.03482079327,-0.160126103461,4.7740121144,-1.88776037335,2.26084538698,2.65253681004,2.54412336618,4.38450802416,-4.3977601847,-1.7176710071,-0.0724306311467,-1.70681380391,-1.41692107796,0.200332455933,6.24482979595,-3.83351793349,-1.88694544792,-3.24113301516,3.48263311743,1.83456811458,-3.1987385869,0.769642335775,-1.36940517485,4.47494917393,-1.01712017417,-6.15526720286,-5.62981627226,3.9166711688,-1.23287549198,-5.84563351884,-2.13252854615,-5.38287308335,-3.12790068805,-0.774887352436,-2.1297221756,-5.0906492424,-4.12367990136,-1.97023809493,5.23813544751,11.0778312242,-0.275825666287,2.59604639888,11.1118171802,6.55417260289,0.203035293669,1.38965836696,6.4515772891,9.32944820284,13.2775346517,2.04594562918,-1.18929040372,-0.312611132264,1.6740041858,1.40754847616,-7.60108621597,-0.907561735809,-5.39238245725,0.626936522051,3.35088065982,0.46351477623,2.31236622334,-0.229608643204,-2.07551843763,1.55680642903,0.263669897775,1.0858634612,-7.05738488197,3.32455673039,2.40335632682,10.0899427987,-4.92568675757,-1.80175588966,3.28225847542,-6.88330174923,14.9608820614,8.02759130716,-1.60224438258,-1.24848374822,14.9900966168,3.09142677188,1.2888044706,6.5442295146,0.330789602659,-0.286123776287,1.62822659672,2.06531837225,-0.982651502788,2.60571396113,1.63691263556,0.01017631717,-1.03312850952,-3.68506930947,-3.12813932538,-6.89839523554,2.3975418067,2.95167421162,-0.811870859787,-6.43306355715,-3.44969232738,-5.32219609171,6.3486418271,-2.75835331619,1.37597230494,-4.40136899472,3.19074914694,6.78243587256,0.445585229398,-0.808829127549,-5.32398023844,-2.61561192304,8.69628513216,-3.31122705817,-3.75478894711,-2.72484310418,-5.34768217325,3.53855306476,-5.38706000924,0.145446739923,-3.58612233102,0.120355840028,-4.15744045019,0.0731746891131,6.55438641787,1.99956796408,4.91731314421,-4.42644771397,1.40971697062,-0.784811406731,-3.00484983444,-6.53485749721,-3.15200479388,1.03534908369,-0.301970368177,6.51142239392,-1.10611675471,0.418995252622,-4.4721977675,11.1724257183,-8.21665349245,-5.11762260079,-0.615411399901,1.18636612185,-8.06906448126,-10.1247596884,-2.49426667422,-1.32065032601,2.17061477065,-2.33631666951,2.38926856876,-0.913166025876,-4.7118704623,2.72928834141,0.775672697726,4.4457443577,1.71014433921,3.57591197133,-0.235582885593,1.25215531408,-3.14634150744,-1.4078004086,0.365033659041,-4.17761438727,4.40297134757,-2.42336025477,3.4580388701,0.689679874331,-1.04557964027,1.87770598858,-1.80380414367,-0.417696796171,7.45841611862,-3.81225969553,2.56200723887,-2.21683688522,-1.32409115911,4.95071142197,3.92624093532,1.46352795839,-2.46225001812,-0.77849281013,3.50410349012,0.434351972267,-0.0288636657596,0.669223650095,3.49293913841,-0.137764969467,-1.96554630518,2.1402142328,-5.7265598917,5.16214273542,-2.05637966395,-1.8495585683,-1.87955528319,-1.2564454
8416,0.00674796104395,-2.43147389591,-0.893102669418,-7.49637273312,2.34914988339,3.13358963132,-6.12764425039,3.23036705017,5.41211955786,-3.91730147004,0.0444684042034,4.39211372912,2.92113072753,-1.25977230668,-4.12997387886,3.87697173372,2.66106281221,0.736292763781,4.03323895753,2.06197661877,3.74714529276,-1.27023549199,1.21123526514,6.49754122019,5.87128979206,1.2765970856,-5.3870420897,-8.90884536504,2.01509624004,4.4446681577,-5.09674575568,1.20312212527,4.04149165512,5.50566021562,0.953406482342,-4.55933359832,-5.21267021895,-0.036395560507,1.1284481287,-2.80024212361,4.99020810008,-4.41919901133,-2.62691727608,0.226202066541,4.16152264595,2.0979556495,3.87861913562,-4.9043425262,1.60233154863,-2.46861632347,-6.0349463439,7.17580538869,5.88561519026,0.718002053499,5.10737453699,-3.68287960738,-3.00543767631,-1.03803471714,1.53446617425,9.3747028375,-4.76337719411,-2.39580845952,-5.3522044754,1.13427948356,-1.6372946959,-2.29562118411,-2.37800694928,-2.10207263529,3.68294849873,5.38075784862,-0.940855975155,0.0137967544802,-3.74462119222,3.33829092682,-0.57550301969,0.537392029762,-4.84174327537,-0.825007719694,4.19546295956,-7.04726793528,-2.39606908321,1.61995286934,-2.34724253952,0.159427139386,-3.66048334882,3.28457990646,-1.59395935536,2.02604223549,2.65396766722,3.91925804377,-0.170175538174,0.293078864813,-2.97810955763,-2.11363542974,1.19750591725,3.54246556639,-6.34636378288,-2.98813998103,5.24311850287,0.266658103764,0.848274391745,1.48310565829,-0.99932412535,2.74228922785,0.028886015862,3.42641401768,-0.174800014277,2.45710129201,2.67823993087,-1.63095737636,3.88755993008,-0.699719142316,3.417716069,0.163006665744,2.16666536272,1.66770118028,-0.553962221444,-1.03107923508,-0.689737435581,2.84424331307,5.59421723187,1.20365538374,-6.0307972002,1.79253649413,2.07976007581,-2.97050522506,-0.320198328197,-1.71101762295,-0.148553741649,1.92997103455,0.389586392492,-3.34172380107,-1.60005307674,-5.45010868966,-3.076508376,-5.23991111994,-4.07970976352,-5.24768321514,-5.51570352555,-6.46153886914,-4.78648862958,0.280570674728,-6.66282331825,2.05202573478,-7.5744939363,-1.66311737061,-0.568106225319,-1.98653774977,2.69276298046,1.04291445166,-4.88652718305,1.6799737481,3.19981912076,1.09642167091,7.33881660357,-2.92239319682 
+-3.95618640951,2.16822504699,-7.02749201775,2.07438584924,-3.7952008903,4.66516063452,5.66598080516,-1.93683131397,0.83286083467,-6.31038688779,1.93803728581,0.415994385479,4.63695873261,2.03064954996,-0.546765608815,-2.54600209773,6.67720080018,-2.60086139083,-8.36665858864,-5.08000973701,-3.84362360537,-3.51486208201,-2.64075744003,3.07348869205,-1.94571852326,3.0428294871,-3.48582068503,-5.26945194721,6.5893364191,-2.27115260124,-0.558212063015,2.65741990924,5.38911813021,-0.610317340195,1.36496483032,7.88430027903,4.24496084571,2.5491838041,-2.95291282773,2.46365449905,-5.8806508565,-5.27971760869,-3.57540645719,-4.17462575197,-3.20521330357,-0.712461964526,1.66458856776,-1.43753664225,-4.29921654403,2.28583934903,-1.82383457958,-1.12579432636,3.8323690407,-1.60873620778,2.88645622611,3.1870587337,3.35539863348,4.68089458585,-5.01220222473,-2.40511398852,1.23198447682,-2.04995642841,-1.54208872378,0.738531426192,6.23694182634,-3.66800229013,-1.47559821933,-2.51566377998,2.96481087386,1.93647179783,-1.85266061902,0.897218961718,-1.2290535754,3.62848708004,-1.39016747028,-5.53799726665,-5.19588583469,3.79989851355,0.365908132196,-5.86183534264,-1.74588927373,-6.0965897572,-2.17361679807,0.099301538021,-1.49651467532,-5.28756560326,-3.35764337569,-1.22807119251,4.41288296581,8.37310397655,0.329299056678,3.0666925776,8.31520066255,6.03162533879,0.254658643305,1.52927615046,5.15474370718,9.92706954478,13.1178707933,1.9851475221,-1.25251645445,-0.040588879585,0.598402907254,2.09637820482,-7.39962798595,-0.736607771963,-4.72784618586,0.148764773328,2.82482881815,-0.363951296807,2.18847515703,0.851648719757,-1.44513312698,2.82303802848,0.789665968129,-0.284895439446,-5.39480451405,3.52706449866,1.50199447424,9.94445934776,-4.85012166024,-0.775828022365,2.07768519119,-6.15859429717,12.0614514388,7.37984260201,-1.64554053068,-0.434650133851,14.1951656962,3.12879480362,1.52092895806,5.6518155706,0.0597475437445,-0.432820611596,2.15243572235,1.70108392119,-1.19518387556,3.0659382844,0.729992161989,0.512096637264,-0.702464946806,-4.23238757848,-2.71316921115,-6.04356548428,2.08492669598,3.63833817005,-1.76652027816,-5.79197620272,-3.09022756994,-6.01349622488,6.92608562946,-2.03923279405,1.31198180869,-4.27980091691,3.90416300416,6.64981202126,0.73166857958,-1.23485268474,-5.4199275887,-3.10880723954,6.33416883498,-3.2787891686,-3.49453917981,-2.87733795069,-3.98702534318,3.87149213552,-5.16316780805,0.178835353982,-3.50880401373,-0.771996193229,-4.59445316195,0.868211128412,5.75491086721,0.921819759609,3.39493911088,-3.67554339618,1.67544182837,-0.174868727922,-2.08721256792,-5.95615169048,-3.12308293462,1.30280533791,0.644019361586,6.33218312264,-0.25693573624,1.04176057992,-3.36895969659,10.1426500809,-7.50808531523,-4.85486101508,-0.170589606464,0.612994321586,-7.87276499986,-8.79793308139,-2.78509446978,0.942439908986,1.39931613266,-1.95726648182,1.68011825532,-1.75475023031,-4.74921035767,3.71489373327,0.868516312915,4.43326895118,-0.263135685322,3.9764669311,0.911694865376,0.85224120736,-2.35560669035,-1.62565724194,1.2212044698,-4.61154775619,4.34895780444,-1.68536224604,4.06422766924,-0.0101673817625,-0.609392880799,1.22532760024,-1.5149737785,-0.805999085308,7.55067921162,-2.93719872087,3.43533396363,-2.10260034561,-0.721583162695,4.52110221148,2.69720968336,1.40812491387,-1.62846618414,-0.822517428993,2.23470644593,0.491862057373,0.920802225173,0.962496383188,1.928562572,-0.802637988328,-2.72160144806,1.0092707017,-4.93745543241,6.46554609537,-2.43392473698,-2.37087579571,-2.17133839786,
-1.93240495443,-0.362681306601,-2.54449704886,-0.17978616923,-8.05280478001,1.39086142182,2.67881788671,-6.08614060402,3.92572582901,5.49754135013,-3.72346940279,-0.242022804468,4.81397798061,4.11047571898,-1.36651873588,-5.34488024235,4.95870956659,3.41118116498,0.89432107985,3.33253220856,2.74165137768,5.04070746183,-0.415948872567,1.31926612794,6.72856174469,7.17419068098,1.49098495662,-4.98007160067,-9.318038764,2.46224850535,5.27640871287,-6.26628448487,0.635381773711,3.60578859449,6.173201437,2.24732711256,-4.89329962254,-5.55538270712,-1.49875565291,2.64946635843,-2.09067063332,6.20336785316,-6.25677093268,-2.50105109721,-0.0861860245474,3.59812706232,1.57726798058,3.84794261813,-5.72557672262,2.46239029348,-2.29553559303,-6.28103302002,6.47278197646,6.46319063902,1.48405849189,5.35767221928,-4.23237529636,-3.51878979206,-0.00904786854982,1.29577608407,8.77539933744,-5.03432886004,-2.11539484441,-6.16999167681,1.0546652633,-1.90332779229,-2.35973056435,-2.26917619407,-1.82008438647,4.08268388271,6.31470301866,-3.08372749806,-1.22069035709,-4.38186541558,3.19182102323,-1.42976873428,-0.223793095648,-5.89660835981,-1.25134502113,3.99110957295,-7.45729860783,-2.86559789747,1.66721295506,-3.13464591861,0.162813140824,-3.38049943731,2.39996716856,-2.15944387913,1.63885930896,3.04169135332,3.98578349114,0.511457957626,0.823394746482,-3.67019996286,-2.25544205963,1.80545994013,3.28000457585,-6.05162557602,-3.00187867403,6.49878694773,-0.326051785648,0.684602611069,3.36035886407,-1.228521097,2.57487190307,-0.46660696879,2.10812581897,-0.305482617393,2.75176966548,2.83328473449,-1.89653189778,2.65913075805,-0.83185869336,2.94031493856,-1.53106848534,3.9481344676,2.79967945367,0.710376281441,-1.93211027801,-2.24844452739,1.20713421225,5.22792970717,1.27727831364,-5.73701616764,2.55549032926,0.93986610532,-3.48593280315,-0.51567519635,-1.94204506159,0.172434514092,3.41956290126,0.900014420896,-3.65240677357,0.294835821394,-4.22226468399,-3.63110159874,-4.85140349388,-2.80221052408,-4.28761808038,-4.3011406827,-4.58334078341,-5.13591312647,0.760158468181,-5.32113479346,2.1639226532,-7.19870259762,-3.37775546551,-0.481121961772,-1.74219072804,3.14396611452,1.24187298924,-6.32387711763,2.16209208607,3.14260455966,-0.531431690456,7.58907546639,-2.70918695331 
+-1.8262197373,3.46346980632,-4.49737847328,2.16065784335,-1.95281272531,4.15987870455,2.97505878091,-2.04312422812,0.517240521534,-4.57863372207,0.651493945123,-1.38716154456,2.76521640778,2.06453320265,-1.35841371835,-4.05420722187,4.5255736053,-2.54840182424,-5.94124334216,-5.05016509056,-2.81190917194,-3.67080595732,-1.77554705888,3.98575968385,-2.7226165092,1.5568113219,-2.26458370864,-5.039455657,6.05570743084,-2.2971960926,-0.980765613618,2.29306887269,5.47656385183,-0.560339287222,0.599394002372,4.49311896622,1.63815704465,1.56890586585,-2.10052304626,0.367122573851,-5.79060420752,-3.93543901801,-3.83389718414,-3.62215631246,-3.43940053463,-0.401958022528,0.53790163979,-2.24713481337,-2.93054077685,-0.115260034797,-2.36293837487,-1.84129033774,1.99996710196,-1.21648682684,2.51857071042,3.64827320695,4.16069695712,3.80975566268,-4.74414714813,-3.02875908077,2.80003528588,-2.53125300467,-1.71329928577,0.627459498644,4.61502670049,-2.65913504899,-0.521180084049,-1.92113643766,2.06333797991,1.81511531889,0.170950590374,0.216834144594,-1.64254453719,1.68837884694,-0.898700688779,-3.32811955512,-3.17814456463,2.586751405,2.31587963283,-4.22904350161,-0.718470119983,-4.84180047393,-0.968689443319,1.00650176987,-0.650119360983,-4.69666749001,-1.9845663628,0.225895456072,3.25188346505,3.7214648661,1.43130174562,3.38060764193,3.90915834465,3.69100523353,-0.0311959603429,1.36241446137,4.44646521807,8.91873133182,11.7640150213,1.48888324678,-0.522589141129,0.966836237908,-0.0783090251672,2.53949897528,-6.29179323673,-0.514931401759,-2.65529345423,0.0529807021281,1.83676325411,-1.09529018879,2.14935440898,2.54911320939,-0.268381892444,4.27633724332,1.96361587535,-1.7532244429,-2.28158183038,3.45261253476,0.581260310115,8.81487321379,-3.86599963188,1.0199303931,0.769287056774,-4.58845028997,5.65657658659,5.3673800838,-1.14614109278,1.36291043639,11.190714488,2.05933052063,1.38084109723,4.25990110517,-0.0372709861028,-0.537679631114,2.06346488953,-0.573661087751,-0.94866688013,2.67103304624,-1.21616028428,1.53528989427,-0.828803119361,-4.25112649202,-1.87550593019,-3.89059672713,1.24767834112,4.34198590755,-3.09971417338,-4.30684231401,-1.61756324589,-5.39918883562,7.34323934078,-0.421631439929,0.682198462336,-3.61873383403,4.0223959434,5.40389529228,1.60736526251,-0.229281746149,-4.63487404227,-3.29047121048,3.27148656696,-3.16514086127,-2.58948973179,-1.45728034504,-2.60679310679,3.4551587224,-4.33734436273,0.408040666226,-2.58206250429,-0.901034320293,-3.78914433122,1.31350503355,4.04259036302,-0.537077046634,0.599152674081,-1.33412403017,1.80021581352,0.709162880183,-0.588873988985,-5.10033129215,-2.90737100959,1.37433995247,1.32315881923,4.48205828667,0.607754543123,0.468945158216,-1.78444975466,6.53551485418,-5.82657202005,-3.07283199429,-0.23395036608,-0.685120877029,-6.73997298956,-5.55178875566,-2.75079527497,4.04717801094,1.16547028959,-0.65315918714,1.06632480771,-3.69622943163,-3.77567577362,4.33929287315,0.461161797867,3.2264848721,-3.09787745297,4.30806201577,2.65380481884,-0.426790667771,-1.38944570095,-0.951971263289,2.73767599046,-4.78429574967,3.55026640534,-1.92020277738,4.08365875006,-1.34608724355,0.52991460502,-0.16311301112,-0.636902204679,-1.35728861392,7.01656522274,-1.04194504768,3.51204951167,-2.16685251236,-0.352366254777,4.10601737738,0.943123545944,0.196937171517,-1.13858823061,-0.989929439423,-0.42760035634,1.10082056798,2.88289318532,0.586945049761,0.191918394566,0.0784438192848,-3.76375469208,0.170746930539,-4.56917799234,7.20640591383,-2.03627742291,-1.162803858,-
1.52358367979,-4.10165442705,-1.70038300634,-2.58114453554,0.544036584797,-8.4628292799,-0.39101603627,1.92033407032,-5.73090517998,2.77129089892,4.59895916582,-3.6993329978,-1.21856652394,4.50981304645,5.16903883219,-1.81281971931,-5.11896033764,5.63131075621,4.03798224807,0.94242796123,0.983446211217,3.34165998936,6.74135187387,2.30066786289,1.84391614258,5.99896759033,8.44891975641,1.82473051131,-3.49935032487,-9.36754787922,1.99033073068,5.37617359877,-6.11145027161,0.840345951319,2.77300786376,6.52381299019,3.97701953709,-4.00499682903,-5.95263335466,-1.81812204599,4.49724048495,-1.33929533959,7.18550524711,-6.75075093508,-1.61648165077,-0.901867833736,3.08160940886,0.417978906632,3.44625248551,-5.78684989214,3.32444414914,-1.96475922584,-6.30903808594,4.28494357228,6.6853062272,2.70926908404,5.95959033489,-3.74483410478,-2.90718505382,1.44551556975,1.18541310728,4.11113968076,-3.76723547578,-1.37445659459,-5.52958389044,1.54767837942,-1.92233352125,-2.86162799716,-1.39507161677,-2.14555080593,4.45648285389,5.85169536352,-4.88964028836,-1.92099967062,-4.92469491005,2.25878873229,-2.46324559539,0.196596455872,-6.0487574029,-1.55177951395,2.21588104457,-6.40127635479,-1.69814975291,1.22380428523,-3.3257760489,0.0335933651775,-1.73429207817,1.6753979218,-3.37144515515,1.30529874444,3.32560100317,4.3093956852,1.69156472504,0.913729201853,-4.65146396518,-3.03416330755,3.03830222517,2.53774605692,-5.90589033127,-3.08939878225,6.98781540632,-0.339520339072,0.055654079916,5.17970392108,-2.50772905916,1.84376598061,-1.84338212296,-0.387204087972,-0.467715920136,2.99068574548,2.88048758626,-1.45433287024,-0.196598118542,0.482496773005,1.41811820263,-3.74789556999,5.93477154017,5.5825525865,3.06712063462,-2.68137378276,-3.70440641046,-0.800623167756,3.29453107536,0.885642924309,-4.48956893444,2.6876345849,-0.693841806652,-4.02581026554,-0.468509003222,-2.75538569212,0.6321949444,5.32430804849,1.74790291831,-3.13624450207,5.02021304012,-2.23322165832,-4.04383488655,-4.85901101112,-0.93985485792,-2.78530479312,-2.02520967245,-1.95793826431,-5.30569067717,0.360007514108,-2.79794396788,1.37599081039,-5.00351879596,-4.95408667088,-0.162458266318,-1.04320560053,3.89612897635,2.07402950018,-7.80881192922,2.38426132559,2.64415159821,-4.4487659061,5.65304963946,-3.48982639909 diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_ava_dataset/ava_excluded_timestamps_sample.csv b/Downstream/Open-Set-Action-Recognition/tests/data/test_ava_dataset/ava_excluded_timestamps_sample.csv new file mode 100644 index 0000000..c3467e8 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/data/test_ava_dataset/ava_excluded_timestamps_sample.csv @@ -0,0 +1,2 @@ +0f39OWEqJ24,0903 +_-Z6wFjXtGQ,0902 diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_ava_dataset/ava_sample.csv b/Downstream/Open-Set-Action-Recognition/tests/data/test_ava_dataset/ava_sample.csv new file mode 100644 index 0000000..e1a6982 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/data/test_ava_dataset/ava_sample.csv @@ -0,0 +1,8 @@ +0f39OWEqJ24,0902,0.031,0.162,0.670,0.995,12,0 +0f39OWEqJ24,0902,0.031,0.162,0.670,0.995,17,0 +0f39OWEqJ24,0902,0.031,0.162,0.670,0.995,79,0 +0f39OWEqJ24,0903,0.034,0.189,0.669,0.980,12,0 +0f39OWEqJ24,0903,0.034,0.189,0.669,0.980,17,0 +_-Z6wFjXtGQ,0902,0.063,0.049,0.524,0.996,12,0 +_-Z6wFjXtGQ,0902,0.063,0.049,0.524,0.996,74,0 +_-Z6wFjXtGQ,0902,0.063,0.049,0.524,0.996,80,0 diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_bsp_features/v_test1.npy 
b/Downstream/Open-Set-Action-Recognition/tests/data/test_bsp_features/v_test1.npy new file mode 100644 index 0000000..f291a05 Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/tests/data/test_bsp_features/v_test1.npy differ diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_eval_detection/gt.json b/Downstream/Open-Set-Action-Recognition/tests/data/test_eval_detection/gt.json new file mode 100644 index 0000000..4ee7b12 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/data/test_eval_detection/gt.json @@ -0,0 +1,46 @@ +{ + "v_bYUmtLBL7W4": { + "duration": 224.49, + "subset": "validation", + "resolution": "1920x1080", + "url": "https://www.youtube.com/watch?v=bYUmtLBL7W4", + "annotations": [ + { + "segment": [ + 11.553655226209049, + 57.06805460218409 + ], + "label": "Wakeboarding" + }, + { + "segment": [ + 68.62170982839314, + 126.03987519500778 + ], + "label": "Wakeboarding" + }, + { + "segment": [ + 135.4928658346334, + 201.31368954758187 + ], + "label": "Wakeboarding" + } + ] + }, + "v_hDPLy21Yyuk": { + "duration": 76.23, + "subset": "validation", + "resolution": "1280x720", + "url": "https://www.youtube.com/watch?v=hDPLy21Yyuk", + "annotations": [ + { + "segment": [ + 21.392480499219968, + 76.161 + ], + "label": "Cleaning shoes" + } + ] + } +} diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_eval_detection/result.json b/Downstream/Open-Set-Action-Recognition/tests/data/test_eval_detection/result.json new file mode 100644 index 0000000..98a6075 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/data/test_eval_detection/result.json @@ -0,0 +1,120 @@ +{ + "results": { + "bYUmtLBL7W4": [ + { + "label": "Wakeboarding", + "score": 0.6533445119857788, + "segment": [ + 0.0, + 206.3465619982159 + ] + }, + { + "label": "Wakeboarding", + "score": 0.5620265007019043, + "segment": [ + 33.64346119536128, + 206.3465619982159 + ] + }, + { + "label": "Wakeboarding", + "score": 0.4421495497226715, + "segment": [ + 148.03122925958965, + 204.1036645851918 + ] + }, + { + "label": "Wakeboarding", + "score": 0.31284379959106445, + "segment": [ + 0.0, + 123.35935771632472 + ] + }, + { + "label": "Wakeboarding", + "score": 0.2897574603557587, + "segment": [ + 67.28692239072257, + 206.3465619982159 + ] + }, + { + "label": "Wakeboarding", + "score": 0.284942090511322, + "segment": [ + 33.64346119536128, + 125.60225512934882 + ] + }, + { + "label": "Wakeboarding", + "score": 0.12905514240264893, + "segment": [ + 0.0, + 53.829537912578054 + ] + }, + { + "label": "Wakeboarding", + "score": 0.12616874277591705, + "segment": [ + 67.28692239072257, + 123.35935771632472 + ] + }, + { + "label": "Wakeboarding", + "score": 0.12591737508773804, + "segment": [ + 100.93038358608386, + 204.1036645851918 + ] + }, + { + "label": "Wakeboarding", + "score": 0.10444077104330064, + "segment": [ + 38.12925602140946, + 53.829537912578054 + ] + } + ], + "hDPLy21Yyuk": [ + { + "label": "Cleaning shoes", + "score": 0.5667440891265869, + "segment": [ + 21.222965776805253, + 75.03834328227572 + ] + }, + { + "label": "Cleaning shoes", + "score": 0.414698988199234, + "segment": [ + 21.222965776805253, + 43.96185768052516 + ] + }, + { + "label": "Cleaning shoes", + "score": 0.21768000721931455, + "segment": [ + 0.0, + 75.03834328227572 + ] + }, + { + "label": "Cleaning shoes", + "score": 0.10800375044345856, + "segment": [ + 29.560559474835888, + 70.49056490153174 + ] + } + ] + } +} diff --git 
a/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00001.jpg b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00001.jpg new file mode 100644 index 0000000..e846e5a Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00001.jpg differ diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00002.jpg b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00002.jpg new file mode 100644 index 0000000..6d7c81b Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00002.jpg differ diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00003.jpg b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00003.jpg new file mode 100644 index 0000000..6289b32 Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00003.jpg differ diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00004.jpg b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00004.jpg new file mode 100644 index 0000000..a75094d Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00004.jpg differ diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00005.jpg b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00005.jpg new file mode 100644 index 0000000..25828b8 Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00005.jpg differ diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00006.jpg b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00006.jpg new file mode 100644 index 0000000..7f0fa6c Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00006.jpg differ diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00007.jpg b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00007.jpg new file mode 100644 index 0000000..2ebc51f Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00007.jpg differ diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00008.jpg b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00008.jpg new file mode 100644 index 0000000..f974704 Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00008.jpg differ diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00009.jpg b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00009.jpg new file mode 100644 index 0000000..b4a74eb Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00009.jpg differ diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00010.jpg b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00010.jpg new file mode 100644 index 0000000..9944e62 Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/img_00010.jpg differ diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/x_00001.jpg b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/x_00001.jpg new file mode 100644 index 0000000..705ba4b Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/x_00001.jpg differ diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/x_00002.jpg 
b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/x_00002.jpg new file mode 100644 index 0000000..f501675 Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/x_00002.jpg differ diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/x_00003.jpg b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/x_00003.jpg new file mode 100644 index 0000000..f419d71 Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/x_00003.jpg differ diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/x_00004.jpg b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/x_00004.jpg new file mode 100644 index 0000000..cb52d25 Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/x_00004.jpg differ diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/x_00005.jpg b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/x_00005.jpg new file mode 100644 index 0000000..399fda2 Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/x_00005.jpg differ diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/y_00001.jpg b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/y_00001.jpg new file mode 100644 index 0000000..743b0b2 Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/y_00001.jpg differ diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/y_00002.jpg b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/y_00002.jpg new file mode 100644 index 0000000..37f84d0 Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/y_00002.jpg differ diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/y_00003.jpg b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/y_00003.jpg new file mode 100644 index 0000000..938a5b6 Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/y_00003.jpg differ diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/y_00004.jpg b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/y_00004.jpg new file mode 100644 index 0000000..af4c666 Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/y_00004.jpg differ diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/y_00005.jpg b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/y_00005.jpg new file mode 100644 index 0000000..41e05d7 Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/tests/data/test_imgs/y_00005.jpg differ diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_proposals/v_test1.csv b/Downstream/Open-Set-Action-Recognition/tests/data/test_proposals/v_test1.csv new file mode 100644 index 0000000..5c65dc3 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/data/test_proposals/v_test1.csv @@ -0,0 +1,10 @@ +tmin,tmax,tmin_score,tmax_score,score,match_iou,match_ioa +0.1,0.2,0.95,0.96,0.97,0.85,0.84 +0.2,0.3,0.94,0.95,0.96,0.84,0.83 +0.3,0.4,0.93,0.94,0.95,0.83,0.82 +0.4,0.5,0.92,0.93,0.94,0.82,0.81 +0.5,0.6,0.91,0.92,0.93,0.81,0.80 +0.6,0.7,0.90,0.91,0.92,0.80,0.79 +0.5,0.7,0.90,0.91,0.92,0.80,0.79 +0.6,0.8,0.90,0.91,0.92,0.80,0.79 +0.4,0.7,0.90,0.91,0.92,0.80,0.79 diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_proposals/v_test2.csv 
b/Downstream/Open-Set-Action-Recognition/tests/data/test_proposals/v_test2.csv new file mode 100644 index 0000000..5fa47cf --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/data/test_proposals/v_test2.csv @@ -0,0 +1,7 @@ +tmin,tmax,tmin_score,tmax_score,score,match_iou,match_ioa +0.1,0.2,0.95,0.96,0.97,0.75,0.74 +0.2,0.3,0.94,0.95,0.96,0.74,0.73 +0.3,0.4,0.93,0.94,0.95,0.73,0.72 +0.4,0.5,0.92,0.93,0.94,0.72,0.71 +0.5,0.6,0.91,0.92,0.93,0.71,0.70 +0.6,0.7,0.90,0.91,0.92,0.70,0.79 diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_rawvideo_dataset/part_0.mp4 b/Downstream/Open-Set-Action-Recognition/tests/data/test_rawvideo_dataset/part_0.mp4 new file mode 100644 index 0000000..285e1b4 Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/tests/data/test_rawvideo_dataset/part_0.mp4 differ diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_rawvideo_dataset/part_1.mp4 b/Downstream/Open-Set-Action-Recognition/tests/data/test_rawvideo_dataset/part_1.mp4 new file mode 100644 index 0000000..285e1b4 Binary files /dev/null and b/Downstream/Open-Set-Action-Recognition/tests/data/test_rawvideo_dataset/part_1.mp4 differ diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_tem_results/v_test1.csv b/Downstream/Open-Set-Action-Recognition/tests/data/test_tem_results/v_test1.csv new file mode 100644 index 0000000..2a20d5a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/data/test_tem_results/v_test1.csv @@ -0,0 +1,11 @@ +action,start,end,tmin,tmax +3.711169585585594177e-02,5.839086771011352539e-01,1.464508026838302612e-01,0.0,0.1 +1.555041410028934479e-02,3.062666654586791992e-01,2.622193098068237305e-01,0.1,0.2 +1.146762818098068237e-02,1.464279890060424805e-01,3.260520696640014648e-01,0.2,0.3 +1.371797081083059311e-02,1.365097165107727051e-01,3.570831716060638428e-01,0.3,0.4 +1.519643329083919525e-02,1.688144057989120483e-01,3.057994544506072998e-01,0.4,0.5 +1.968025043606758118e-02,1.974480003118515015e-01,2.933082580566406250e-01,0.5,0.6 +2.251588553190231323e-02,1.885317713022232056e-01,3.326449990272521973e-01,0.6,0.7 +2.402217499911785126e-02,1.918197423219680786e-01,3.420312106609344482e-01,0.7,0.8 +2.045033127069473267e-02,1.970291137695312500e-01,3.339000344276428223e-01,0.8,0.9 +3.435279428958892822e-02,5.583426356315612793e-01,1.250019371509552002e-01,0.9,1.0 diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/test_tem_results/v_test2.csv b/Downstream/Open-Set-Action-Recognition/tests/data/test_tem_results/v_test2.csv new file mode 100644 index 0000000..89fd5c4 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/data/test_tem_results/v_test2.csv @@ -0,0 +1,11 @@ +action,start,end,tmin,tmax +5.711169585585594177e-02,7.839086771011352539e-01,3.464508026838302612e-01,0.0,0.1 +2.555041410028934479e-02,3.062666654586791992e-01,3.622193098068237305e-01,0.1,0.2 +2.146762818098068237e-02,2.464279890060424805e-01,3.260520696640014648e-01,0.2,0.3 +1.371797081083059311e-02,1.365097165107727051e-01,3.570831716060638428e-01,0.3,0.4 +1.519643329083919525e-02,1.688144057989120483e-01,3.057994544506072998e-01,0.4,0.5 +1.968025043606758118e-02,1.974480003118515015e-01,2.933082580566406250e-01,0.5,0.6 +2.251588553190231323e-02,1.885317713022232056e-01,3.326449990272521973e-01,0.6,0.7 +2.402217499911785126e-02,1.918197423219680786e-01,3.420312106609344482e-01,0.7,0.8 +2.045033127069473267e-02,1.970291137695312500e-01,3.339000344276428223e-01,0.8,0.9 
+3.435279428958892822e-02,5.583426356315612793e-01,1.250019371509552002e-01,0.9,1.0 diff --git a/Downstream/Open-Set-Action-Recognition/tests/data/video_test_list.txt b/Downstream/Open-Set-Action-Recognition/tests/data/video_test_list.txt new file mode 100644 index 0000000..2f415bd --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/data/video_test_list.txt @@ -0,0 +1,2 @@ +test.mp4 0 +test.mp4 0 diff --git a/Downstream/Open-Set-Action-Recognition/tests/test_accuracy.py b/Downstream/Open-Set-Action-Recognition/tests/test_accuracy.py new file mode 100644 index 0000000..8db62a7 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/test_accuracy.py @@ -0,0 +1,293 @@ +import os.path as osp +import random + +import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal + +from mmaction.core import (ActivityNetDetection, + average_recall_at_avg_proposals, confusion_matrix, + get_weighted_score, mean_average_precision, + mean_class_accuracy, mmit_mean_average_precision, + pairwise_temporal_iou, top_k_accuracy) + + +def gt_confusion_matrix(gt_labels, pred_labels, normalize=None): + """Calculate the ground truth confusion matrix.""" + max_index = max(max(gt_labels), max(pred_labels)) + confusion_mat = np.zeros((max_index + 1, max_index + 1), dtype=np.int64) + for gt, pred in zip(gt_labels, pred_labels): + confusion_mat[gt][pred] += 1 + del_index = [] + for i in range(max_index): + if sum(confusion_mat[i]) == 0 and sum(confusion_mat[:, i]) == 0: + del_index.append(i) + confusion_mat = np.delete(confusion_mat, del_index, axis=0) + confusion_mat = np.delete(confusion_mat, del_index, axis=1) + + if normalize is not None: + confusion_mat = np.array(confusion_mat, dtype=np.float) + m, n = confusion_mat.shape + if normalize == 'true': + for i in range(m): + s = np.sum(confusion_mat[i], dtype=float) + if s == 0: + continue + confusion_mat[i, :] = confusion_mat[i, :] / s + print(confusion_mat[i, :]) + elif normalize == 'pred': + for i in range(n): + s = sum(confusion_mat[:, i]) + if s == 0: + continue + confusion_mat[:, i] = confusion_mat[:, i] / s + elif normalize == 'all': + s = np.sum(confusion_mat) + if s != 0: + confusion_mat /= s + + return confusion_mat + + +def test_activitynet_detection(): + data_prefix = osp.join(osp.dirname(__file__), 'data/test_eval_detection') + gt_path = osp.join(data_prefix, 'gt.json') + result_path = osp.join(data_prefix, 'result.json') + detection = ActivityNetDetection(gt_path, result_path) + + results = detection.evaluate() + mAP = np.array([ + 0.71428571, 0.71428571, 0.71428571, 0.6875, 0.6875, 0.59722222, + 0.52083333, 0.52083333, 0.52083333, 0.5 + ]) + average_mAP = 0.6177579365079365 + + assert_array_almost_equal(results[0], mAP) + assert_array_almost_equal(results[1], average_mAP) + + +def test_confusion_matrix(): + # custom confusion_matrix + gt_labels = [np.int64(random.randint(0, 9)) for _ in range(100)] + pred_labels = np.random.randint(10, size=100, dtype=np.int64) + + for normalize in [None, 'true', 'pred', 'all']: + cf_mat = confusion_matrix(pred_labels, gt_labels, normalize) + gt_cf_mat = gt_confusion_matrix(gt_labels, pred_labels, normalize) + assert_array_equal(cf_mat, gt_cf_mat) + + with pytest.raises(ValueError): + # normalize must be in ['true', 'pred', 'all', None] + confusion_matrix([1], [1], 'unsupport') + + with pytest.raises(TypeError): + # y_pred must be list or np.ndarray + confusion_matrix(0.5, [1]) + + with pytest.raises(TypeError): + # y_real must be list or np.ndarray + 
confusion_matrix([1], 0.5) + + with pytest.raises(TypeError): + # y_pred dtype must be np.int64 + confusion_matrix([0.5], [1]) + + with pytest.raises(TypeError): + # y_real dtype must be np.int64 + confusion_matrix([1], [0.5]) + + +def test_topk(): + scores = [ + np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]), + np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]), + np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]), + np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413]) + ] + + # top1 acc + k = (1, ) + top1_labels_0 = [3, 1, 1, 1] + top1_labels_25 = [2, 0, 4, 3] + top1_labels_50 = [2, 2, 3, 1] + top1_labels_75 = [2, 2, 2, 3] + top1_labels_100 = [2, 2, 2, 4] + res = top_k_accuracy(scores, top1_labels_0, k) + assert res == [0] + res = top_k_accuracy(scores, top1_labels_25, k) + assert res == [0.25] + res = top_k_accuracy(scores, top1_labels_50, k) + assert res == [0.5] + res = top_k_accuracy(scores, top1_labels_75, k) + assert res == [0.75] + res = top_k_accuracy(scores, top1_labels_100, k) + assert res == [1.0] + + # top1 acc, top2 acc + k = (1, 2) + top2_labels_0_100 = [3, 1, 1, 1] + top2_labels_25_75 = [3, 1, 2, 3] + res = top_k_accuracy(scores, top2_labels_0_100, k) + assert res == [0, 1.0] + res = top_k_accuracy(scores, top2_labels_25_75, k) + assert res == [0.25, 0.75] + + # top1 acc, top3 acc, top5 acc + k = (1, 3, 5) + top5_labels_0_0_100 = [1, 0, 3, 2] + top5_labels_0_50_100 = [1, 3, 4, 0] + top5_labels_25_75_100 = [2, 3, 0, 2] + res = top_k_accuracy(scores, top5_labels_0_0_100, k) + assert res == [0, 0, 1.0] + res = top_k_accuracy(scores, top5_labels_0_50_100, k) + assert res == [0, 0.5, 1.0] + res = top_k_accuracy(scores, top5_labels_25_75_100, k) + assert res == [0.25, 0.75, 1.0] + + +def test_mean_class_accuracy(): + scores = [ + np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]), + np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]), + np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]), + np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413]) + ] + + # test mean class accuracy in [0, 0.25, 1/3, 0.75, 1.0] + mean_cls_acc_0 = np.int64([1, 4, 0, 2]) + mean_cls_acc_25 = np.int64([2, 0, 4, 3]) + mean_cls_acc_33 = np.int64([2, 2, 2, 3]) + mean_cls_acc_75 = np.int64([4, 2, 2, 4]) + mean_cls_acc_100 = np.int64([2, 2, 2, 4]) + assert mean_class_accuracy(scores, mean_cls_acc_0) == 0 + assert mean_class_accuracy(scores, mean_cls_acc_25) == 0.25 + assert mean_class_accuracy(scores, mean_cls_acc_33) == 1 / 3 + assert mean_class_accuracy(scores, mean_cls_acc_75) == 0.75 + assert mean_class_accuracy(scores, mean_cls_acc_100) == 1.0 + + +def test_mmit_mean_average_precision(): + # One sample + y_true = [np.array([0, 0, 1, 1])] + y_scores = [np.array([0.1, 0.4, 0.35, 0.8])] + map = mmit_mean_average_precision(y_scores, y_true) + + precision = [2.0 / 3.0, 0.5, 1., 1.] + recall = [1., 0.5, 0.5, 0.] 
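+    # expected mAP is the area under the step-wise precision-recall curve,
+    # i.e. the sum over i of (recall[i] - recall[i + 1]) * precision[i],
+    # which the next line computes as -np.sum(np.diff(recall) * precision[:-1])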
+ target = -np.sum(np.diff(recall) * np.array(precision)[:-1]) + assert target == map + + +def test_pairwise_temporal_iou(): + target_segments = np.array([]) + candidate_segments = np.array([]) + with pytest.raises(ValueError): + pairwise_temporal_iou(target_segments, candidate_segments) + + # test temporal iou + target_segments = np.array([[1, 2], [2, 3]]) + candidate_segments = np.array([[2, 3], [2.5, 3]]) + temporal_iou = pairwise_temporal_iou(candidate_segments, target_segments) + assert_array_equal(temporal_iou, [[0, 0], [1, 0.5]]) + + # test temporal overlap_self + target_segments = np.array([[1, 2], [2, 3]]) + candidate_segments = np.array([[2, 3], [2.5, 3]]) + temporal_iou, temporal_overlap_self = pairwise_temporal_iou( + candidate_segments, target_segments, calculate_overlap_self=True) + assert_array_equal(temporal_overlap_self, [[0, 0], [1, 1]]) + + # test temporal overlap_self when candidate_segments is 1d + target_segments = np.array([[1, 2], [2, 3]]) + candidate_segments = np.array([2.5, 3]) + temporal_iou, temporal_overlap_self = pairwise_temporal_iou( + candidate_segments, target_segments, calculate_overlap_self=True) + assert_array_equal(temporal_overlap_self, [0, 1]) + + +def test_average_recall_at_avg_proposals(): + ground_truth1 = { + 'v_test1': np.array([[0, 1], [1, 2]]), + 'v_test2': np.array([[0, 1], [1, 2]]) + } + ground_truth2 = {'v_test1': np.array([[0, 1]])} + proposals1 = { + 'v_test1': np.array([[0, 1, 1], [1, 2, 1]]), + 'v_test2': np.array([[0, 1, 1], [1, 2, 1]]) + } + proposals2 = { + 'v_test1': np.array([[10, 11, 0.6], [11, 12, 0.4]]), + 'v_test2': np.array([[10, 11, 0.6], [11, 12, 0.4]]) + } + proposals3 = { + 'v_test1': np.array([[i, i + 1, 1 / (i + 1)] for i in range(100)]) + } + + recall, avg_recall, proposals_per_video, auc = ( + average_recall_at_avg_proposals(ground_truth1, proposals1, 4)) + assert_array_equal(recall, [[0.] * 49 + [0.5] * 50 + [1.]] * 10) + assert_array_equal(avg_recall, [0.] * 49 + [0.5] * 50 + [1.]) + assert_array_almost_equal( + proposals_per_video, np.arange(0.02, 2.02, 0.02), decimal=10) + assert auc == 25.5 + + recall, avg_recall, proposals_per_video, auc = ( + average_recall_at_avg_proposals(ground_truth1, proposals2, 4)) + assert_array_equal(recall, [[0.] * 100] * 10) + assert_array_equal(avg_recall, [0.] * 100) + assert_array_almost_equal( + proposals_per_video, np.arange(0.02, 2.02, 0.02), decimal=10) + assert auc == 0 + + recall, avg_recall, proposals_per_video, auc = ( + average_recall_at_avg_proposals(ground_truth2, proposals3, 100)) + assert_array_equal(recall, [[1.] * 100] * 10) + assert_array_equal(avg_recall, ([1.] * 100)) + assert_array_almost_equal( + proposals_per_video, np.arange(1, 101, 1), decimal=10) + assert auc == 99.0 + + +def test_get_weighted_score(): + score_a = [ + np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]), + np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]), + np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]), + np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413]) + ] + score_b = [ + np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]), + np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]), + np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413]), + np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]) + ] + weighted_score = get_weighted_score([score_a], [1]) + assert np.all(np.isclose(np.array(score_a), np.array(weighted_score))) + coeff_a, coeff_b = 2., 1. 
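+    # score fusion: with coefficients (2., 1.) the fused scores should equal
+    # coeff_a * score_a + coeff_b * score_b element-wise, as checked against
+    # the ground_truth list built below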
+ weighted_score = get_weighted_score([score_a, score_b], [coeff_a, coeff_b]) + ground_truth = [ + x * coeff_a + y * coeff_b for x, y in zip(score_a, score_b) + ] + assert np.all(np.isclose(np.array(ground_truth), np.array(weighted_score))) + + +def test_mean_average_precision(): + + def content_for_unittest(scores, labels, result): + gt = mean_average_precision(scores, labels) + assert gt == result + + scores = [ + np.array([0.1, 0.2, 0.3, 0.4]), + np.array([0.2, 0.3, 0.4, 0.1]), + np.array([0.3, 0.4, 0.1, 0.2]), + np.array([0.4, 0.1, 0.2, 0.3]) + ] + + label1 = np.array([[1, 1, 0, 0], [1, 0, 1, 1], [1, 0, 1, 0], [1, 1, 0, 1]]) + result1 = 2 / 3 + label2 = np.array([[0, 1, 0, 1], [0, 1, 1, 0], [1, 0, 1, 0], [0, 0, 1, 1]]) + result2 = np.mean([0.5, 0.5833333333333333, 0.8055555555555556, 1.0]) + + content_for_unittest(scores, label1, result1) + content_for_unittest(scores, label2, result2) diff --git a/Downstream/Open-Set-Action-Recognition/tests/test_data/test_augmentations.py b/Downstream/Open-Set-Action-Recognition/tests/test_data/test_augmentations.py new file mode 100644 index 0000000..7ea1fa1 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/test_data/test_augmentations.py @@ -0,0 +1,1308 @@ +import copy + +import mmcv +import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal + +# yapf: disable +from mmaction.datasets.pipelines import (AudioAmplify, CenterCrop, ColorJitter, + EntityBoxClip, EntityBoxCrop, + EntityBoxFlip, EntityBoxPad, + EntityBoxRescale, Flip, Fuse, + MelSpectrogram, MultiGroupCrop, + MultiScaleCrop, Normalize, RandomCrop, + RandomRescale, RandomResizedCrop, + RandomScale, Resize, TenCrop, + ThreeCrop) + +# yapf: enable + + +class TestAugumentations: + + @staticmethod + def check_keys_contain(result_keys, target_keys): + """Check if all elements in target_keys is in result_keys.""" + return set(target_keys).issubset(set(result_keys)) + + @staticmethod + def check_crop(origin_imgs, result_imgs, result_bbox, num_crops=1): + """Check if the result_bbox is in correspond to result_imgs.""" + + def check_single_crop(origin_imgs, result_imgs, result_bbox): + result_img_shape = result_imgs[0].shape[:2] + crop_w = result_bbox[2] - result_bbox[0] + crop_h = result_bbox[3] - result_bbox[1] + crop_shape = (crop_h, crop_w) + if not crop_shape == result_img_shape: + return False + left, top, right, bottom = result_bbox + return np.array_equal( + np.array(origin_imgs)[:, top:bottom, left:right, :], + np.array(result_imgs)) + + if result_bbox.ndim == 1: + return check_single_crop(origin_imgs, result_imgs, result_bbox) + elif result_bbox.ndim == 2: + num_batch = len(origin_imgs) + for i, bbox in enumerate(result_bbox): + if num_crops == 10: + if (i // num_batch) % 2 == 0: + flag = check_single_crop([origin_imgs[i % num_batch]], + [result_imgs[i]], bbox) + else: + flag = check_single_crop( + [origin_imgs[i % num_batch]], + [np.flip(result_imgs[i], axis=1)], bbox) + else: + flag = check_single_crop([origin_imgs[i % num_batch]], + [result_imgs[i]], bbox) + if not flag: + return False + return True + else: + # bbox has a wrong dimension + return False + + @staticmethod + def check_flip(origin_imgs, result_imgs, flip_type): + """Check if the origin_imgs are flipped correctly into result_imgs in + different flip_types.""" + n = len(origin_imgs) + h, w, c = origin_imgs[0].shape + if flip_type == 'horizontal': + # yapf: disable + for i in range(n): + for j in range(h): + for k in range(w): + for channel in range(c): + if 
result_imgs[i][j, k, channel] != origin_imgs[i][j, w - 1 - k, channel]: # noqa:E501 + return False + # yapf: enable + else: + # yapf: disable + for i in range(n): + for j in range(h): + for k in range(w): + for channel in range(c): + if result_imgs[i][j, k, channel] != origin_imgs[i][h - 1 - j, k, channel]: # noqa:E501 + return False + # yapf: enable + return True + + @staticmethod + def check_normalize(origin_imgs, result_imgs, norm_cfg): + """Check if the origin_imgs are normalized correctly into result_imgs + in a given norm_cfg.""" + target_imgs = result_imgs.copy() + target_imgs *= norm_cfg['std'] + target_imgs += norm_cfg['mean'] + if norm_cfg['to_bgr']: + target_imgs = target_imgs[..., ::-1].copy() + assert_array_almost_equal(origin_imgs, target_imgs, decimal=4) + + def test_init_lazy(self): + from mmaction.datasets.pipelines.augmentations import \ + _init_lazy_if_proper # noqa: E501 + with pytest.raises(AssertionError): + # use lazy operation but "lazy" not in results + result = dict(lazy=dict(), img_shape=[64, 64]) + _init_lazy_if_proper(result, False) + + lazy_keys = [ + 'original_shape', 'crop_bbox', 'flip', 'flip_direction', + 'interpolation' + ] + + # 'img_shape' not in results + result = dict(imgs=list(np.random.randn(3, 64, 64, 3))) + _init_lazy_if_proper(result, True) + assert self.check_keys_contain(result, ['imgs', 'lazy', 'img_shape']) + assert self.check_keys_contain(result['lazy'], lazy_keys) + + # 'img_shape' in results + result = dict(img_shape=[64, 64]) + _init_lazy_if_proper(result, True) + assert self.check_keys_contain(result, ['lazy', 'img_shape']) + assert self.check_keys_contain(result['lazy'], lazy_keys) + + # do not use lazy operation + result = dict(img_shape=[64, 64]) + _init_lazy_if_proper(result, False) + assert self.check_keys_contain(result, ['img_shape']) + assert 'lazy' not in result + + def test_random_crop(self): + with pytest.raises(TypeError): + # size must be an int + RandomCrop(size=(112, 112)) + with pytest.raises(AssertionError): + # "size > height" or "size > width" is not allowed + imgs = list(np.random.rand(2, 224, 341, 3)) + results = dict(imgs=imgs) + random_crop = RandomCrop(size=320) + random_crop(results) + + target_keys = ['imgs', 'crop_bbox', 'img_shape'] + + # General case + imgs = list(np.random.rand(2, 224, 341, 3)) + results = dict(imgs=imgs) + random_crop = RandomCrop(size=224) + random_crop_result = random_crop(results) + assert self.check_keys_contain(random_crop_result.keys(), target_keys) + assert self.check_crop(imgs, random_crop_result['imgs'], + results['crop_bbox']) + h, w = random_crop_result['img_shape'] + assert h == w == 224 + + # Test the case that no need for cropping + imgs = list(np.random.rand(2, 224, 224, 3)) + results = dict(imgs=imgs) + random_crop = RandomCrop(size=224) + random_crop_result = random_crop(results) + assert self.check_keys_contain(random_crop_result.keys(), target_keys) + assert self.check_crop(imgs, random_crop_result['imgs'], + results['crop_bbox']) + h, w = random_crop_result['img_shape'] + assert h == w == 224 + + # Test the one-side-equal case + imgs = list(np.random.rand(2, 224, 225, 3)) + results = dict(imgs=imgs) + random_crop = RandomCrop(size=224) + random_crop_result = random_crop(results) + assert self.check_keys_contain(random_crop_result.keys(), target_keys) + assert self.check_crop(imgs, random_crop_result['imgs'], + results['crop_bbox']) + h, w = random_crop_result['img_shape'] + assert h == w == 224 + + assert repr(random_crop) == (f'{random_crop.__class__.__name__}' + 
f'(size={224}, lazy={False})') + + def test_random_crop_lazy(self): + with pytest.raises(TypeError): + # size must be an int + RandomCrop(size=(112, 112), lazy=True) + with pytest.raises(AssertionError): + # "size > height" or "size > width" is not allowed + imgs = list(np.random.rand(2, 224, 341, 3)) + results = dict(imgs=imgs) + random_crop = RandomCrop(size=320, lazy=True) + random_crop(results) + + target_keys = ['imgs', 'crop_bbox', 'img_shape', 'lazy'] + + # General case + imgs = list(np.random.rand(2, 224, 341, 3)) + results = dict(imgs=imgs) + random_crop = RandomCrop(size=224, lazy=True) + random_crop_result = random_crop(results) + assert self.check_keys_contain(random_crop_result.keys(), target_keys) + assert id(imgs) == id(random_crop_result['imgs']) + random_crop_result_fuse = Fuse()(random_crop_result) + assert 'lazy' not in random_crop_result_fuse + assert self.check_crop(imgs, random_crop_result_fuse['imgs'], + results['crop_bbox']) + h, w = random_crop_result_fuse['img_shape'] + assert h == w == 224 + + # Test the case that no need for cropping + imgs = list(np.random.rand(2, 224, 224, 3)) + results = dict(imgs=imgs) + random_crop = RandomCrop(size=224, lazy=True) + random_crop_result = random_crop(results) + assert self.check_keys_contain(random_crop_result.keys(), target_keys) + assert id(imgs) == id(random_crop_result['imgs']) + random_crop_result_fuse = Fuse()(random_crop_result) + assert 'lazy' not in random_crop_result_fuse + assert self.check_crop(imgs, random_crop_result_fuse['imgs'], + results['crop_bbox']) + h, w = random_crop_result_fuse['img_shape'] + assert h == w == 224 + + # Test the one-side-equal case + imgs = list(np.random.rand(2, 224, 225, 3)) + results = dict(imgs=imgs) + random_crop = RandomCrop(size=224, lazy=True) + random_crop_result = random_crop(results) + assert self.check_keys_contain(random_crop_result.keys(), target_keys) + assert id(imgs) == id(random_crop_result['imgs']) + random_crop_result_fuse = Fuse()(random_crop_result) + assert 'lazy' not in random_crop_result_fuse + assert self.check_crop(imgs, random_crop_result_fuse['imgs'], + results['crop_bbox']) + h, w = random_crop_result_fuse['img_shape'] + assert h == w == 224 + + assert repr(random_crop) == (f'{random_crop.__class__.__name__}' + f'(size={224}, lazy={True})') + + def test_random_resized_crop(self): + with pytest.raises(TypeError): + # area_range must be a tuple of float + RandomResizedCrop(area_range=0.5) + with pytest.raises(TypeError): + # aspect_ratio_range must be a tuple of float + RandomResizedCrop(area_range=(0.08, 1.0), aspect_ratio_range=0.1) + + target_keys = ['imgs', 'crop_bbox', 'img_shape'] + # There will be a slight difference because of rounding + eps = 0.01 + imgs = list(np.random.rand(2, 256, 341, 3)) + results = dict(imgs=imgs) + + with pytest.raises(AssertionError): + # area_range[0] > area_range[1], which is wrong + random_crop = RandomResizedCrop(area_range=(0.9, 0.7)) + random_crop(results) + with pytest.raises(AssertionError): + # 0 > area_range[0] and area_range[1] > 1, which is wrong + random_crop = RandomResizedCrop(aspect_ratio_range=(-0.1, 2.0)) + random_crop(results) + + random_crop = RandomResizedCrop() + random_crop_result = random_crop(results) + assert self.check_keys_contain(random_crop_result.keys(), target_keys) + assert self.check_crop(imgs, random_crop_result['imgs'], + results['crop_bbox']) + h, w = random_crop_result['img_shape'] + assert ((0.08 - eps <= h * w / 256 / 341) + and (h * w / 256 / 341 <= 1 + eps)) + assert (3. / 4. 
- eps <= h / w) and (h / w - eps <= 4. / 3.) + assert repr(random_crop) == (f'{random_crop.__class__.__name__}' + f'(area_range={(0.08, 1.0)}, ' + f'aspect_ratio_range={(3 / 4, 4 / 3)}, ' + f'lazy={False})') + + random_crop = RandomResizedCrop( + area_range=(0.9, 0.9), aspect_ratio_range=(10.0, 10.1)) + # Test fallback cases by very big area range + imgs = list(np.random.rand(2, 256, 341, 3)) + results = dict(imgs=imgs) + random_crop_result = random_crop(results) + assert self.check_keys_contain(random_crop_result.keys(), target_keys) + assert self.check_crop(imgs, random_crop_result['imgs'], + results['crop_bbox']) + h, w = random_crop_result['img_shape'] + assert h == w == 256 + + def test_random_rescale(self): + with pytest.raises(AssertionError): + # scale_range must be a tuple of int + RandomRescale(scale_range=224) + + with pytest.raises(AssertionError): + # scale_range must be a tuple of int + RandomRescale(scale_range=(224.0, 256.0)) + + with pytest.raises(AssertionError): + # scale_range[0] > scale_range[1], which is wrong + RandomRescale(scale_range=(320, 256)) + + with pytest.raises(AssertionError): + # scale_range[0] <= 0, which is wrong + RandomRescale(scale_range=(0, 320)) + + target_keys = ['imgs', 'short_edge', 'img_shape'] + # There will be a slight difference because of rounding + eps = 0.01 + imgs = list(np.random.rand(2, 256, 340, 3)) + results = dict(imgs=imgs, img_shape=(256, 340), modality='RGB') + + random_rescale = RandomRescale(scale_range=(300, 400)) + random_rescale_result = random_rescale(results) + + assert self.check_keys_contain(random_rescale_result.keys(), + target_keys) + + h, w = random_rescale_result['img_shape'] + + # check rescale + assert np.abs(h / 256 - w / 340) < eps + assert 300 / 256 - eps <= h / 256 <= 400 / 256 + eps + assert repr(random_rescale) == (f'{random_rescale.__class__.__name__}' + f'(scale_range={(300, 400)}, ' + 'interpolation=bilinear)') + + def test_random_resized_crop_lazy(self): + + target_keys = ['imgs', 'crop_bbox', 'img_shape', 'lazy'] + # There will be a slight difference because of rounding + eps = 0.01 + imgs = list(np.random.rand(2, 256, 341, 3)) + results = dict(imgs=imgs) + + with pytest.raises(AssertionError): + # area_range[0] > area_range[1], which is wrong + random_crop = RandomResizedCrop(area_range=(0.9, 0.7), lazy=True) + random_crop(results) + with pytest.raises(AssertionError): + # 0 > area_range[0] and area_range[1] > 1, which is wrong + random_crop = RandomResizedCrop( + aspect_ratio_range=(-0.1, 2.0), lazy=True) + random_crop(results) + + random_crop = RandomResizedCrop(lazy=True) + random_crop_result = random_crop(results) + assert self.check_keys_contain(random_crop_result.keys(), target_keys) + assert id(imgs) == id(random_crop_result['imgs']) + random_crop_result_fuse = Fuse()(random_crop_result) + assert self.check_crop(imgs, random_crop_result_fuse['imgs'], + results['crop_bbox']) + h, w = random_crop_result['img_shape'] + assert ((0.08 - eps <= h * w / 256 / 341) + and (h * w / 256 / 341 <= 1 + eps)) + assert (3. / 4. - eps <= h / w) and (h / w - eps <= 4. / 3.) 
+ assert repr(random_crop) == (f'{random_crop.__class__.__name__}' + f'(area_range={(0.08, 1.0)}, ' + f'aspect_ratio_range={(3 / 4, 4 / 3)}, ' + f'lazy={True})') + + random_crop = RandomResizedCrop( + area_range=(0.9, 0.9), aspect_ratio_range=(10.0, 10.1), lazy=True) + # Test fallback cases by very big area range + imgs = np.random.rand(2, 256, 341, 3) + results = dict(imgs=imgs) + random_crop_result = random_crop(results) + assert self.check_keys_contain(random_crop_result.keys(), target_keys) + assert id(imgs) == id(random_crop_result['imgs']) + random_crop_result_fuse = Fuse()(random_crop_result) + assert self.check_crop(imgs, random_crop_result_fuse['imgs'], + results['crop_bbox']) + h, w = random_crop_result['img_shape'] + assert h == w == 256 + + def test_multi_scale_crop(self): + with pytest.raises(TypeError): + # input_size must be int or tuple of int + MultiScaleCrop(0.5) + + with pytest.raises(TypeError): + # input_size must be int or tuple of int + MultiScaleCrop('224') + + with pytest.raises(TypeError): + # input_size must be int or tuple of int + MultiScaleCrop([224, 224]) + + with pytest.raises(TypeError): + # scales must be tuple. + MultiScaleCrop( + 224, scales=[ + 1, + ]) + + with pytest.raises(ValueError): + # num_fix_crops must be in [5, 13] + MultiScaleCrop(224, num_fixed_crops=6) + + target_keys = ['imgs', 'crop_bbox', 'img_shape', 'scales'] + + # MultiScaleCrop with normal crops. + imgs = list(np.random.rand(2, 256, 341, 3)) + results = dict(imgs=imgs) + config = dict( + input_size=224, + scales=(1, 0.8), + random_crop=False, + max_wh_scale_gap=0) + multi_scale_crop = MultiScaleCrop(**config) + multi_scale_crop_results = multi_scale_crop(results) + assert self.check_keys_contain(multi_scale_crop_results.keys(), + target_keys) + assert self.check_crop(imgs, multi_scale_crop_results['imgs'], + multi_scale_crop_results['crop_bbox']) + assert multi_scale_crop_results['img_shape'] in [(256, 256), + (204, 204)] + + # MultiScaleCrop with more fixed crops. + imgs = list(np.random.rand(2, 256, 341, 3)) + results = dict(imgs=imgs) + config = dict( + input_size=224, + scales=(1, 0.8), + random_crop=False, + max_wh_scale_gap=0, + num_fixed_crops=13) + multi_scale_crop = MultiScaleCrop(**config) + multi_scale_crop_results = multi_scale_crop(results) + assert self.check_keys_contain(multi_scale_crop_results.keys(), + target_keys) + assert self.check_crop(imgs, multi_scale_crop_results['imgs'], + multi_scale_crop_results['crop_bbox']) + assert multi_scale_crop_results['img_shape'] in [(256, 256), + (204, 204)] + + # MultiScaleCrop with random crop. 
+ imgs = list(np.random.rand(2, 256, 341, 3)) + results = dict(imgs=imgs) + config = dict( + input_size=224, + scales=(1, 0.8), + random_crop=True, + max_wh_scale_gap=0) + multi_scale_crop = MultiScaleCrop(**config) + multi_scale_crop_results = multi_scale_crop(results) + assert self.check_keys_contain(multi_scale_crop_results.keys(), + target_keys) + assert self.check_crop(imgs, multi_scale_crop_results['imgs'], + multi_scale_crop_results['crop_bbox']) + assert (multi_scale_crop_results['img_shape'] in [(256, 256), + (204, 204)]) + + assert repr(multi_scale_crop) == ( + f'{multi_scale_crop.__class__.__name__}' + f'(input_size={(224, 224)}, scales={(1, 0.8)}, ' + f'max_wh_scale_gap={0}, random_crop={True}, ' + f'num_fixed_crops=5, lazy={False})') + + def test_multi_scale_crop_lazy(self): + with pytest.raises(TypeError): + # input_size must be int or tuple of int + MultiScaleCrop(0.5, lazy=True) + + with pytest.raises(TypeError): + # input_size must be int or tuple of int + MultiScaleCrop('224', lazy=True) + + with pytest.raises(TypeError): + # input_size must be int or tuple of int + MultiScaleCrop([224, 224], lazy=True) + + with pytest.raises(TypeError): + # scales must be tuple. + MultiScaleCrop( + 224, scales=[ + 1, + ], lazy=True) + + with pytest.raises(ValueError): + # num_fix_crops must be in [5, 13] + MultiScaleCrop(224, num_fixed_crops=6, lazy=True) + + target_keys = ['imgs', 'crop_bbox', 'img_shape', 'scales'] + + # MultiScaleCrop with normal crops. + imgs = list(np.random.rand(2, 256, 341, 3)) + results = dict(imgs=imgs) + config = dict( + input_size=224, + scales=(1, 0.8), + random_crop=False, + max_wh_scale_gap=0, + lazy=True) + multi_scale_crop = MultiScaleCrop(**config) + multi_scale_crop_result = multi_scale_crop(results) + assert id(imgs) == id(multi_scale_crop_result['imgs']) + assert self.check_keys_contain(multi_scale_crop_result.keys(), + target_keys) + multi_scale_crop_result_fuse = Fuse()(multi_scale_crop_result) + assert self.check_crop(imgs, multi_scale_crop_result_fuse['imgs'], + multi_scale_crop_result['crop_bbox']) + assert multi_scale_crop_result_fuse['img_shape'] in [(256, 256), + (204, 204)] + + # MultiScaleCrop with more fixed crops. + imgs = list(np.random.rand(2, 256, 341, 3)) + results = dict(imgs=imgs) + config = dict( + input_size=224, + scales=(1, 0.8), + random_crop=False, + max_wh_scale_gap=0, + num_fixed_crops=13, + lazy=True) + multi_scale_crop = MultiScaleCrop(**config) + multi_scale_crop_result = multi_scale_crop(results) + assert id(imgs) == id(multi_scale_crop_result['imgs']) + assert self.check_keys_contain(multi_scale_crop_result.keys(), + target_keys) + multi_scale_crop_result_fuse = Fuse()(multi_scale_crop_result) + assert self.check_crop(imgs, multi_scale_crop_result_fuse['imgs'], + multi_scale_crop_result['crop_bbox']) + assert multi_scale_crop_result_fuse['img_shape'] in [(256, 256), + (204, 204)] + + # MultiScaleCrop with random crop. 
+ imgs = list(np.random.rand(2, 256, 341, 3)) + results = dict(imgs=imgs) + config = dict( + input_size=224, + scales=(1, 0.8), + random_crop=True, + max_wh_scale_gap=0, + lazy=True) + multi_scale_crop = MultiScaleCrop(**config) + multi_scale_crop_result = multi_scale_crop(results) + assert id(imgs) == id(multi_scale_crop_result['imgs']) + assert self.check_keys_contain(multi_scale_crop_result.keys(), + target_keys) + multi_scale_crop_result_fuse = Fuse()(multi_scale_crop_result) + assert self.check_crop(imgs, multi_scale_crop_result_fuse['imgs'], + multi_scale_crop_result['crop_bbox']) + assert (multi_scale_crop_result_fuse['img_shape'] in [(256, 256), + (204, 204)]) + + assert repr(multi_scale_crop) == ( + f'{multi_scale_crop.__class__.__name__}' + f'(input_size={(224, 224)}, scales={(1, 0.8)}, ' + f'max_wh_scale_gap={0}, random_crop={True}, ' + f'num_fixed_crops={5}, lazy={True})') + + def test_resize(self): + with pytest.raises(ValueError): + # scale must be positive + Resize(-0.5) + + with pytest.raises(TypeError): + # scale must be tuple of int + Resize('224') + + target_keys = [ + 'imgs', 'img_shape', 'keep_ratio', 'scale_factor', 'modality' + ] + + # test resize for flow images + imgs = list(np.random.rand(2, 240, 320)) + results = dict(imgs=imgs, modality='Flow') + resize = Resize(scale=(160, 80), keep_ratio=False) + resize_results = resize(results) + assert self.check_keys_contain(resize_results.keys(), target_keys) + assert np.all(resize_results['scale_factor'] == np.array( + [.5, 1. / 3.], dtype=np.float32)) + assert resize_results['img_shape'] == (80, 160) + + # scale with -1 to indicate np.inf + imgs = list(np.random.rand(2, 240, 320, 3)) + results = dict(imgs=imgs, modality='RGB') + resize = Resize(scale=(-1, 256), keep_ratio=True) + resize_results = resize(results) + assert self.check_keys_contain(resize_results.keys(), target_keys) + assert np.all(resize_results['scale_factor'] == np.array( + [341 / 320, 256 / 240], dtype=np.float32)) + assert resize_results['img_shape'] == (256, 341) + + # scale with a normal tuple (320, 320) to indicate np.inf + imgs = list(np.random.rand(2, 240, 320, 3)) + results = dict(imgs=imgs, modality='RGB') + resize = Resize(scale=(320, 320), keep_ratio=False) + resize_results = resize(results) + assert self.check_keys_contain(resize_results.keys(), target_keys) + assert np.all(resize_results['scale_factor'] == np.array( + [1, 320 / 240], dtype=np.float32)) + assert resize_results['img_shape'] == (320, 320) + + # scale with a normal tuple (341, 256) to indicate np.inf + imgs = list(np.random.rand(2, 240, 320, 3)) + results = dict(imgs=imgs, modality='RGB') + resize = Resize(scale=(341, 256), keep_ratio=False) + resize_results = resize(results) + assert self.check_keys_contain(resize_results.keys(), target_keys) + assert np.all(resize_results['scale_factor'] == np.array( + [341 / 320, 256 / 240], dtype=np.float32)) + assert resize_results['img_shape'] == (256, 341) + + assert repr(resize) == ( + resize.__class__.__name__ + + f'(scale={(341, 256)}, keep_ratio={False}, ' + + f'interpolation=bilinear, lazy={False})') + + def test_resize_lazy(self): + with pytest.raises(ValueError): + # scale must be positive + Resize(-0.5, lazy=True) + + with pytest.raises(TypeError): + # scale must be tuple of int + Resize('224', lazy=True) + + target_keys = [ + 'imgs', 'img_shape', 'keep_ratio', 'scale_factor', 'modality' + ] + + # scale with -1 to indicate np.inf + imgs = list(np.random.rand(2, 240, 320, 3)) + results = dict(imgs=imgs, modality='RGB') + resize = 
Resize(scale=(-1, 256), keep_ratio=True, lazy=True) + resize_results = resize(results) + assert id(imgs) == id(resize_results['imgs']) + assert self.check_keys_contain(resize_results.keys(), target_keys) + resize_results_fuse = Fuse()(resize_results) + assert np.all(resize_results_fuse['scale_factor'] == np.array( + [341 / 320, 256 / 240], dtype=np.float32)) + assert resize_results_fuse['img_shape'] == (256, 341) + + # scale with a normal tuple (320, 320) to indicate np.inf + imgs = list(np.random.rand(2, 240, 320, 3)) + results = dict(imgs=imgs, modality='RGB') + resize = Resize(scale=(320, 320), keep_ratio=False, lazy=True) + resize_results = resize(results) + assert id(imgs) == id(resize_results['imgs']) + assert self.check_keys_contain(resize_results.keys(), target_keys) + resize_results_fuse = Fuse()(resize_results) + assert np.all(resize_results_fuse['scale_factor'] == np.array( + [1, 320 / 240], dtype=np.float32)) + assert resize_results_fuse['img_shape'] == (320, 320) + + # scale with a normal tuple (341, 256) to indicate np.inf + imgs = list(np.random.rand(2, 240, 320, 3)) + results = dict(imgs=imgs, modality='RGB') + resize = Resize(scale=(341, 256), keep_ratio=False, lazy=True) + resize_results = resize(results) + assert id(imgs) == id(resize_results['imgs']) + assert self.check_keys_contain(resize_results.keys(), target_keys) + resize_results_fuse = Fuse()(resize_results) + assert np.all(resize_results_fuse['scale_factor'] == np.array( + [341 / 320, 256 / 240], dtype=np.float32)) + assert resize_results_fuse['img_shape'] == (256, 341) + + assert repr(resize) == (f'{resize.__class__.__name__ }' + f'(scale={(341, 256)}, keep_ratio={False}, ' + + f'interpolation=bilinear, lazy={True})') + + def test_flip(self): + with pytest.raises(ValueError): + # direction must be in ['horizontal', 'vertical'] + Flip(direction='vertically') + + target_keys = ['imgs', 'flip_direction', 'modality'] + + # do not flip imgs. + imgs = list(np.random.rand(2, 64, 64, 3)) + results = dict(imgs=copy.deepcopy(imgs), modality='RGB') + flip = Flip(flip_ratio=0, direction='horizontal') + flip_results = flip(results) + assert self.check_keys_contain(flip_results.keys(), target_keys) + assert np.array_equal(imgs, results['imgs']) + assert id(flip_results['imgs']) == id(results['imgs']) + assert np.shape(flip_results['imgs']) == np.shape(imgs) + + # always flip imgs horizontally. 
+ imgs = list(np.random.rand(2, 64, 64, 3)) + results = dict(imgs=copy.deepcopy(imgs), modality='RGB') + flip = Flip(flip_ratio=1, direction='horizontal') + flip_results = flip(results) + assert self.check_keys_contain(flip_results.keys(), target_keys) + if flip_results['flip'] is True: + assert self.check_flip(imgs, flip_results['imgs'], + flip_results['flip_direction']) + assert id(flip_results['imgs']) == id(results['imgs']) + assert np.shape(flip_results['imgs']) == np.shape(imgs) + + # flip flow images horizontally + imgs = [ + np.arange(16).reshape(4, 4).astype(np.float32), + np.arange(16, 32).reshape(4, 4).astype(np.float32) + ] + results = dict(imgs=copy.deepcopy(imgs), modality='Flow') + flip = Flip(flip_ratio=1, direction='horizontal') + flip_results = flip(results) + assert self.check_keys_contain(flip_results.keys(), target_keys) + imgs = [x.reshape(4, 4, 1) for x in imgs] + flip_results['imgs'] = [ + x.reshape(4, 4, 1) for x in flip_results['imgs'] + ] + if flip_results['flip'] is True: + assert self.check_flip([imgs[0]], + [mmcv.iminvert(flip_results['imgs'][0])], + flip_results['flip_direction']) + assert self.check_flip([imgs[1]], [flip_results['imgs'][1]], + flip_results['flip_direction']) + assert id(flip_results['imgs']) == id(results['imgs']) + assert np.shape(flip_results['imgs']) == np.shape(imgs) + + # always flip imgs vertivally. + imgs = list(np.random.rand(2, 64, 64, 3)) + results = dict(imgs=copy.deepcopy(imgs), modality='RGB') + flip = Flip(flip_ratio=1, direction='vertical') + flip_results = flip(results) + assert self.check_keys_contain(flip_results.keys(), target_keys) + if flip_results['flip'] is True: + assert self.check_flip(imgs, flip_results['imgs'], + flip_results['flip_direction']) + assert id(flip_results['imgs']) == id(results['imgs']) + assert np.shape(flip_results['imgs']) == np.shape(imgs) + + assert repr(flip) == (f'{flip.__class__.__name__}' + f'(flip_ratio={1}, direction=vertical, ' + f'lazy={False})') + + def test_flip_lazy(self): + with pytest.raises(ValueError): + Flip(direction='vertically', lazy=True) + + target_keys = ['imgs', 'flip_direction', 'modality'] + + # do not flip imgs. + imgs = list(np.random.rand(2, 64, 64, 3)) + imgs_tmp = imgs.copy() + results = dict(imgs=imgs_tmp, modality='RGB') + flip = Flip(flip_ratio=0, direction='horizontal', lazy=True) + flip_results = flip(results) + assert id(imgs_tmp) == id(flip_results['imgs']) + assert self.check_keys_contain(flip_results.keys(), target_keys) + flip_results_fuse = Fuse()(flip_results) + assert np.equal(imgs, results['imgs']).all() + assert id(flip_results['imgs']) == id(results['imgs']) + assert flip_results_fuse['imgs'][0].shape == (64, 64, 3) + + # always flip imgs horizontally. + imgs = list(np.random.rand(2, 64, 64, 3)) + imgs_tmp = imgs.copy() + results = dict(imgs=imgs_tmp, modality='RGB') + flip = Flip(flip_ratio=1, direction='horizontal', lazy=True) + flip_results = flip(results) + assert id(imgs_tmp) == id(flip_results['imgs']) + assert self.check_keys_contain(flip_results.keys(), target_keys) + flip_results_fuse = Fuse()(flip_results) + assert self.check_flip(imgs, flip_results['imgs'], + flip_results['flip_direction']) + assert id(flip_results['imgs']) == id(results['imgs']) + assert flip_results_fuse['imgs'][0].shape == (64, 64, 3) + + # always flip imgs vertivally. 
+ imgs = list(np.random.rand(2, 64, 64, 3)) + imgs_tmp = imgs.copy() + results = dict(imgs=imgs_tmp, modality='RGB') + flip = Flip(flip_ratio=1, direction='vertical', lazy=True) + flip_results = flip(results) + assert id(imgs_tmp) == id(flip_results['imgs']) + assert self.check_keys_contain(flip_results.keys(), target_keys) + flip_results_fuse = Fuse()(flip_results) + assert self.check_flip(imgs, flip_results['imgs'], + flip_results['flip_direction']) + assert id(flip_results['imgs']) == id(results['imgs']) + assert flip_results_fuse['imgs'][0].shape == (64, 64, 3) + + assert repr(flip) == (f'{flip.__class__.__name__}' + f'(flip_ratio={1}, direction=vertical, ' + f'lazy={True})') + + def test_normalize(self): + with pytest.raises(TypeError): + # mean must be list, tuple or np.ndarray + Normalize( + dict(mean=[123.675, 116.28, 103.53]), [58.395, 57.12, 57.375]) + + with pytest.raises(TypeError): + # std must be list, tuple or np.ndarray + Normalize([123.675, 116.28, 103.53], + dict(std=[58.395, 57.12, 57.375])) + + target_keys = ['imgs', 'img_norm_cfg', 'modality'] + + # normalize imgs in RGB format + imgs = list(np.random.rand(2, 240, 320, 3).astype(np.float32)) + results = dict(imgs=imgs, modality='RGB') + config = dict( + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_bgr=False) + normalize = Normalize(**config) + normalize_results = normalize(results) + assert self.check_keys_contain(normalize_results.keys(), target_keys) + self.check_normalize(imgs, normalize_results['imgs'], + normalize_results['img_norm_cfg']) + + # normalize flow imgs + imgs = list(np.random.rand(4, 240, 320).astype(np.float32)) + results = dict(imgs=imgs, modality='Flow') + config = dict(mean=[128, 128], std=[128, 128]) + normalize = Normalize(**config) + normalize_results = normalize(results) + assert self.check_keys_contain(normalize_results.keys(), target_keys) + assert normalize_results['imgs'].shape == (2, 240, 320, 2) + x_components = np.array(imgs[0::2]) + y_components = np.array(imgs[1::2]) + x_components = (x_components - config['mean'][0]) / config['std'][0] + y_components = (y_components - config['mean'][1]) / config['std'][1] + result_imgs = np.stack([x_components, y_components], axis=-1) + assert np.all(np.isclose(result_imgs, normalize_results['imgs'])) + + # normalize imgs in BGR format + imgs = list(np.random.rand(2, 240, 320, 3).astype(np.float32)) + results = dict(imgs=imgs, modality='RGB') + config = dict( + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_bgr=True) + normalize = Normalize(**config) + normalize_results = normalize(results) + assert self.check_keys_contain(normalize_results.keys(), target_keys) + self.check_normalize(imgs, normalize_results['imgs'], + normalize_results['img_norm_cfg']) + + assert normalize.__repr__() == ( + normalize.__class__.__name__ + + f'(mean={np.array([123.675, 116.28, 103.53])}, ' + + f'std={np.array([58.395, 57.12, 57.375])}, to_bgr={True}, ' + f'adjust_magnitude={False})') + + def test_color_jitter(self): + imgs = list( + np.random.randint(0, 255, size=(3, 240, 320, 3), dtype=np.uint8)) + results = dict(imgs=imgs) + + eig_val = np.array([55.46, 4.794, 1.148], dtype=np.float32) + eig_vec = np.array([[-0.5675, 0.7192, 0.4009], + [-0.5808, -0.0045, -0.8140], + [-0.5836, -0.6948, 0.4203]], + dtype=np.float32) + + color_jitter = ColorJitter() + assert_array_equal(color_jitter.eig_val, eig_val) + assert_array_equal(color_jitter.eig_vec, eig_vec) + assert color_jitter.alpha_std == 0.1 + assert color_jitter.color_space_aug 
is False + color_jitter_results = color_jitter(results) + target_keys = [ + 'imgs', 'eig_val', 'eig_vec', 'alpha_std', 'color_space_aug' + ] + assert self.check_keys_contain(color_jitter_results.keys(), + target_keys) + assert np.shape(color_jitter_results['imgs']) == (3, 240, 320, 3) + assert_array_equal(color_jitter_results['eig_val'], eig_val) + assert_array_equal(color_jitter_results['eig_vec'], eig_vec) + assert color_jitter_results['alpha_std'] == 0.1 + assert color_jitter_results['color_space_aug'] is False + + custom_eig_val = np.ones(3, ) + custom_eig_vec = np.ones((3, 3)) + + imgs = list( + np.random.randint(0, 255, size=(3, 240, 320, 3), dtype=np.uint8)) + results = dict(imgs=imgs) + custom_color_jitter = ColorJitter(True, 0.5, custom_eig_val, + custom_eig_vec) + assert_array_equal(color_jitter.eig_val, eig_val) + assert_array_equal(color_jitter.eig_vec, eig_vec) + assert custom_color_jitter.alpha_std == 0.5 + assert custom_color_jitter.color_space_aug is True + custom_color_jitter_results = custom_color_jitter(results) + assert np.shape(custom_color_jitter_results['imgs']) == (3, 240, 320, + 3) + assert_array_equal(custom_color_jitter_results['eig_val'], + custom_eig_val) + assert_array_equal(custom_color_jitter_results['eig_vec'], + custom_eig_vec) + assert custom_color_jitter_results['alpha_std'] == 0.5 + assert custom_color_jitter_results['color_space_aug'] is True + + color_jitter = ColorJitter() + assert repr(color_jitter) == (f'{color_jitter.__class__.__name__}(' + f'color_space_aug={False}, ' + f'alpha_std={0.1}, ' + f'eig_val={eig_val}, ' + f'eig_vec={eig_vec})') + + def test_center_crop(self): + with pytest.raises(TypeError): + # crop_size must be int or tuple of int + CenterCrop(0.5) + + with pytest.raises(TypeError): + # crop_size must be int or tuple of int + CenterCrop('224') + + with pytest.raises(TypeError): + # crop_size must be int or tuple of int + CenterCrop([224, 224]) + + # center crop with crop_size 224 + imgs = list(np.random.rand(2, 240, 320, 3)) + results = dict(imgs=imgs) + center_crop = CenterCrop(crop_size=224) + center_crop_results = center_crop(results) + target_keys = ['imgs', 'crop_bbox', 'img_shape'] + assert self.check_keys_contain(center_crop_results.keys(), target_keys) + assert self.check_crop(imgs, center_crop_results['imgs'], + center_crop_results['crop_bbox']) + assert np.all( + center_crop_results['crop_bbox'] == np.array([48, 8, 272, 232])) + assert center_crop_results['img_shape'] == (224, 224) + + assert repr(center_crop) == (f'{center_crop.__class__.__name__}' + f'(crop_size={(224, 224)}, lazy={False})') + + def test_center_crop_lazy(self): + with pytest.raises(TypeError): + # crop_size must be int or tuple of int + CenterCrop(0.5) + + with pytest.raises(TypeError): + # crop_size must be int or tuple of int + CenterCrop('224') + + with pytest.raises(TypeError): + # crop_size must be int or tuple of int + CenterCrop([224, 224]) + + # center crop with crop_size 224 + imgs = list(np.random.rand(2, 240, 320, 3)) + results = dict(imgs=imgs) + center_crop = CenterCrop(crop_size=224, lazy=True) + center_crop_results = center_crop(results) + + target_keys = ['imgs', 'crop_bbox', 'img_shape'] + assert self.check_keys_contain(center_crop_results.keys(), target_keys) + center_crop_results_fuse = Fuse()(center_crop_results) + assert self.check_crop(imgs, center_crop_results_fuse['imgs'], + center_crop_results['crop_bbox']) + assert np.all(center_crop_results_fuse['crop_bbox'] == np.array( + [48, 8, 272, 232])) + assert 
center_crop_results_fuse['img_shape'] == (224, 224) + + assert repr(center_crop) == (f'{center_crop.__class__.__name__}' + f'(crop_size={(224, 224)}, lazy={True})') + + def test_three_crop(self): + with pytest.raises(TypeError): + # crop_size must be int or tuple of int + ThreeCrop(0.5) + + with pytest.raises(TypeError): + # crop_size must be int or tuple of int + ThreeCrop('224') + + with pytest.raises(TypeError): + # crop_size must be int or tuple of int + ThreeCrop([224, 224]) + + # three crop with crop_size 120 + imgs = list(np.random.rand(2, 240, 120, 3)) + results = dict(imgs=imgs) + three_crop = ThreeCrop(crop_size=120) + three_crop_results = three_crop(results) + target_keys = ['imgs', 'crop_bbox', 'img_shape'] + assert self.check_keys_contain(three_crop_results.keys(), target_keys) + assert self.check_crop(imgs, three_crop_results['imgs'], + three_crop_results['crop_bbox'], 3) + assert three_crop_results['img_shape'] == (120, 120) + + # three crop with crop_size 224 + imgs = list(np.random.rand(2, 224, 224, 3)) + results = dict(imgs=imgs) + three_crop = ThreeCrop(crop_size=224) + three_crop_results = three_crop(results) + target_keys = ['imgs', 'crop_bbox', 'img_shape'] + assert self.check_keys_contain(three_crop_results.keys(), target_keys) + assert self.check_crop(imgs, three_crop_results['imgs'], + three_crop_results['crop_bbox'], 3) + assert three_crop_results['img_shape'] == (224, 224) + + assert repr(three_crop) == (f'{three_crop.__class__.__name__}' + f'(crop_size={(224, 224)})') + + def test_ten_crop(self): + with pytest.raises(TypeError): + # crop_size must be int or tuple of int + TenCrop(0.5) + + with pytest.raises(TypeError): + # crop_size must be int or tuple of int + TenCrop('224') + + with pytest.raises(TypeError): + # crop_size must be int or tuple of int + TenCrop([224, 224]) + + # ten crop with crop_size 256 + imgs = list(np.random.rand(2, 256, 256, 3)) + results = dict(imgs=imgs) + ten_crop = TenCrop(crop_size=224) + ten_crop_results = ten_crop(results) + target_keys = ['imgs', 'crop_bbox', 'img_shape'] + assert self.check_keys_contain(ten_crop_results.keys(), target_keys) + assert self.check_crop(imgs, ten_crop_results['imgs'], + ten_crop_results['crop_bbox'], 10) + assert ten_crop_results['img_shape'] == (224, 224) + + assert repr(ten_crop) == (f'{ten_crop.__class__.__name__}' + f'(crop_size={(224, 224)})') + + def test_multi_group_crop(self): + with pytest.raises(TypeError): + # crop_size must be int or tuple of int + MultiGroupCrop(0.5, 1) + + with pytest.raises(TypeError): + # crop_size must be int or tuple of int + MultiGroupCrop('224', 1) + + with pytest.raises(TypeError): + # crop_size must be int or tuple of int + MultiGroupCrop([224, 224], 1) + + with pytest.raises(TypeError): + # groups must be int + MultiGroupCrop(224, '1') + + with pytest.raises(ValueError): + # groups must be positive + MultiGroupCrop(224, 0) + + target_keys = ['imgs', 'crop_bbox', 'img_shape'] + + # multi_group_crop with crop_size 224, groups 3 + imgs = list(np.random.rand(2, 256, 341, 3)) + results = dict(imgs=imgs) + multi_group_crop = MultiGroupCrop(224, 3) + multi_group_crop_result = multi_group_crop(results) + assert self.check_keys_contain(multi_group_crop_result.keys(), + target_keys) + assert self.check_crop(imgs, multi_group_crop_result['imgs'], + multi_group_crop_result['crop_bbox'], + multi_group_crop.groups) + assert multi_group_crop_result['img_shape'] == (224, 224) + + assert repr(multi_group_crop) == ( + f'{multi_group_crop.__class__.__name__}' + f'(crop_size={(224, 
224)}, groups={3})') + + def test_audio_amplify(self): + target_keys = ['audios', 'amplify_ratio'] + with pytest.raises(TypeError): + # ratio should be float + AudioAmplify(1) + + audio = (np.random.rand(8, )) + results = dict(audios=audio) + amplifier = AudioAmplify(1.5) + results = amplifier(results) + assert self.check_keys_contain(results.keys(), target_keys) + assert repr(amplifier) == (f'{amplifier.__class__.__name__}' + f'(ratio={amplifier.ratio})') + + def test_melspectrogram(self): + target_keys = ['audios'] + with pytest.raises(TypeError): + # ratio should be float + MelSpectrogram(window_size=12.5) + audio = (np.random.rand(1, 160000)) + + # test padding + results = dict(audios=audio, sample_rate=16000) + results['num_clips'] = 1 + results['sample_rate'] = 16000 + mel = MelSpectrogram() + results = mel(results) + assert self.check_keys_contain(results.keys(), target_keys) + + # test truncating + audio = (np.random.rand(1, 160000)) + results = dict(audios=audio, sample_rate=16000) + results['num_clips'] = 1 + results['sample_rate'] = 16000 + mel = MelSpectrogram(fixed_length=1) + results = mel(results) + assert self.check_keys_contain(results.keys(), target_keys) + assert repr(mel) == (f'{mel.__class__.__name__}' + f'(window_size={mel.window_size}), ' + f'step_size={mel.step_size}, ' + f'n_mels={mel.n_mels}, ' + f'fixed_length={mel.fixed_length})') + + def test_random_scale(self): + scales = ((200, 64), (250, 80)) + with pytest.raises(ValueError): + RandomScale(scales, 'unsupport') + + with pytest.raises(ValueError): + random_scale = RandomScale([(800, 256), (1000, 320), (800, 320)]) + random_scale({}) + + imgs = list(np.random.rand(2, 340, 256, 3)) + results = dict(imgs=imgs, img_shape=(340, 256)) + + results_ = copy.deepcopy(results) + random_scale_range = RandomScale(scales) + results_ = random_scale_range(results_) + assert 200 <= results_['scale'][0] <= 250 + assert 64 <= results_['scale'][1] <= 80 + + results_ = copy.deepcopy(results) + random_scale_value = RandomScale(scales, 'value') + results_ = random_scale_value(results_) + assert results_['scale'] in scales + + random_scale_single = RandomScale([(200, 64)]) + results_ = copy.deepcopy(results) + results_ = random_scale_single(results_) + assert results_['scale'] == (200, 64) + + assert repr(random_scale_range) == ( + f'{random_scale_range.__class__.__name__}' + f'(scales={((200, 64), (250, 80))}, ' + 'mode=range)') + + def test_box_rescale(self): + target_keys = ['img_shape', 'scale_factor', 'ann', 'proposals'] + results = dict( + img_shape=(520, 480), + scale_factor=(0.7, 0.8), + proposals=np.array([[0.011, 0.157, 0.655, 0.983, 0.998163]]), + ann=dict(entity_boxes=np.array([[0.031, 0.162, 0.67, 0.995]]))) + + with pytest.raises(AssertionError): + box_scale = EntityBoxRescale() + results_ = copy.deepcopy(results) + results_['proposals'] = np.array([[0.011, 0.157, 0.655]]) + box_scale(results_) + + box_scale = EntityBoxRescale() + results_ = copy.deepcopy(results) + results_ = box_scale(results_) + self.check_keys_contain(results_.keys(), target_keys + ['scores']) + assert_array_almost_equal( + results_['proposals'], + np.array([[3.696000, 65.311999, 220.079995, 408.928002]])) + assert_array_almost_equal( + results_['ann']['entity_boxes'], + np.array([[10.416000, 67.391998, 225.120004, 413.920019]])) + assert results_['scores'] == np.array([0.998163], dtype=np.float32) + + results_ = copy.deepcopy(results) + results_['proposals'] = None + results_ = box_scale(results_) + self.check_keys_contain(results_.keys(), 
target_keys) + assert results_['proposals'] is None + + def test_box_crop(self): + target_keys = ['ann', 'proposals', 'crop_bbox'] + results = dict( + proposals=np.array([[3.696000, 65.311999, 220.079995, + 408.928002]]), + crop_bbox=[13, 75, 200, 380], + ann=dict( + entity_boxes=np.array( + [[10.416000, 67.391998, 225.120004, 413.920019]]))) + + box_crop = EntityBoxCrop() + results_ = copy.deepcopy(results) + results_ = box_crop(results_) + self.check_keys_contain(results_.keys(), target_keys) + assert_array_almost_equal( + results_['ann']['entity_boxes'], + np.array([[-2.584, -7.608002, 212.120004, 338.920019]])) + assert_array_almost_equal( + results_['proposals'], + np.array([[-9.304, -9.688001, 207.079995, 333.928002]])) + + results_ = copy.deepcopy(results) + results_['proposals'] = None + results_ = box_crop(results_) + assert results_['proposals'] is None + + def test_box_flip(self): + target_keys = ['ann', 'proposals', 'img_shape'] + results = dict( + proposals=np.array([[-9.304, -9.688001, 207.079995, 333.928002]]), + img_shape=(520, 480), + ann=dict( + entity_boxes=np.array( + [[-2.584, -7.608002, 212.120004, 338.920019]]))) + + with pytest.raises(ValueError): + EntityBoxFlip(0, 'unsupport') + + box_flip = EntityBoxFlip(flip_ratio=1) + results_ = copy.deepcopy(results) + results_ = box_flip(results_) + self.check_keys_contain(results_.keys(), target_keys) + assert_array_almost_equal( + results_['ann']['entity_boxes'], + np.array([[266.879996, -7.608002, 481.584, 338.920019]])) + assert_array_almost_equal( + results_['proposals'], + np.array([[271.920005, -9.688001, 488.304, 333.928002]])) + + box_flip = EntityBoxFlip(flip_ratio=1, direction='vertical') + results_ = copy.deepcopy(results) + results_ = box_flip(results_) + self.check_keys_contain(results_.keys(), target_keys) + assert_array_almost_equal( + results_['ann']['entity_boxes'], + np.array([[-2.584, 180.079981, 212.120004, 526.608002]])) + assert_array_almost_equal( + results_['proposals'], + np.array([[-9.304, 185.071998, 207.079995, 528.688001]])) + + box_flip = EntityBoxFlip() + results_ = copy.deepcopy(results) + results_['proposals'] = None + results_ = box_flip(results_) + assert results_['proposals'] is None + + assert repr(box_flip) == (f'{box_flip.__class__.__name__}' + '(flip_ratio=0.5, direction=horizontal)') + + def test_box_clip(self): + target_keys = ['ann', 'proposals', 'img_shape'] + results = dict( + proposals=np.array([[-9.304, -9.688001, 207.079995, 333.928002]]), + img_shape=(335, 210), + ann=dict( + entity_boxes=np.array( + [[-2.584, -7.608002, 212.120004, 338.920019]]))) + + box_clip = EntityBoxClip() + results_ = copy.deepcopy(results) + results_ = box_clip(results_) + + self.check_keys_contain(results_.keys(), target_keys) + assert_array_equal(results_['ann']['entity_boxes'], + np.array([[0., 0., 209., 334.]])) + assert_array_equal(results_['proposals'], + np.array([[0., 0., 207.079995, 333.928002]])) + + results_ = copy.deepcopy(results) + results_['proposals'] = None + results_ = box_clip(results_) + assert results_['proposals'] is None + + def test_box_pad(self): + target_keys = ['ann', 'proposals', 'img_shape'] + results = dict( + proposals=np.array([[-9.304, -9.688001, 207.079995, 333.928002], + [-2.584, -7.608002, 212.120004, 338.920019]]), + img_shape=(335, 210), + ann=dict( + entity_boxes=np.array([[ + -2.584, -7.608002, 212.120004, 338.920019 + ], [-9.304, -9.688001, 207.079995, 333.928002]]))) + + box_pad_none = EntityBoxPad() + results_ = copy.deepcopy(results) + results_ = 
box_pad_none(results_) + self.check_keys_contain(results_.keys(), target_keys) + assert_array_equal(results_['proposals'], results['proposals']) + assert_array_equal(results_['ann']['entity_boxes'], + results['ann']['entity_boxes']) + + box_pad = EntityBoxPad(3) + results_ = copy.deepcopy(results) + results_ = box_pad(results_) + self.check_keys_contain(results_.keys(), target_keys) + assert_array_equal( + results_['proposals'], + np.array([[-9.304, -9.688001, 207.079995, 333.928002], + [-2.584, -7.608002, 212.120004, 338.920019], + [0., 0., 0., 0.]], + dtype=np.float32)) + assert_array_equal( + results_['ann']['entity_boxes'], + np.array([[-2.584, -7.608002, 212.120004, 338.920019], + [-9.304, -9.688001, 207.079995, 333.928002], + [0., 0., 0., 0.]], + dtype=np.float32)) + + results_ = copy.deepcopy(results) + results_['proposals'] = None + results_ = box_pad(results_) + assert results_['proposals'] is None + + assert repr(box_pad) == (f'{box_pad.__class__.__name__}' + '(max_num_gts=3)') diff --git a/Downstream/Open-Set-Action-Recognition/tests/test_data/test_ava_dataset.py b/Downstream/Open-Set-Action-Recognition/tests/test_data/test_ava_dataset.py new file mode 100644 index 0000000..a4ef60f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/test_data/test_ava_dataset.py @@ -0,0 +1,125 @@ +import os.path as osp + +import mmcv +import numpy as np +from numpy.testing import assert_array_equal + +from mmaction.datasets import AVADataset + + +def check_keys_contain(result_keys, target_keys): + """Check if all elements in target_keys is in result_keys.""" + return set(target_keys).issubset(set(result_keys)) + + +class TestAVADataset(object): + + @classmethod + def setup_class(cls): + cls.data_prefix = osp.join( + osp.dirname(osp.dirname(__file__)), 'data', 'test_ava_dataset') + cls.ann_file = osp.join(cls.data_prefix, 'ava_sample.csv') + cls.exclude_file = osp.join(cls.data_prefix, + 'ava_excluded_timestamps_sample.csv') + cls.proposal_file = osp.join(cls.data_prefix, + 'ava_proposals_sample.pkl') + cls.pipeline = [ + dict(dict(type='SampleAVAFrames', clip_len=32, frame_interval=2)) + ] + cls.proposal = mmcv.load(cls.proposal_file) + + def test_ava_dataset(self): + target_keys = [ + 'frame_dir', 'video_id', 'timestamp', 'img_key', 'shot_info', + 'fps', 'ann' + ] + ann_keys = ['labels', 'entity_boxes', 'entity_ids'] + pkl_keys = ['0f39OWEqJ24,0902', '0f39OWEqJ24,0903', '_-Z6wFjXtGQ,0902'] + + ava_dataset = AVADataset( + self.ann_file, + self.exclude_file, + self.pipeline, + data_prefix=self.data_prefix, + proposal_file=self.proposal_file) + ava_infos = ava_dataset.video_infos + assert check_keys_contain(ava_dataset.proposals.keys(), pkl_keys) + + assert check_keys_contain(ava_infos[0].keys(), target_keys) + assert check_keys_contain(ava_infos[0]['ann'].keys(), ann_keys) + assert len(ava_infos) == 1 + assert ava_infos[0]['frame_dir'] == osp.join(self.data_prefix, + '0f39OWEqJ24') + assert ava_infos[0]['video_id'] == '0f39OWEqJ24' + assert ava_infos[0]['timestamp'] == 902 + assert ava_infos[0]['img_key'] == '0f39OWEqJ24,0902' + assert ava_infos[0]['shot_info'] == (0, 26880) + assert ava_infos[0]['fps'] == 30 + assert len(ava_infos[0]['ann']) == 3 + target_labels = np.array([12, 17, 79] + [ + -1, + ] * 78) + target_labels = target_labels[None, ...] 
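+        # the sample has three action labels (12, 17, 79); the label vector is
+        # padded with -1 to a fixed length of 81 and given a leading batch dim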
+ assert_array_equal(ava_infos[0]['ann']['labels'], target_labels) + assert_array_equal(ava_infos[0]['ann']['entity_boxes'], + np.array([[0.031, 0.162, 0.67, 0.995]])) + assert_array_equal(ava_infos[0]['ann']['entity_ids'], np.array([0])) + + ava_dataset = AVADataset( + self.ann_file, + None, + self.pipeline, + data_prefix=self.data_prefix, + proposal_file=self.proposal_file) + ava_infos = ava_dataset.video_infos + assert len(ava_infos) == 3 + + ava_dataset = AVADataset( + self.ann_file, + None, + self.pipeline, + test_mode=True, + data_prefix=self.data_prefix, + proposal_file=self.proposal_file) + ava_infos = ava_dataset.video_infos + assert len(ava_infos) == 3 + + ava_dataset = AVADataset( + self.ann_file, + None, + self.pipeline, + test_mode=True, + data_prefix=self.data_prefix, + proposal_file=None) + assert ava_dataset.proposals is None + + def test_ava_pipeline(self): + target_keys = [ + 'frame_dir', 'video_id', 'timestamp', 'img_key', 'shot_info', + 'fps', 'ann', 'filename_tmpl', 'modality', 'start_index', + 'timestamp_start', 'timestamp_end', 'proposals', 'frame_inds', + 'clip_len', 'frame_interval' + ] + ann_keys = ['labels', 'entity_boxes', 'entity_ids'] + + ava_dataset = AVADataset( + self.ann_file, + self.exclude_file, + self.pipeline, + data_prefix=self.data_prefix, + proposal_file=self.proposal_file) + result = ava_dataset[0] + assert check_keys_contain(result.keys(), target_keys) + assert check_keys_contain(result['ann'].keys(), ann_keys) + + assert result['filename_tmpl'] == 'img_{:05}.jpg' + assert result['modality'] == 'RGB' + assert result['start_index'] == 1 + assert result['timestamp_start'] == 902 + assert result['timestamp_end'] == 1798 + assert_array_equal(result['proposals'], + np.array([[0.011, 0.157, 0.655, 0.983, 0.998163]])) + + assert result['clip_len'] == 32 + assert result['frame_interval'] == 2 + assert len(result['frame_inds']) == 32 diff --git a/Downstream/Open-Set-Action-Recognition/tests/test_data/test_compose.py b/Downstream/Open-Set-Action-Recognition/tests/test_data/test_compose.py new file mode 100644 index 0000000..0a4ba05 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/test_data/test_compose.py @@ -0,0 +1,41 @@ +import numpy as np +import pytest + +from mmaction.datasets.pipelines import Compose, ImageToTensor + + +def check_keys_equal(result_keys, target_keys): + """Check if all elements in target_keys is in result_keys.""" + return set(target_keys) == set(result_keys) + + +def test_compose(): + with pytest.raises(TypeError): + # transform must be callable or a dict + Compose('LoadImage') + + target_keys = ['img', 'img_meta'] + + # test Compose given a data pipeline + img = np.random.randn(256, 256, 3) + results = dict(img=img, abandoned_key=None, img_name='test_image.png') + test_pipeline = [ + dict(type='Collect', keys=['img'], meta_keys=['img_name']), + dict(type='ImageToTensor', keys=['img']) + ] + compose = Compose(test_pipeline) + compose_results = compose(results) + assert check_keys_equal(compose_results.keys(), target_keys) + assert check_keys_equal(compose_results['img_meta'].data.keys(), + ['img_name']) + + # test Compose when forward data is None + results = None + image_to_tensor = ImageToTensor(keys=[]) + test_pipeline = [image_to_tensor] + compose = Compose(test_pipeline) + compose_results = compose(results) + assert compose_results is None + + assert repr(compose) == compose.__class__.__name__ + \ + f'(\n {image_to_tensor}\n)' diff --git a/Downstream/Open-Set-Action-Recognition/tests/test_data/test_dataset.py 
b/Downstream/Open-Set-Action-Recognition/tests/test_data/test_dataset.py new file mode 100644 index 0000000..71918d4 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/test_data/test_dataset.py @@ -0,0 +1,972 @@ +import os +import os.path as osp +import tempfile + +import mmcv +import numpy as np +import pytest +from mmcv import ConfigDict +from numpy.testing import assert_array_almost_equal, assert_array_equal + +from mmaction.datasets import (ActivityNetDataset, AudioDataset, + AudioFeatureDataset, AudioVisualDataset, + HVUDataset, RawframeDataset, RawVideoDataset, + RepeatDataset, SSNDataset, VideoDataset) + + +class TestDataset: + + @staticmethod + def check_keys_contain(result_keys, target_keys): + """Check if all elements in target_keys is in result_keys.""" + return set(target_keys).issubset(set(result_keys)) + + @classmethod + def setup_class(cls): + cls.data_prefix = osp.join(osp.dirname(osp.dirname(__file__)), 'data') + cls.frame_ann_file = osp.join(cls.data_prefix, 'frame_test_list.txt') + cls.frame_ann_file_with_offset = osp.join( + cls.data_prefix, 'frame_test_list_with_offset.txt') + cls.frame_ann_file_multi_label = osp.join( + cls.data_prefix, 'frame_test_list_multi_label.txt') + cls.video_ann_file = osp.join(cls.data_prefix, 'video_test_list.txt') + cls.hvu_video_ann_file = osp.join(cls.data_prefix, + 'hvu_video_test_anno.json') + cls.hvu_video_eval_ann_file = osp.join( + cls.data_prefix, 'hvu_video_eval_test_anno.json') + cls.hvu_frame_ann_file = osp.join(cls.data_prefix, + 'hvu_frame_test_anno.json') + cls.action_ann_file = osp.join(cls.data_prefix, + 'action_test_anno.json') + cls.proposal_ann_file = osp.join(cls.data_prefix, + 'proposal_test_list.txt') + cls.proposal_norm_ann_file = osp.join(cls.data_prefix, + 'proposal_normalized_list.txt') + cls.audio_ann_file = osp.join(cls.data_prefix, 'audio_test_list.txt') + cls.audio_feature_ann_file = osp.join(cls.data_prefix, + 'audio_feature_test_list.txt') + cls.rawvideo_test_anno_txt = osp.join(cls.data_prefix, + 'rawvideo_test_anno.txt') + cls.rawvideo_test_anno_json = osp.join(cls.data_prefix, + 'rawvideo_test_anno.json') + cls.rawvideo_pipeline = [] + + cls.frame_pipeline = [ + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1), + dict(type='RawFrameDecode', io_backend='disk') + ] + cls.audio_pipeline = [ + dict(type='AudioDecodeInit'), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1), + dict(type='AudioDecode') + ] + cls.audio_feature_pipeline = [ + dict(type='LoadAudioFeature'), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1), + dict(type='AudioFeatureSelector') + ] + cls.video_pipeline = [ + dict(type='OpenCVInit'), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1), + dict(type='OpenCVDecode') + ] + cls.action_pipeline = [] + cls.proposal_pipeline = [ + dict( + type='SampleProposalFrames', + clip_len=1, + body_segments=5, + aug_segments=(2, 2), + aug_ratio=0.5), + dict(type='RawFrameDecode', io_backend='disk') + ] + cls.proposal_test_pipeline = [ + dict( + type='SampleProposalFrames', + clip_len=1, + body_segments=5, + aug_segments=(2, 2), + aug_ratio=0.5, + mode='test'), + dict(type='RawFrameDecode', io_backend='disk') + ] + + cls.proposal_train_cfg = ConfigDict( + dict( + ssn=dict( + assigner=dict( + positive_iou_threshold=0.7, + background_iou_threshold=0.01, + incomplete_iou_threshold=0.5, + background_coverage_threshold=0.02, + incomplete_overlap_threshold=0.01), + sampler=dict( 
+ num_per_video=8, + positive_ratio=1, + background_ratio=1, + incomplete_ratio=6, + add_gt_as_proposals=True), + loss_weight=dict( + comp_loss_weight=0.1, reg_loss_weight=0.1), + debug=False))) + cls.proposal_test_cfg = ConfigDict( + dict( + ssn=dict( + sampler=dict(test_interval=6, batch_size=16), + evaluater=dict( + top_k=2000, + nms=0.2, + softmax_before_filter=True, + cls_top_k=2)))) + cls.proposal_test_cfg_topall = ConfigDict( + dict( + ssn=dict( + sampler=dict(test_interval=6, batch_size=16), + evaluater=dict( + top_k=-1, + nms=0.2, + softmax_before_filter=True, + cls_top_k=2)))) + + cls.hvu_categories = [ + 'action', 'attribute', 'concept', 'event', 'object', 'scene' + ] + + cls.hvu_category_nums = [739, 117, 291, 69, 1679, 248] + + cls.hvu_categories_for_eval = ['action', 'scene', 'object'] + cls.hvu_category_nums_for_eval = [3, 3, 3] + + cls.filename_tmpl = 'img_{:05d}.jpg' + + def test_rawvideo_dataset(self): + # Try to load txt file + rawvideo_dataset = RawVideoDataset( + ann_file=self.rawvideo_test_anno_txt, + pipeline=self.rawvideo_pipeline, + clipname_tmpl='part_{}.mp4', + sampling_strategy='positive', + data_prefix=self.data_prefix) + result = rawvideo_dataset[0] + clipname = osp.join(self.data_prefix, 'test_rawvideo_dataset', + 'part_0.mp4') + assert result['filename'] == clipname + + # Try to load json file + rawvideo_dataset = RawVideoDataset( + ann_file=self.rawvideo_test_anno_json, + pipeline=self.rawvideo_pipeline, + clipname_tmpl='part_{}.mp4', + sampling_strategy='random', + data_prefix=self.data_prefix, + test_mode=True) + result = rawvideo_dataset[0] + + def test_hvu_dataset(self): + hvu_frame_dataset = HVUDataset( + ann_file=self.hvu_frame_ann_file, + pipeline=self.frame_pipeline, + tag_categories=self.hvu_categories, + tag_category_nums=self.hvu_category_nums, + filename_tmpl=self.filename_tmpl, + data_prefix=self.data_prefix, + start_index=1) + hvu_frame_infos = hvu_frame_dataset.video_infos + frame_dir = osp.join(self.data_prefix, 'test_imgs') + assert hvu_frame_infos == [ + dict( + frame_dir=frame_dir, + total_frames=5, + label=dict( + concept=[250, 131, 42, 51, 57, 155, 122], + object=[1570, 508], + event=[16], + action=[180], + scene=[206]), + categories=self.hvu_categories, + category_nums=self.hvu_category_nums, + filename_tmpl=self.filename_tmpl, + start_index=1, + modality='RGB') + ] * 2 + + hvu_video_dataset = HVUDataset( + ann_file=self.hvu_video_ann_file, + pipeline=self.video_pipeline, + tag_categories=self.hvu_categories, + tag_category_nums=self.hvu_category_nums, + data_prefix=self.data_prefix) + hvu_video_infos = hvu_video_dataset.video_infos + filename = osp.join(self.data_prefix, 'tmp.mp4') + assert hvu_video_infos == [ + dict( + filename=filename, + label=dict( + concept=[250, 131, 42, 51, 57, 155, 122], + object=[1570, 508], + event=[16], + action=[180], + scene=[206]), + categories=self.hvu_categories, + category_nums=self.hvu_category_nums) + ] * 2 + + hvu_video_eval_dataset = HVUDataset( + ann_file=self.hvu_video_eval_ann_file, + pipeline=self.video_pipeline, + tag_categories=self.hvu_categories_for_eval, + tag_category_nums=self.hvu_category_nums_for_eval, + data_prefix=self.data_prefix) + + results = [ + np.array([ + -1.59812844, 0.24459082, 1.38486497, 0.28801252, 1.09813449, + -0.28696971, 0.0637848, 0.22877678, -1.82406999 + ]), + np.array([ + 0.87904563, 1.64264224, 0.46382051, 0.72865088, -2.13712525, + 1.28571358, 1.01320328, 0.59292737, -0.05502892 + ]) + ] + mAP = hvu_video_eval_dataset.evaluate(results) + 
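# A minimal sketch, assuming placeholder annotation and video paths, of the
# HVUDataset evaluation pattern asserted just below: evaluate() takes one
# score vector per video (its length matching the sum of tag_category_nums,
# 3 + 3 + 3 = 9 here) and reports a '<category>_mAP' per tag category.
import numpy as np
from mmaction.datasets import HVUDataset

demo_pipeline = [
    dict(type='OpenCVInit'),
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='OpenCVDecode')
]
hvu_demo = HVUDataset(
    ann_file='path/to/hvu_anno.json',   # placeholder
    pipeline=demo_pipeline,
    tag_categories=['action', 'scene', 'object'],
    tag_category_nums=[3, 3, 3],
    data_prefix='path/to/videos')       # placeholder
demo_scores = [np.random.randn(9) for _ in range(len(hvu_demo))]
print(hvu_demo.evaluate(demo_scores))   # e.g. {'action_mAP': ..., 'scene_mAP': ..., ...}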
assert_array_almost_equal(mAP['action_mAP'], 1.0) + assert_array_almost_equal(mAP['scene_mAP'], 0.5) + assert_array_almost_equal(mAP['object_mAP'], 0.75) + + def test_rawframe_dataset(self): + rawframe_dataset = RawframeDataset(self.frame_ann_file, + self.frame_pipeline, + self.data_prefix) + rawframe_infos = rawframe_dataset.video_infos + frame_dir = osp.join(self.data_prefix, 'test_imgs') + assert rawframe_infos == [ + dict(frame_dir=frame_dir, total_frames=5, label=127) + ] * 2 + assert rawframe_dataset.start_index == 1 + + def test_audio_dataset(self): + audio_dataset = AudioDataset( + self.audio_ann_file, + self.audio_pipeline, + data_prefix=self.data_prefix) + audio_infos = audio_dataset.video_infos + wav_path = osp.join(self.data_prefix, 'test.wav') + assert audio_infos == [ + dict(audio_path=wav_path, total_frames=100, label=127) + ] * 2 + + def test_audio_feature_dataset(self): + audio_dataset = AudioFeatureDataset( + self.audio_feature_ann_file, + self.audio_feature_pipeline, + data_prefix=self.data_prefix) + audio_infos = audio_dataset.video_infos + feature_path = osp.join(self.data_prefix, 'test.npy') + assert audio_infos == [ + dict(audio_path=feature_path, total_frames=100, label=127) + ] * 2 + + def test_rawframe_dataset_with_offset(self): + rawframe_dataset = RawframeDataset( + self.frame_ann_file_with_offset, + self.frame_pipeline, + self.data_prefix, + with_offset=True) + rawframe_infos = rawframe_dataset.video_infos + frame_dir = osp.join(self.data_prefix, 'test_imgs') + assert rawframe_infos == [ + dict(frame_dir=frame_dir, offset=2, total_frames=5, label=127) + ] * 2 + assert rawframe_dataset.start_index == 1 + + def test_rawframe_dataset_multi_label(self): + rawframe_dataset = RawframeDataset( + self.frame_ann_file_multi_label, + self.frame_pipeline, + self.data_prefix, + multi_class=True, + num_classes=100) + rawframe_infos = rawframe_dataset.video_infos + frame_dir = osp.join(self.data_prefix, 'test_imgs') + label0 = [1] + label1 = [3, 5] + labels = [label0, label1] + for info, label in zip(rawframe_infos, labels): + assert info['frame_dir'] == frame_dir + assert info['total_frames'] == 5 + assert set(info['label']) == set(label) + assert rawframe_dataset.start_index == 1 + + def test_audio_visual_dataset(self): + test_dataset = AudioVisualDataset( + self.frame_ann_file, + self.frame_pipeline, + self.data_prefix, + video_prefix=self.data_prefix, + data_prefix=self.data_prefix) + video_infos = test_dataset.video_infos + frame_dir = osp.join(self.data_prefix, 'test_imgs') + audio_path = osp.join(self.data_prefix, 'test_imgs.npy') + filename = osp.join(self.data_prefix, 'test_imgs.mp4') + assert video_infos == [ + dict( + frame_dir=frame_dir, + audio_path=audio_path, + filename=filename, + total_frames=5, + label=127) + ] * 2 + assert test_dataset.start_index == 1 + + def test_dataset_realpath(self): + dataset = RawframeDataset(self.frame_ann_file, self.frame_pipeline, + '.') + assert dataset.data_prefix == osp.realpath('.') + dataset = RawframeDataset(self.frame_ann_file, self.frame_pipeline, + 's3://good') + assert dataset.data_prefix == 's3://good' + + dataset = RawframeDataset(self.frame_ann_file, self.frame_pipeline) + assert dataset.data_prefix is None + assert dataset.video_infos[0]['frame_dir'] == 'test_imgs' + + def test_video_dataset(self): + video_dataset = VideoDataset( + self.video_ann_file, + self.video_pipeline, + data_prefix=self.data_prefix) + video_infos = video_dataset.video_infos + video_filename = osp.join(self.data_prefix, 'test.mp4') + assert 
video_infos == [dict(filename=video_filename, label=0)] * 2 + assert video_dataset.start_index == 0 + + def test_rawframe_pipeline(self): + target_keys = [ + 'frame_dir', 'total_frames', 'label', 'filename_tmpl', + 'start_index', 'modality' + ] + + # RawframeDataset not in test mode + rawframe_dataset = RawframeDataset( + self.frame_ann_file, + self.frame_pipeline, + self.data_prefix, + test_mode=False) + result = rawframe_dataset[0] + assert self.check_keys_contain(result.keys(), target_keys) + + # RawframeDataset in multi-class tasks + rawframe_dataset = RawframeDataset( + self.frame_ann_file, + self.frame_pipeline, + self.data_prefix, + multi_class=True, + num_classes=400, + test_mode=False) + result = rawframe_dataset[0] + assert self.check_keys_contain(result.keys(), target_keys) + + # RawframeDataset with offset + rawframe_dataset = RawframeDataset( + self.frame_ann_file_with_offset, + self.frame_pipeline, + self.data_prefix, + with_offset=True, + num_classes=400, + test_mode=False) + result = rawframe_dataset[0] + assert self.check_keys_contain(result.keys(), target_keys + ['offset']) + + # RawframeDataset in test mode + rawframe_dataset = RawframeDataset( + self.frame_ann_file, + self.frame_pipeline, + self.data_prefix, + test_mode=True) + result = rawframe_dataset[0] + assert self.check_keys_contain(result.keys(), target_keys) + + # RawframeDataset in multi-class tasks in test mode + rawframe_dataset = RawframeDataset( + self.frame_ann_file, + self.frame_pipeline, + self.data_prefix, + multi_class=True, + num_classes=400, + test_mode=True) + result = rawframe_dataset[0] + assert self.check_keys_contain(result.keys(), target_keys) + + # RawframeDataset with offset + rawframe_dataset = RawframeDataset( + self.frame_ann_file_with_offset, + self.frame_pipeline, + self.data_prefix, + with_offset=True, + num_classes=400, + test_mode=True) + result = rawframe_dataset[0] + assert self.check_keys_contain(result.keys(), target_keys + ['offset']) + + def test_audio_pipeline(self): + target_keys = [ + 'audio_path', 'label', 'start_index', 'modality', 'audios_shape', + 'length', 'sample_rate', 'total_frames' + ] + + # Audio dataset not in test mode + audio_dataset = AudioDataset( + self.audio_ann_file, + self.audio_pipeline, + data_prefix=self.data_prefix, + test_mode=False) + result = audio_dataset[0] + assert self.check_keys_contain(result.keys(), target_keys) + + # Audio dataset in test mode + audio_dataset = AudioDataset( + self.audio_ann_file, + self.audio_pipeline, + data_prefix=self.data_prefix, + test_mode=True) + result = audio_dataset[0] + assert self.check_keys_contain(result.keys(), target_keys) + + def test_audio_feature_pipeline(self): + target_keys = [ + 'audio_path', 'label', 'start_index', 'modality', 'audios', + 'total_frames' + ] + + # Audio feature dataset not in test mode + audio_feature_dataset = AudioFeatureDataset( + self.audio_feature_ann_file, + self.audio_feature_pipeline, + data_prefix=self.data_prefix, + test_mode=False) + result = audio_feature_dataset[0] + assert self.check_keys_contain(result.keys(), target_keys) + + # Audio dataset in test mode + audio_feature_dataset = AudioFeatureDataset( + self.audio_feature_ann_file, + self.audio_feature_pipeline, + data_prefix=self.data_prefix, + test_mode=True) + result = audio_feature_dataset[0] + assert self.check_keys_contain(result.keys(), target_keys) + + def test_video_pipeline(self): + target_keys = ['filename', 'label', 'start_index', 'modality'] + + # VideoDataset not in test mode + video_dataset = VideoDataset( 
+ self.video_ann_file, + self.video_pipeline, + data_prefix=self.data_prefix, + test_mode=False) + result = video_dataset[0] + assert self.check_keys_contain(result.keys(), target_keys) + + # VideoDataset in test mode + video_dataset = VideoDataset( + self.video_ann_file, + self.video_pipeline, + data_prefix=self.data_prefix, + test_mode=True) + result = video_dataset[0] + assert self.check_keys_contain(result.keys(), target_keys) + + def test_action_pipeline(self): + target_keys = ['video_name', 'data_prefix'] + + # ActivityNet Dataset not in test mode + action_dataset = ActivityNetDataset( + self.action_ann_file, + self.action_pipeline, + self.data_prefix, + test_mode=False) + result = action_dataset[0] + assert self.check_keys_contain(result.keys(), target_keys) + + # ActivityNet Dataset in test mode + action_dataset = ActivityNetDataset( + self.action_ann_file, + self.action_pipeline, + self.data_prefix, + test_mode=True) + result = action_dataset[0] + assert self.check_keys_contain(result.keys(), target_keys) + + def test_proposal_pipeline(self): + target_keys = [ + 'frame_dir', 'video_id', 'total_frames', 'gts', 'proposals', + 'filename_tmpl', 'modality', 'out_proposals', 'reg_targets', + 'proposal_scale_factor', 'proposal_labels', 'proposal_type', + 'start_index' + ] + + # SSN Dataset not in test mode + proposal_dataset = SSNDataset( + self.proposal_ann_file, + self.proposal_pipeline, + self.proposal_train_cfg, + self.proposal_test_cfg, + data_prefix=self.data_prefix) + result = proposal_dataset[0] + assert self.check_keys_contain(result.keys(), target_keys) + + # SSN Dataset with random sampling proposals + proposal_dataset = SSNDataset( + self.proposal_ann_file, + self.proposal_pipeline, + self.proposal_train_cfg, + self.proposal_test_cfg, + data_prefix=self.data_prefix, + video_centric=False) + result = proposal_dataset[0] + assert self.check_keys_contain(result.keys(), target_keys) + + target_keys = [ + 'frame_dir', 'video_id', 'total_frames', 'gts', 'proposals', + 'filename_tmpl', 'modality', 'relative_proposal_list', + 'scale_factor_list', 'proposal_tick_list', 'reg_norm_consts', + 'start_index' + ] + + # SSN Dataset in test mode + proposal_dataset = SSNDataset( + self.proposal_ann_file, + self.proposal_test_pipeline, + self.proposal_train_cfg, + self.proposal_test_cfg, + data_prefix=self.data_prefix, + test_mode=True) + result = proposal_dataset[0] + assert self.check_keys_contain(result.keys(), target_keys) + + def test_rawframe_evaluate(self): + rawframe_dataset = RawframeDataset(self.frame_ann_file, + self.frame_pipeline, + self.data_prefix) + + with pytest.raises(TypeError): + # results must be a list + rawframe_dataset.evaluate('0.5') + + with pytest.raises(AssertionError): + # The length of results must be equal to the dataset len + rawframe_dataset.evaluate([0] * 5) + + with pytest.raises(TypeError): + # topk must be int or tuple of int + rawframe_dataset.evaluate( + [0] * len(rawframe_dataset), + metric_options=dict(top_k_accuracy=dict(topk=1.))) + + with pytest.raises(KeyError): + # unsupported metric + rawframe_dataset.evaluate( + [0] * len(rawframe_dataset), metrics='iou') + + # evaluate top_k_accuracy and mean_class_accuracy metric + results = [np.array([0.1, 0.5, 0.4])] * 2 + eval_result = rawframe_dataset.evaluate( + results, metrics=['top_k_accuracy', 'mean_class_accuracy']) + assert set(eval_result.keys()) == set( + ['top1_acc', 'top5_acc', 'mean_class_accuracy']) + + def test_video_evaluate(self): + video_dataset = VideoDataset( + self.video_ann_file, + 
self.video_pipeline, + data_prefix=self.data_prefix) + + with pytest.raises(TypeError): + # results must be a list + video_dataset.evaluate('0.5') + + with pytest.raises(AssertionError): + # The length of results must be equal to the dataset len + video_dataset.evaluate([0] * 5) + + with pytest.raises(TypeError): + # topk must be int or tuple of int + video_dataset.evaluate( + [0] * len(video_dataset), + metric_options=dict(top_k_accuracy=dict(topk=1.))) + + with pytest.raises(KeyError): + # unsupported metric + video_dataset.evaluate([0] * len(video_dataset), metrics='iou') + + # evaluate top_k_accuracy and mean_class_accuracy metric + results = [np.array([0.1, 0.5, 0.4])] * 2 + eval_result = video_dataset.evaluate( + results, metrics=['top_k_accuracy', 'mean_class_accuracy']) + assert set(eval_result.keys()) == set( + ['top1_acc', 'top5_acc', 'mean_class_accuracy']) + + def test_base_dataset(self): + video_dataset = VideoDataset( + self.video_ann_file, + self.video_pipeline, + data_prefix=self.data_prefix, + start_index=3) + assert len(video_dataset) == 2 + assert video_dataset.start_index == 3 + + def test_repeat_dataset(self): + rawframe_dataset = RawframeDataset(self.frame_ann_file, + self.frame_pipeline, + self.data_prefix) + repeat_dataset = RepeatDataset(rawframe_dataset, 5) + assert len(repeat_dataset) == 10 + result_a = repeat_dataset[0] + result_b = repeat_dataset[2] + assert set(result_a.keys()) == set(result_b.keys()) + for key in result_a: + if isinstance(result_a[key], np.ndarray): + assert np.equal(result_a[key], result_b[key]).all() + elif isinstance(result_a[key], list): + assert all( + np.array_equal(a, b) + for (a, b) in zip(result_a[key], result_b[key])) + else: + assert result_a[key] == result_b[key] + + def test_activitynet_dataset(self): + activitynet_dataset = ActivityNetDataset(self.action_ann_file, + self.action_pipeline, + self.data_prefix) + activitynet_infos = activitynet_dataset.video_infos + assert activitynet_infos == [ + dict( + video_name='v_test1', + duration_second=1, + duration_frame=30, + annotations=[dict(segment=[0.3, 0.6], label='Rock climbing')], + feature_frame=30, + fps=30.0, + rfps=30), + dict( + video_name='v_test2', + duration_second=2, + duration_frame=48, + annotations=[dict(segment=[1.0, 2.0], label='Drinking beer')], + feature_frame=48, + fps=24.0, + rfps=24.0) + ] + + def test_activitynet_proposals2json(self): + activitynet_dataset = ActivityNetDataset(self.action_ann_file, + self.action_pipeline, + self.data_prefix) + results = [ + dict( + video_name='v_test1', + proposal_list=[dict(segment=[0.1, 0.9], score=0.1)]), + dict( + video_name='v_test2', + proposal_list=[dict(segment=[10.1, 20.9], score=0.9)]) + ] + result_dict = activitynet_dataset.proposals2json(results) + assert result_dict == dict( + test1=[{ + 'segment': [0.1, 0.9], + 'score': 0.1 + }], + test2=[{ + 'segment': [10.1, 20.9], + 'score': 0.9 + }]) + result_dict = activitynet_dataset.proposals2json(results, True) + assert result_dict == dict( + test1=[{ + 'segment': [0.1, 0.9], + 'score': 0.1 + }], + test2=[{ + 'segment': [10.1, 20.9], + 'score': 0.9 + }]) + + def test_activitynet_evaluate(self): + activitynet_dataset = ActivityNetDataset(self.action_ann_file, + self.action_pipeline, + self.data_prefix) + + with pytest.raises(TypeError): + # results must be a list + activitynet_dataset.evaluate('0.5') + + with pytest.raises(AssertionError): + # The length of results must be equal to the dataset len + activitynet_dataset.evaluate([0] * 5) + + with pytest.raises(KeyError): + # 
unsupported metric + activitynet_dataset.evaluate( + [0] * len(activitynet_dataset), metrics='iou') + + # evaluate AR@AN metric + results = [ + dict( + video_name='v_test1', + proposal_list=[dict(segment=[0.1, 0.9], score=0.1)]), + dict( + video_name='v_test2', + proposal_list=[dict(segment=[10.1, 20.9], score=0.9)]) + ] + eval_result = activitynet_dataset.evaluate(results, metrics=['AR@AN']) + assert set(eval_result) == set( + ['auc', 'AR@1', 'AR@5', 'AR@10', 'AR@100']) + + def test_activitynet_dump_results(self): + activitynet_dataset = ActivityNetDataset(self.action_ann_file, + self.action_pipeline, + self.data_prefix) + # test dumping json file + results = [ + dict( + video_name='v_test1', + proposal_list=[dict(segment=[0.1, 0.9], score=0.1)]), + dict( + video_name='v_test2', + proposal_list=[dict(segment=[10.1, 20.9], score=0.9)]) + ] + dump_results = { + 'version': 'VERSION 1.3', + 'results': { + 'test1': [{ + 'segment': [0.1, 0.9], + 'score': 0.1 + }], + 'test2': [{ + 'segment': [10.1, 20.9], + 'score': 0.9 + }] + }, + 'external_data': {} + } + + tmp_filename = osp.join(tempfile.gettempdir(), 'result.json') + activitynet_dataset.dump_results(results, tmp_filename, 'json') + assert osp.isfile(tmp_filename) + with open(tmp_filename, 'r+') as f: + load_obj = mmcv.load(f, file_format='json') + assert load_obj == dump_results + os.remove(tmp_filename) + + # test dumping csv file + results = [('test_video', np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, + 10]]))] + with tempfile.TemporaryDirectory() as tmpdir: + activitynet_dataset.dump_results(results, tmpdir, 'csv') + load_obj = np.loadtxt( + osp.join(tmpdir, 'test_video.csv'), + dtype=np.float32, + delimiter=',', + skiprows=1) + assert_array_equal( + load_obj, + np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]], + dtype=np.float32)) + + def test_ssn_dataset(self): + # test ssn dataset + ssn_dataset = SSNDataset( + self.proposal_ann_file, + self.proposal_pipeline, + self.proposal_train_cfg, + self.proposal_test_cfg, + data_prefix=self.data_prefix) + ssn_infos = ssn_dataset.video_infos + assert ssn_infos[0]['video_id'] == 'test_imgs' + assert ssn_infos[0]['total_frames'] == 5 + + # test ssn dataset with verbose + ssn_dataset = SSNDataset( + self.proposal_ann_file, + self.proposal_pipeline, + self.proposal_train_cfg, + self.proposal_test_cfg, + data_prefix=self.data_prefix, + verbose=True) + ssn_infos = ssn_dataset.video_infos + assert ssn_infos[0]['video_id'] == 'test_imgs' + assert ssn_infos[0]['total_frames'] == 5 + + # test ssn datatset with normalized proposal file + with pytest.raises(Exception): + ssn_dataset = SSNDataset( + self.proposal_norm_ann_file, + self.proposal_pipeline, + self.proposal_train_cfg, + self.proposal_test_cfg, + data_prefix=self.data_prefix) + ssn_infos = ssn_dataset.video_infos + + # test ssn dataset with reg_normalize_constants + ssn_dataset = SSNDataset( + self.proposal_ann_file, + self.proposal_pipeline, + self.proposal_train_cfg, + self.proposal_test_cfg, + data_prefix=self.data_prefix, + reg_normalize_constants=[[[-0.0603, 0.0325], [0.0752, 0.1596]]]) + ssn_infos = ssn_dataset.video_infos + assert ssn_infos[0]['video_id'] == 'test_imgs' + assert ssn_infos[0]['total_frames'] == 5 + + # test error case + with pytest.raises(TypeError): + ssn_dataset = SSNDataset( + self.proposal_ann_file, + self.proposal_pipeline, + self.proposal_train_cfg, + self.proposal_test_cfg, + data_prefix=self.data_prefix, + aug_ratio=('error', 'error')) + ssn_infos = ssn_dataset.video_infos + + def test_ssn_evaluate(self): + ssn_dataset = 
SSNDataset( + self.proposal_ann_file, + self.proposal_pipeline, + self.proposal_train_cfg, + self.proposal_test_cfg, + data_prefix=self.data_prefix) + ssn_dataset_topall = SSNDataset( + self.proposal_ann_file, + self.proposal_pipeline, + self.proposal_train_cfg, + self.proposal_test_cfg_topall, + data_prefix=self.data_prefix) + + with pytest.raises(TypeError): + # results must be a list + ssn_dataset.evaluate('0.5') + + with pytest.raises(AssertionError): + # The length of results must be equal to the dataset len + ssn_dataset.evaluate([0] * 5) + + with pytest.raises(KeyError): + # unsupported metric + ssn_dataset.evaluate([0] * len(ssn_dataset), metrics='iou') + + # evaluate mAP metric + results_relative_proposal_list = np.random.randn(16, 2) + results_activity_scores = np.random.randn(16, 21) + results_completeness_scores = np.random.randn(16, 20) + results_bbox_preds = np.random.randn(16, 20, 2) + results = [ + dict( + relative_proposal_list=results_relative_proposal_list, + activity_scores=results_activity_scores, + completeness_scores=results_completeness_scores, + bbox_preds=results_bbox_preds) + ] + eval_result = ssn_dataset.evaluate(results, metrics=['mAP']) + assert set(eval_result) == set([ + 'mAP@0.10', 'mAP@0.20', 'mAP@0.30', 'mAP@0.40', 'mAP@0.50', + 'mAP@0.50', 'mAP@0.60', 'mAP@0.70', 'mAP@0.80', 'mAP@0.90' + ]) + + # evaluate mAP metric without filtering topk + results_relative_proposal_list = np.random.randn(16, 2) + results_activity_scores = np.random.randn(16, 21) + results_completeness_scores = np.random.randn(16, 20) + results_bbox_preds = np.random.randn(16, 20, 2) + results = [ + dict( + relative_proposal_list=results_relative_proposal_list, + activity_scores=results_activity_scores, + completeness_scores=results_completeness_scores, + bbox_preds=results_bbox_preds) + ] + eval_result = ssn_dataset_topall.evaluate(results, metrics=['mAP']) + assert set(eval_result) == set([ + 'mAP@0.10', 'mAP@0.20', 'mAP@0.30', 'mAP@0.40', 'mAP@0.50', + 'mAP@0.50', 'mAP@0.60', 'mAP@0.70', 'mAP@0.80', 'mAP@0.90' + ]) + + def test_audio_evaluate(self): + audio_dataset = AudioDataset( + self.audio_ann_file, + self.audio_pipeline, + data_prefix=self.data_prefix) + + with pytest.raises(TypeError): + # results must be a list + audio_dataset.evaluate('0.5') + + with pytest.raises(AssertionError): + # The length of results must be equal to the dataset len + audio_dataset.evaluate([0] * 5) + + with pytest.raises(TypeError): + # topk must be int or tuple of int + audio_dataset.evaluate( + [0] * len(audio_dataset), + metric_options=dict(top_k_accuracy=dict(topk=1.))) + + with pytest.raises(KeyError): + # unsupported metric + audio_dataset.evaluate([0] * len(audio_dataset), metrics='iou') + + # evaluate top_k_accuracy and mean_class_accuracy metric + results = [np.array([0.1, 0.5, 0.4])] * 2 + eval_result = audio_dataset.evaluate( + results, metrics=['top_k_accuracy', 'mean_class_accuracy']) + assert set(eval_result.keys()) == set( + ['top1_acc', 'top5_acc', 'mean_class_accuracy']) + + def test_audio_feature_evaluate(self): + audio_dataset = AudioFeatureDataset( + self.audio_feature_ann_file, + self.audio_feature_pipeline, + data_prefix=self.data_prefix) + + with pytest.raises(TypeError): + # results must be a list + audio_dataset.evaluate('0.5') + + with pytest.raises(AssertionError): + # The length of results must be equal to the dataset len + audio_dataset.evaluate([0] * 5) + + with pytest.raises(TypeError): + # topk must be int or tuple of int + audio_dataset.evaluate( + [0] * 
len(audio_dataset), + metric_options=dict(top_k_accuracy=dict(topk=1.))) + + with pytest.raises(KeyError): + # unsupported metric + audio_dataset.evaluate([0] * len(audio_dataset), metrics='iou') + + # evaluate top_k_accuracy and mean_class_accuracy metric + results = [np.array([0.1, 0.5, 0.4])] * 2 + eval_result = audio_dataset.evaluate( + results, metrics=['top_k_accuracy', 'mean_class_accuracy']) + assert set(eval_result) == set( + ['top1_acc', 'top5_acc', 'mean_class_accuracy']) diff --git a/Downstream/Open-Set-Action-Recognition/tests/test_data/test_formating.py b/Downstream/Open-Set-Action-Recognition/tests/test_data/test_formating.py new file mode 100644 index 0000000..a443b1d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/test_data/test_formating.py @@ -0,0 +1,178 @@ +import numpy as np +import pytest +import torch +from mmcv.parallel import DataContainer as DC + +from mmaction.datasets.pipelines import (Collect, FormatAudioShape, + FormatShape, ImageToTensor, + ToDataContainer, ToTensor, Transpose) + + +def check_keys_contain(result_keys, target_keys): + """Check if all elements in target_keys is in result_keys.""" + return set(target_keys).issubset(set(result_keys)) + + +def test_to_tensor(): + to_tensor = ToTensor(['str']) + with pytest.raises(TypeError): + # str cannot be converted to tensor + results = dict(str='0') + to_tensor(results) + + # convert tensor, numpy, squence, int, float to tensor + target_keys = ['tensor', 'numpy', 'sequence', 'int', 'float'] + to_tensor = ToTensor(target_keys) + original_results = dict( + tensor=torch.randn(2, 3), + numpy=np.random.randn(2, 3), + sequence=list(range(10)), + int=1, + float=0.1) + results = to_tensor(original_results) + assert check_keys_contain(results.keys(), target_keys) + for key in target_keys: + assert isinstance(results[key], torch.Tensor) + assert torch.equal(results[key].data, original_results[key]) + + # Add an additional key which is not in keys. + original_results = dict( + tensor=torch.randn(2, 3), + numpy=np.random.randn(2, 3), + sequence=list(range(10)), + int=1, + float=0.1, + str='test') + results = to_tensor(original_results) + assert check_keys_contain(results.keys(), target_keys) + for key in target_keys: + assert isinstance(results[key], torch.Tensor) + assert torch.equal(results[key].data, original_results[key]) + + assert repr(to_tensor) == to_tensor.__class__.__name__ + \ + f'(keys={target_keys})' + + +def test_to_data_container(): + # check user-defined fields + fields = (dict(key='key1', stack=True), dict(key='key2')) + to_data_container = ToDataContainer(fields=fields) + target_keys = ['key1', 'key2'] + original_results = dict(key1=np.random.randn(10, 20), key2=['a', 'b']) + results = to_data_container(original_results.copy()) + assert check_keys_contain(results.keys(), target_keys) + for key in target_keys: + assert isinstance(results[key], DC) + assert np.all(results[key].data == original_results[key]) + assert results['key1'].stack + assert not results['key2'].stack + + # Add an additional key which is not in keys. 
+ original_results = dict( + key1=np.random.randn(10, 20), key2=['a', 'b'], key3='value3') + results = to_data_container(original_results.copy()) + assert check_keys_contain(results.keys(), target_keys) + for key in target_keys: + assert isinstance(results[key], DC) + assert np.all(results[key].data == original_results[key]) + assert results['key1'].stack + assert not results['key2'].stack + + assert repr(to_data_container) == ( + to_data_container.__class__.__name__ + f'(fields={fields})') + + +def test_image_to_tensor(): + original_results = dict(imgs=np.random.randn(256, 256, 3)) + keys = ['imgs'] + image_to_tensor = ImageToTensor(keys) + results = image_to_tensor(original_results) + assert results['imgs'].shape == torch.Size([3, 256, 256]) + assert isinstance(results['imgs'], torch.Tensor) + assert torch.equal(results['imgs'].data, original_results['imgs']) + assert repr(image_to_tensor) == image_to_tensor.__class__.__name__ + \ + f'(keys={keys})' + + +def test_transpose(): + results = dict(imgs=np.random.randn(256, 256, 3)) + keys = ['imgs'] + order = [2, 0, 1] + transpose = Transpose(keys, order) + results = transpose(results) + assert results['imgs'].shape == (3, 256, 256) + assert repr(transpose) == transpose.__class__.__name__ + \ + f'(keys={keys}, order={order})' + + +def test_collect(): + inputs = dict( + imgs=np.random.randn(256, 256, 3), + label=[1], + filename='test.txt', + original_shape=(256, 256, 3), + img_shape=(256, 256, 3), + pad_shape=(256, 256, 3), + flip_direction='vertical', + img_norm_cfg=dict(to_bgr=False)) + keys = ['imgs', 'label'] + collect = Collect(keys) + results = collect(inputs) + assert sorted(list(results.keys())) == sorted( + ['imgs', 'label', 'img_meta']) + inputs.pop('imgs') + assert set(results['img_meta'].data.keys()) == set(inputs.keys()) + for key in results['img_meta'].data: + assert results['img_meta'].data[key] == inputs[key] + assert repr(collect) == collect.__class__.__name__ + \ + f'(keys={keys}, meta_keys={collect.meta_keys})' + + +def test_format_shape(): + with pytest.raises(ValueError): + # invalid input format + FormatShape('NHWC') + + # 'NCHW' input format + results = dict( + imgs=np.random.randn(3, 224, 224, 3), num_clips=1, clip_len=3) + format_shape = FormatShape('NCHW') + assert format_shape(results)['input_shape'] == (3, 3, 224, 224) + + # `NCTHW` input format with num_clips=1, clip_len=3 + results = dict( + imgs=np.random.randn(3, 224, 224, 3), num_clips=1, clip_len=3) + format_shape = FormatShape('NCTHW') + assert format_shape(results)['input_shape'] == (1, 3, 3, 224, 224) + + # `NCTHW` input format with num_clips=2, clip_len=3 + results = dict( + imgs=np.random.randn(18, 224, 224, 3), num_clips=2, clip_len=3) + assert format_shape(results)['input_shape'] == (6, 3, 3, 224, 224) + target_keys = ['imgs', 'input_shape'] + assert check_keys_contain(results.keys(), target_keys) + + assert repr(format_shape) == format_shape.__class__.__name__ + \ + "(input_format='NCTHW')" + + # 'NPTCHW' input format + results = dict( + imgs=np.random.randn(72, 224, 224, 3), + num_clips=9, + clip_len=1, + num_proposals=8) + format_shape = FormatShape('NPTCHW') + assert format_shape(results)['input_shape'] == (8, 9, 3, 224, 224) + + +def test_format_audio_shape(): + with pytest.raises(ValueError): + # invalid input format + FormatAudioShape('XXXX') + + # 'NCTF' input format + results = dict(audios=np.random.randn(3, 128, 8)) + format_shape = FormatAudioShape('NCTF') + assert format_shape(results)['input_shape'] == (3, 1, 128, 8) + assert 
repr(format_shape) == format_shape.__class__.__name__ + \ + "(input_format='NCTF')" diff --git a/Downstream/Open-Set-Action-Recognition/tests/test_data/test_loading.py b/Downstream/Open-Set-Action-Recognition/tests/test_data/test_loading.py new file mode 100644 index 0000000..2b58bb4 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/test_data/test_loading.py @@ -0,0 +1,1495 @@ +import copy +import os.path as osp + +import mmcv +import numpy as np +import pytest +import torch +from numpy.testing import assert_array_almost_equal, assert_array_equal + +# yapf: disable +from mmaction.datasets.pipelines import (AudioDecode, AudioDecodeInit, + AudioFeatureSelector, DecordDecode, + DecordInit, DenseSampleFrames, + FrameSelector, + GenerateLocalizationLabels, + LoadAudioFeature, LoadHVULabel, + LoadLocalizationFeature, + LoadProposals, OpenCVDecode, + OpenCVInit, PyAVDecode, + PyAVDecodeMotionVector, PyAVInit, + RawFrameDecode, SampleAVAFrames, + SampleFrames, SampleProposalFrames, + UntrimmedSampleFrames) + +# yapf: enable + + +class ExampleSSNInstance: + + def __init__(self, + start_frame, + end_frame, + num_frames, + label=None, + best_iou=None, + overlap_self=None): + self.start_frame = start_frame + self.end_frame = min(end_frame, num_frames) + self.label = label if label is not None else -1 + self.coverage = (end_frame - start_frame) / num_frames + self.best_iou = best_iou + self.overlap_self = overlap_self + + +class TestLoading: + + @staticmethod + def check_keys_contain(result_keys, target_keys): + """Check if all elements in target_keys is in result_keys.""" + return set(target_keys).issubset(set(result_keys)) + + @classmethod + def setup_class(cls): + cls.img_path = osp.join( + osp.dirname(osp.dirname(__file__)), 'data/test.jpg') + cls.video_path = osp.join( + osp.dirname(osp.dirname(__file__)), 'data/test.mp4') + cls.wav_path = osp.join( + osp.dirname(osp.dirname(__file__)), 'data/test.wav') + cls.audio_spec_path = osp.join( + osp.dirname(osp.dirname(__file__)), 'data/test.npy') + cls.img_dir = osp.join( + osp.dirname(osp.dirname(__file__)), 'data/test_imgs') + cls.raw_feature_dir = osp.join( + osp.dirname(osp.dirname(__file__)), + 'data/test_activitynet_features') + cls.bsp_feature_dir = osp.join( + osp.dirname(osp.dirname(__file__)), 'data/test_bsp_features') + cls.proposals_dir = osp.join( + osp.dirname(osp.dirname(__file__)), 'data/test_proposals') + cls.total_frames = 5 + cls.filename_tmpl = 'img_{:05}.jpg' + cls.flow_filename_tmpl = '{}_{:05d}.jpg' + video_total_frames = len(mmcv.VideoReader(cls.video_path)) + cls.audio_total_frames = video_total_frames + cls.video_results = dict( + filename=cls.video_path, + label=1, + total_frames=video_total_frames, + start_index=0) + cls.audio_results = dict( + audios=np.random.randn(1280, ), + audio_path=cls.wav_path, + total_frames=cls.audio_total_frames, + label=1, + start_index=0) + cls.audio_feature_results = dict( + audios=np.random.randn(128, 80), + audio_path=cls.audio_spec_path, + total_frames=cls.audio_total_frames, + label=1, + start_index=0) + cls.frame_results = dict( + frame_dir=cls.img_dir, + total_frames=cls.total_frames, + filename_tmpl=cls.filename_tmpl, + start_index=1, + modality='RGB', + offset=0, + label=1) + cls.flow_frame_results = dict( + frame_dir=cls.img_dir, + total_frames=cls.total_frames, + filename_tmpl=cls.flow_filename_tmpl, + modality='Flow', + offset=0, + label=1) + cls.action_results = dict( + video_name='v_test1', + data_prefix=cls.raw_feature_dir, + temporal_scale=5, + boundary_ratio=0.1, + 
duration_second=10, + duration_frame=10, + feature_frame=8, + annotations=[{ + 'segment': [3.0, 5.0], + 'label': 'Rock climbing' + }]) + cls.proposal_results = dict( + frame_dir=cls.img_dir, + video_id='test_imgs', + total_frames=cls.total_frames, + filename_tmpl=cls.filename_tmpl, + start_index=1, + out_proposals=[[[ + 'test_imgs', + ExampleSSNInstance(1, 4, 10, 1, 1, 1) + ], 0], [['test_imgs', + ExampleSSNInstance(2, 5, 10, 2, 1, 1)], 0]]) + + cls.ava_results = dict( + fps=30, timestamp=902, timestamp_start=840, shot_info=(0, 27000)) + + cls.hvu_label_example1 = dict( + categories=['action', 'object', 'scene', 'concept'], + category_nums=[2, 5, 3, 2], + label=dict(action=[0], object=[2, 3], scene=[0, 1])) + cls.hvu_label_example2 = dict( + categories=['action', 'object', 'scene', 'concept'], + category_nums=[2, 5, 3, 2], + label=dict(action=[1], scene=[1, 2], concept=[1])) + + def test_load_hvu_label(self): + hvu_label_example1 = copy.deepcopy(self.hvu_label_example1) + hvu_label_example2 = copy.deepcopy(self.hvu_label_example2) + categories = hvu_label_example1['categories'] + category_nums = hvu_label_example1['category_nums'] + num_tags = sum(category_nums) + num_categories = len(categories) + + loader = LoadHVULabel() + assert repr(loader) == (f'{loader.__class__.__name__}(' + f'hvu_initialized={False})') + + result1 = loader(hvu_label_example1) + label1 = torch.zeros(num_tags) + mask1 = torch.zeros(num_tags) + category_mask1 = torch.zeros(num_categories) + + assert repr(loader) == (f'{loader.__class__.__name__}(' + f'hvu_initialized={True})') + + label1[[0, 4, 5, 7, 8]] = 1. + mask1[:10] = 1. + category_mask1[:3] = 1. + + assert torch.all(torch.eq(label1, result1['label'])) + assert torch.all(torch.eq(mask1, result1['mask'])) + assert torch.all(torch.eq(category_mask1, result1['category_mask'])) + + result2 = loader(hvu_label_example2) + label2 = torch.zeros(num_tags) + mask2 = torch.zeros(num_tags) + category_mask2 = torch.zeros(num_categories) + + label2[[1, 8, 9, 11]] = 1. + mask2[:2] = 1. + mask2[7:] = 1. + category_mask2[[0, 2, 3]] = 1. 
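# A minimal sketch, grounded in the example dicts above, of what LoadHVULabel
# produces: a one-hot 'label' over all sum(category_nums) tags, a 'mask'
# marking the tag positions whose category is annotated, and a per-category
# 'category_mask'.
from mmaction.datasets.pipelines import LoadHVULabel

hvu_example = dict(
    categories=['action', 'object', 'scene', 'concept'],
    category_nums=[2, 5, 3, 2],
    label=dict(action=[0], object=[2, 3], scene=[0, 1]))
hvu_out = LoadHVULabel()(hvu_example)
print(hvu_out['label'].shape,          # torch.Size([12])
      hvu_out['mask'].shape,           # torch.Size([12])
      hvu_out['category_mask'].shape)  # torch.Size([4])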
+ + assert torch.all(torch.eq(label2, result2['label'])) + assert torch.all(torch.eq(mask2, result2['mask'])) + assert torch.all(torch.eq(category_mask2, result2['category_mask'])) + + def test_sample_frames(self): + target_keys = [ + 'frame_inds', 'clip_len', 'frame_interval', 'num_clips', + 'total_frames' + ] + + with pytest.warns(UserWarning): + # start_index has been deprecated + config = dict( + clip_len=3, frame_interval=1, num_clips=5, start_index=1) + SampleFrames(**config) + + # Sample Frame with no temporal_jitter + # clip_len=3, frame_interval=1, num_clips=5 + video_result = copy.deepcopy(self.video_results) + frame_result = copy.deepcopy(self.frame_results) + config = dict( + clip_len=3, frame_interval=1, num_clips=5, temporal_jitter=False) + sample_frames = SampleFrames(**config) + sample_frames_results = sample_frames(video_result) + assert self.check_keys_contain(sample_frames_results.keys(), + target_keys) + assert len(sample_frames_results['frame_inds']) == 15 + sample_frames_results = sample_frames(frame_result) + assert len(sample_frames_results['frame_inds']) == 15 + assert np.max(sample_frames_results['frame_inds']) <= 5 + assert np.min(sample_frames_results['frame_inds']) >= 1 + assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}(' + f'clip_len={3}, ' + f'frame_interval={1}, ' + f'num_clips={5}, ' + f'temporal_jitter={False}, ' + f'twice_sample={False}, ' + f'out_of_bound_opt=loop, ' + f'test_mode={False})') + + # Sample Frame with no temporal_jitter + # clip_len=5, frame_interval=1, num_clips=5, + # out_of_bound_opt='repeat_last' + video_result = copy.deepcopy(self.video_results) + frame_result = copy.deepcopy(self.frame_results) + config = dict( + clip_len=5, + frame_interval=1, + num_clips=5, + temporal_jitter=False, + out_of_bound_opt='repeat_last') + sample_frames = SampleFrames(**config) + sample_frames_results = sample_frames(video_result) + assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}(' + f'clip_len={5}, ' + f'frame_interval={1}, ' + f'num_clips={5}, ' + f'temporal_jitter={False}, ' + f'twice_sample={False}, ' + f'out_of_bound_opt=repeat_last, ' + f'test_mode={False})') + + def check_monotonous(arr): + length = arr.shape[0] + for i in range(length - 1): + if arr[i] > arr[i + 1]: + return False + return True + + assert self.check_keys_contain(sample_frames_results.keys(), + target_keys) + assert len(sample_frames_results['frame_inds']) == 25 + frame_inds = sample_frames_results['frame_inds'].reshape([5, 5]) + for i in range(5): + assert check_monotonous(frame_inds[i]) + + sample_frames_results = sample_frames(frame_result) + assert len(sample_frames_results['frame_inds']) == 25 + frame_inds = sample_frames_results['frame_inds'].reshape([5, 5]) + for i in range(5): + assert check_monotonous(frame_inds[i]) + assert np.max(sample_frames_results['frame_inds']) <= 5 + assert np.min(sample_frames_results['frame_inds']) >= 1 + + # Sample Frame with temporal_jitter + # clip_len=4, frame_interval=2, num_clips=5 + video_result = copy.deepcopy(self.video_results) + frame_result = copy.deepcopy(self.frame_results) + config = dict( + clip_len=4, frame_interval=2, num_clips=5, temporal_jitter=True) + sample_frames = SampleFrames(**config) + sample_frames_results = sample_frames(video_result) + assert self.check_keys_contain(sample_frames_results.keys(), + target_keys) + assert len(sample_frames_results['frame_inds']) == 20 + sample_frames_results = sample_frames(frame_result) + assert len(sample_frames_results['frame_inds']) == 20 + 
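# A minimal sketch of the SampleFrames transform exercised throughout this
# test: given a dict carrying 'total_frames' and 'start_index', it adds
# 'frame_inds' with clip_len * num_clips indices. The dummy dict below is an
# assumption standing in for a real decoded-frame result.
from mmaction.datasets.pipelines import SampleFrames

dummy_result = dict(total_frames=5, start_index=1, label=1)
demo_sampler = SampleFrames(
    clip_len=3, frame_interval=1, num_clips=5, temporal_jitter=False)
demo_out = demo_sampler(dummy_result)
print(len(demo_out['frame_inds']))   # 3 * 5 == 15, all indices within [1, 5]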
assert np.max(sample_frames_results['frame_inds']) <= 5 + assert np.min(sample_frames_results['frame_inds']) >= 1 + assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}(' + f'clip_len={4}, ' + f'frame_interval={2}, ' + f'num_clips={5}, ' + f'temporal_jitter={True}, ' + f'twice_sample={False}, ' + f'out_of_bound_opt=loop, ' + f'test_mode={False})') + + # Sample Frame with no temporal_jitter in test mode + # clip_len=4, frame_interval=1, num_clips=6 + video_result = copy.deepcopy(self.video_results) + frame_result = copy.deepcopy(self.frame_results) + config = dict( + clip_len=4, + frame_interval=1, + num_clips=6, + temporal_jitter=False, + test_mode=True) + sample_frames = SampleFrames(**config) + sample_frames_results = sample_frames(video_result) + assert self.check_keys_contain(sample_frames_results.keys(), + target_keys) + assert len(sample_frames_results['frame_inds']) == 24 + sample_frames_results = sample_frames(frame_result) + assert len(sample_frames_results['frame_inds']) == 24 + assert np.max(sample_frames_results['frame_inds']) <= 5 + assert np.min(sample_frames_results['frame_inds']) >= 1 + assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}(' + f'clip_len={4}, ' + f'frame_interval={1}, ' + f'num_clips={6}, ' + f'temporal_jitter={False}, ' + f'twice_sample={False}, ' + f'out_of_bound_opt=loop, ' + f'test_mode={True})') + + # Sample Frame with no temporal_jitter in test mode + # clip_len=3, frame_interval=1, num_clips=6 + video_result = copy.deepcopy(self.video_results) + frame_result = copy.deepcopy(self.frame_results) + config = dict( + clip_len=3, + frame_interval=1, + num_clips=6, + temporal_jitter=False, + test_mode=True) + sample_frames = SampleFrames(**config) + sample_frames_results = sample_frames(video_result) + assert self.check_keys_contain(sample_frames_results.keys(), + target_keys) + assert len(sample_frames_results['frame_inds']) == 18 + sample_frames_results = sample_frames(frame_result) + assert len(sample_frames_results['frame_inds']) == 18 + assert np.max(sample_frames_results['frame_inds']) <= 5 + assert np.min(sample_frames_results['frame_inds']) >= 1 + + # Sample Frame with no temporal_jitter to get clip_offsets + # clip_len=1, frame_interval=1, num_clips=8 + video_result = copy.deepcopy(self.video_results) + frame_result = copy.deepcopy(self.frame_results) + frame_result['total_frames'] = 6 + config = dict( + clip_len=1, + frame_interval=1, + num_clips=8, + temporal_jitter=False, + test_mode=True) + sample_frames = SampleFrames(**config) + sample_frames_results = sample_frames(video_result) + assert self.check_keys_contain(sample_frames_results.keys(), + target_keys) + assert len(sample_frames_results['frame_inds']) == 8 + sample_frames_results = sample_frames(frame_result) + assert len(sample_frames_results['frame_inds']) == 8 + assert_array_equal(sample_frames_results['frame_inds'], + np.array([1, 2, 2, 3, 4, 5, 5, 6])) + + # Sample Frame with no temporal_jitter to get clip_offsets + # clip_len=1, frame_interval=1, num_clips=8 + video_result = copy.deepcopy(self.video_results) + frame_result = copy.deepcopy(self.frame_results) + frame_result['total_frames'] = 6 + config = dict( + clip_len=1, + frame_interval=1, + num_clips=8, + temporal_jitter=False, + test_mode=True) + sample_frames = SampleFrames(**config) + sample_frames_results = sample_frames(video_result) + assert sample_frames_results['start_index'] == 0 + assert self.check_keys_contain(sample_frames_results.keys(), + target_keys) + assert 
len(sample_frames_results['frame_inds']) == 8 + sample_frames_results = sample_frames(frame_result) + assert len(sample_frames_results['frame_inds']) == 8 + assert_array_equal(sample_frames_results['frame_inds'], + np.array([1, 2, 2, 3, 4, 5, 5, 6])) + + # Sample Frame with no temporal_jitter to get clip_offsets zero + # clip_len=6, frame_interval=1, num_clips=1 + video_result = copy.deepcopy(self.video_results) + frame_result = copy.deepcopy(self.frame_results) + frame_result['total_frames'] = 5 + config = dict( + clip_len=6, + frame_interval=1, + num_clips=1, + temporal_jitter=False, + test_mode=True) + sample_frames = SampleFrames(**config) + sample_frames_results = sample_frames(video_result) + assert sample_frames_results['start_index'] == 0 + assert self.check_keys_contain(sample_frames_results.keys(), + target_keys) + assert len(sample_frames_results['frame_inds']) == 6 + sample_frames_results = sample_frames(frame_result) + assert len(sample_frames_results['frame_inds']) == 6 + assert_array_equal(sample_frames_results['frame_inds'], + [1, 2, 3, 4, 5, 1]) + + # Sample Frame with no temporal_jitter to get avg_interval <= 0 + # clip_len=12, frame_interval=1, num_clips=20 + video_result = copy.deepcopy(self.video_results) + frame_result = copy.deepcopy(self.frame_results) + frame_result['total_frames'] = 30 + config = dict( + clip_len=12, + frame_interval=1, + num_clips=20, + temporal_jitter=False, + test_mode=False) + sample_frames = SampleFrames(**config) + sample_frames_results = sample_frames(video_result) + assert sample_frames_results['start_index'] == 0 + assert self.check_keys_contain(sample_frames_results.keys(), + target_keys) + assert len(sample_frames_results['frame_inds']) == 240 + sample_frames_results = sample_frames(frame_result) + assert len(sample_frames_results['frame_inds']) == 240 + assert np.max(sample_frames_results['frame_inds']) <= 30 + assert np.min(sample_frames_results['frame_inds']) >= 1 + + # Sample Frame with no temporal_jitter to get clip_offsets + # clip_len=1, frame_interval=1, num_clips=8 + video_result = copy.deepcopy(self.video_results) + frame_result = copy.deepcopy(self.frame_results) + frame_result['total_frames'] = 6 + config = dict( + clip_len=1, + frame_interval=1, + num_clips=8, + temporal_jitter=False, + test_mode=False) + sample_frames = SampleFrames(**config) + sample_frames_results = sample_frames(video_result) + assert self.check_keys_contain(sample_frames_results.keys(), + target_keys) + assert sample_frames_results['start_index'] == 0 + assert len(sample_frames_results['frame_inds']) == 8 + sample_frames_results = sample_frames(frame_result) + assert len(sample_frames_results['frame_inds']) == 8 + assert_array_equal(sample_frames_results['frame_inds'], + np.array([1, 2, 3, 3, 4, 5, 5, 6])) + + # Sample Frame with no temporal_jitter to get clip_offsets zero + # clip_len=12, frame_interval=1, num_clips=2 + video_result = copy.deepcopy(self.video_results) + frame_result = copy.deepcopy(self.frame_results) + frame_result['total_frames'] = 10 + config = dict( + clip_len=12, + frame_interval=1, + num_clips=2, + temporal_jitter=False, + test_mode=False) + sample_frames = SampleFrames(**config) + sample_frames_results = sample_frames(video_result) + assert sample_frames_results['start_index'] == 0 + assert self.check_keys_contain(sample_frames_results.keys(), + target_keys) + assert len(sample_frames_results['frame_inds']) == 24 + sample_frames_results = sample_frames(frame_result) + assert len(sample_frames_results['frame_inds']) == 24 + 
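# The length assertions in this test all reduce to the same arithmetic:
# len(frame_inds) == clip_len * num_clips. A few of the cases above, spelled
# out:
for clip_len, num_clips, expected in [(3, 5, 15), (4, 6, 24), (1, 8, 8),
                                      (12, 20, 240), (12, 2, 24)]:
    assert clip_len * num_clips == expected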
assert np.max(sample_frames_results['frame_inds']) <= 10 + assert np.min(sample_frames_results['frame_inds']) >= 1 + + # Sample Frame using twice sample + # clip_len=12, frame_interval=1, num_clips=2 + video_result = copy.deepcopy(self.video_results) + frame_result = copy.deepcopy(self.frame_results) + frame_result['total_frames'] = 40 + config = dict( + clip_len=12, + frame_interval=1, + num_clips=2, + temporal_jitter=False, + twice_sample=True, + test_mode=True) + sample_frames = SampleFrames(**config) + sample_frames_results = sample_frames(video_result) + assert sample_frames_results['start_index'] == 0 + assert self.check_keys_contain(sample_frames_results.keys(), + target_keys) + assert len(sample_frames_results['frame_inds']) == 48 + sample_frames_results = sample_frames(frame_result) + assert len(sample_frames_results['frame_inds']) == 48 + assert np.max(sample_frames_results['frame_inds']) <= 40 + assert np.min(sample_frames_results['frame_inds']) >= 1 + + def test_dense_sample_frames(self): + target_keys = [ + 'frame_inds', 'clip_len', 'frame_interval', 'num_clips', + 'total_frames' + ] + + # Dense sample with no temporal_jitter in test mode + # clip_len=4, frame_interval=1, num_clips=6 + video_result = copy.deepcopy(self.video_results) + frame_result = copy.deepcopy(self.frame_results) + config = dict( + clip_len=4, + frame_interval=1, + num_clips=6, + temporal_jitter=False, + test_mode=True) + dense_sample_frames = DenseSampleFrames(**config) + dense_sample_frames_results = dense_sample_frames(video_result) + assert dense_sample_frames_results['start_index'] == 0 + assert self.check_keys_contain(dense_sample_frames_results.keys(), + target_keys) + assert len(dense_sample_frames_results['frame_inds']) == 240 + dense_sample_frames_results = dense_sample_frames(frame_result) + assert len(dense_sample_frames_results['frame_inds']) == 240 + assert repr(dense_sample_frames) == ( + f'{dense_sample_frames.__class__.__name__}(' + f'clip_len={4}, ' + f'frame_interval={1}, ' + f'num_clips={6}, ' + f'sample_range={64}, ' + f'num_sample_positions={10}, ' + f'temporal_jitter={False}, ' + f'out_of_bound_opt=loop, ' + f'test_mode={True})') + + # Dense sample with no temporal_jitter + # clip_len=4, frame_interval=1, num_clips=6 + video_result = copy.deepcopy(self.video_results) + frame_result = copy.deepcopy(self.frame_results) + config = dict( + clip_len=4, frame_interval=1, num_clips=6, temporal_jitter=False) + dense_sample_frames = DenseSampleFrames(**config) + dense_sample_frames_results = dense_sample_frames(video_result) + assert dense_sample_frames_results['start_index'] == 0 + assert self.check_keys_contain(dense_sample_frames_results.keys(), + target_keys) + assert len(dense_sample_frames_results['frame_inds']) == 24 + dense_sample_frames_results = dense_sample_frames(frame_result) + assert len(dense_sample_frames_results['frame_inds']) == 24 + + # Dense sample with no temporal_jitter, sample_range=32 in test mode + # clip_len=4, frame_interval=1, num_clips=6 + video_result = copy.deepcopy(self.video_results) + frame_result = copy.deepcopy(self.frame_results) + config = dict( + clip_len=4, + frame_interval=1, + num_clips=6, + sample_range=32, + temporal_jitter=False, + test_mode=True) + dense_sample_frames = DenseSampleFrames(**config) + dense_sample_frames_results = dense_sample_frames(video_result) + assert dense_sample_frames_results['start_index'] == 0 + assert self.check_keys_contain(dense_sample_frames_results.keys(), + target_keys) + assert 
len(dense_sample_frames_results['frame_inds']) == 240 + dense_sample_frames_results = dense_sample_frames(frame_result) + assert len(dense_sample_frames_results['frame_inds']) == 240 + + # Dense sample with no temporal_jitter, sample_range=32 + # clip_len=4, frame_interval=1, num_clips=6 + video_result = copy.deepcopy(self.video_results) + frame_result = copy.deepcopy(self.frame_results) + config = dict( + clip_len=4, + frame_interval=1, + num_clips=6, + sample_range=32, + temporal_jitter=False) + dense_sample_frames = DenseSampleFrames(**config) + dense_sample_frames_results = dense_sample_frames(video_result) + assert dense_sample_frames_results['start_index'] == 0 + assert self.check_keys_contain(dense_sample_frames_results.keys(), + target_keys) + assert len(dense_sample_frames_results['frame_inds']) == 24 + dense_sample_frames_results = dense_sample_frames(frame_result) + assert len(dense_sample_frames_results['frame_inds']) == 24 + assert repr(dense_sample_frames) == ( + f'{dense_sample_frames.__class__.__name__}(' + f'clip_len={4}, ' + f'frame_interval={1}, ' + f'num_clips={6}, ' + f'sample_range={32}, ' + f'num_sample_positions={10}, ' + f'temporal_jitter={False}, ' + f'out_of_bound_opt=loop, ' + f'test_mode={False})') + + # Dense sample with no temporal_jitter, sample_range=1000 to check mod + # clip_len=4, frame_interval=1, num_clips=6 + video_result = copy.deepcopy(self.video_results) + frame_result = copy.deepcopy(self.frame_results) + config = dict( + clip_len=4, + frame_interval=1, + num_clips=6, + sample_range=1000, + temporal_jitter=False) + dense_sample_frames = DenseSampleFrames(**config) + dense_sample_frames_results = dense_sample_frames(video_result) + assert dense_sample_frames_results['start_index'] == 0 + assert self.check_keys_contain(dense_sample_frames_results.keys(), + target_keys) + assert len(dense_sample_frames_results['frame_inds']) == 24 + dense_sample_frames_results = dense_sample_frames(frame_result) + assert len(dense_sample_frames_results['frame_inds']) == 24 + + # Dense sample with no temporal_jitter in test mode + # sample_range=32, num_sample_positions=5 + # clip_len=4, frame_interval=1, num_clips=6 + video_result = copy.deepcopy(self.video_results) + frame_result = copy.deepcopy(self.frame_results) + config = dict( + clip_len=4, + frame_interval=1, + num_clips=6, + num_sample_positions=5, + sample_range=32, + temporal_jitter=False, + test_mode=True) + dense_sample_frames = DenseSampleFrames(**config) + dense_sample_frames_results = dense_sample_frames(video_result) + assert dense_sample_frames_results['start_index'] == 0 + assert self.check_keys_contain(dense_sample_frames_results.keys(), + target_keys) + assert len(dense_sample_frames_results['frame_inds']) == 120 + dense_sample_frames_results = dense_sample_frames(frame_result) + assert len(dense_sample_frames_results['frame_inds']) == 120 + assert repr(dense_sample_frames) == ( + f'{dense_sample_frames.__class__.__name__}(' + f'clip_len={4}, ' + f'frame_interval={1}, ' + f'num_clips={6}, ' + f'sample_range={32}, ' + f'num_sample_positions={5}, ' + f'temporal_jitter={False}, ' + f'out_of_bound_opt=loop, ' + f'test_mode={True})') + + def test_untrim_sample_frames(self): + + target_keys = [ + 'frame_inds', 'clip_len', 'frame_interval', 'num_clips', + 'total_frames' + ] + + frame_result = dict( + frame_dir=None, + total_frames=100, + filename_tmpl=None, + modality='RGB', + start_index=0, + label=1) + video_result = copy.deepcopy(self.video_results) + + config = dict(clip_len=1, frame_interval=16, 
start_index=0) + sample_frames = UntrimmedSampleFrames(**config) + sample_frames_results = sample_frames(frame_result) + assert self.check_keys_contain(sample_frames_results.keys(), + target_keys) + assert len(sample_frames_results['frame_inds']) == 6 + assert_array_equal(sample_frames_results['frame_inds'], + np.array([8, 24, 40, 56, 72, 88])) + assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}(' + f'clip_len={1}, ' + f'frame_interval={16})') + + config = dict(clip_len=1, frame_interval=16, start_index=0) + sample_frames = UntrimmedSampleFrames(**config) + sample_frames_results = sample_frames(video_result) + assert self.check_keys_contain(sample_frames_results.keys(), + target_keys) + frame_inds = np.array(list(range(8, 300, 16))) + assert len(sample_frames_results['frame_inds']) == frame_inds.shape[0] + assert_array_equal(sample_frames_results['frame_inds'], frame_inds) + assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}(' + f'clip_len={1}, ' + f'frame_interval={16})') + + config = dict(clip_len=1, frame_interval=16) + sample_frames = UntrimmedSampleFrames(**config) + frame_result_ = copy.deepcopy(frame_result) + frame_result_['start_index'] = 1 + sample_frames_results = sample_frames(frame_result_) + assert self.check_keys_contain(sample_frames_results.keys(), + target_keys) + assert len(sample_frames_results['frame_inds']) == 6 + assert_array_equal(sample_frames_results['frame_inds'], + np.array([8, 24, 40, 56, 72, 88]) + 1) + assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}(' + f'clip_len={1}, ' + f'frame_interval={16})') + + config = dict(clip_len=3, frame_interval=16, start_index=0) + sample_frames = UntrimmedSampleFrames(**config) + sample_frames_results = sample_frames(frame_result) + assert self.check_keys_contain(sample_frames_results.keys(), + target_keys) + assert len(sample_frames_results['frame_inds']) == 18 + assert_array_equal( + sample_frames_results['frame_inds'], + np.array([ + 7, 8, 9, 23, 24, 25, 39, 40, 41, 55, 56, 57, 71, 72, 73, 87, + 88, 89 + ])) + assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}(' + f'clip_len={3}, ' + f'frame_interval={16})') + + def test_sample_ava_frames(self): + target_keys = [ + 'fps', 'timestamp', 'timestamp_start', 'shot_info', 'frame_inds', + 'clip_len', 'frame_interval' + ] + config = dict(clip_len=32, frame_interval=2) + sample_ava_dataset = SampleAVAFrames(**config) + ava_result = sample_ava_dataset(results=self.ava_results) + assert self.check_keys_contain(ava_result.keys(), target_keys) + assert ava_result['clip_len'] == 32 + assert ava_result['frame_interval'] == 2 + assert len(ava_result['frame_inds']) == 32 + assert repr(sample_ava_dataset) == ( + f'{sample_ava_dataset.__class__.__name__}(' + f'clip_len={32}, ' + f'frame_interval={2}, ' + f'test_mode={False})') + + # add test case in Issue #306 + config = dict(clip_len=8, frame_interval=8) + sample_ava_dataset = SampleAVAFrames(**config) + ava_result = sample_ava_dataset(results=self.ava_results) + assert self.check_keys_contain(ava_result.keys(), target_keys) + assert ava_result['clip_len'] == 8 + assert ava_result['frame_interval'] == 8 + assert len(ava_result['frame_inds']) == 8 + assert repr(sample_ava_dataset) == ( + f'{sample_ava_dataset.__class__.__name__}(' + f'clip_len={8}, ' + f'frame_interval={8}, ' + f'test_mode={False})') + + def test_sample_proposal_frames(self): + target_keys = [ + 'frame_inds', 'clip_len', 'frame_interval', 'num_clips', + 'total_frames', 'start_index' + ] + + # test error 
cases + with pytest.raises(TypeError): + proposal_result = copy.deepcopy(self.proposal_results) + config = dict( + clip_len=1, + frame_interval=1, + body_segments=2, + aug_segments=('error', 'error'), + aug_ratio=0.5, + temporal_jitter=False) + sample_frames = SampleProposalFrames(**config) + sample_frames(proposal_result) + + # test normal cases + # Sample Frame with no temporal_jitter + # clip_len=1, frame_interval=1 + # body_segments=2, aug_segments=(1, 1) + proposal_result = copy.deepcopy(self.proposal_results) + proposal_result['total_frames'] = 9 + config = dict( + clip_len=1, + frame_interval=1, + body_segments=2, + aug_segments=(1, 1), + aug_ratio=0.5, + temporal_jitter=False) + sample_frames = SampleProposalFrames(**config) + sample_frames_results = sample_frames(proposal_result) + assert self.check_keys_contain(sample_frames_results.keys(), + target_keys) + assert len(sample_frames_results['frame_inds']) == 8 + assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}(' + f'clip_len={1}, ' + f'body_segments={2}, ' + f'aug_segments={(1, 1)}, ' + f'aug_ratio={(0.5, 0.5)}, ' + f'frame_interval={1}, ' + f'test_interval={6}, ' + f'temporal_jitter={False}, ' + f'mode=train)') + + # Sample Frame with temporal_jitter + # clip_len=1, frame_interval=1 + # body_segments=2, aug_segments=(1, 1) + proposal_result = copy.deepcopy(self.proposal_results) + proposal_result['total_frames'] = 9 + config = dict( + clip_len=1, + frame_interval=1, + body_segments=2, + aug_segments=(1, 1), + aug_ratio=0.5, + temporal_jitter=True) + sample_frames = SampleProposalFrames(**config) + sample_frames_results = sample_frames(proposal_result) + assert self.check_keys_contain(sample_frames_results.keys(), + target_keys) + assert len(sample_frames_results['frame_inds']) == 8 + assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}(' + f'clip_len={1}, ' + f'body_segments={2}, ' + f'aug_segments={(1, 1)}, ' + f'aug_ratio={(0.5, 0.5)}, ' + f'frame_interval={1}, ' + f'test_interval={6}, ' + f'temporal_jitter={True}, ' + f'mode=train)') + + # Sample Frame with no temporal_jitter in val mode + # clip_len=1, frame_interval=1 + # body_segments=2, aug_segments=(1, 1) + proposal_result = copy.deepcopy(self.proposal_results) + proposal_result['total_frames'] = 9 + config = dict( + clip_len=1, + frame_interval=1, + body_segments=2, + aug_segments=(1, 1), + aug_ratio=0.5, + temporal_jitter=False, + mode='val') + sample_frames = SampleProposalFrames(**config) + sample_frames_results = sample_frames(proposal_result) + assert self.check_keys_contain(sample_frames_results.keys(), + target_keys) + assert len(sample_frames_results['frame_inds']) == 8 + assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}(' + f'clip_len={1}, ' + f'body_segments={2}, ' + f'aug_segments={(1, 1)}, ' + f'aug_ratio={(0.5, 0.5)}, ' + f'frame_interval={1}, ' + f'test_interval={6}, ' + f'temporal_jitter={False}, ' + f'mode=val)') + + # Sample Frame with no temporal_jitter in test mode + # test_interval=2 + proposal_result = copy.deepcopy(self.proposal_results) + proposal_result['out_proposals'] = None + proposal_result['total_frames'] = 10 + config = dict( + clip_len=1, + frame_interval=1, + body_segments=2, + aug_segments=(1, 1), + aug_ratio=0.5, + test_interval=2, + temporal_jitter=False, + mode='test') + sample_frames = SampleProposalFrames(**config) + sample_frames_results = sample_frames(proposal_result) + assert self.check_keys_contain(sample_frames_results.keys(), + target_keys) + assert 
len(sample_frames_results['frame_inds']) == 5 + assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}(' + f'clip_len={1}, ' + f'body_segments={2}, ' + f'aug_segments={(1, 1)}, ' + f'aug_ratio={(0.5, 0.5)}, ' + f'frame_interval={1}, ' + f'test_interval={2}, ' + f'temporal_jitter={False}, ' + f'mode=test)') + + # Sample Frame with no temporal_jitter to get clip_offsets zero + # clip_len=1, frame_interval=1 + # body_segments=2, aug_segments=(1, 1) + proposal_result = copy.deepcopy(self.proposal_results) + proposal_result['total_frames'] = 3 + config = dict( + clip_len=1, + frame_interval=1, + body_segments=2, + aug_segments=(1, 1), + aug_ratio=0.5, + temporal_jitter=False) + sample_frames = SampleProposalFrames(**config) + sample_frames_results = sample_frames(proposal_result) + assert self.check_keys_contain(sample_frames_results.keys(), + target_keys) + assert len(sample_frames_results['frame_inds']) == 8 + assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}(' + f'clip_len={1}, ' + f'body_segments={2}, ' + f'aug_segments={(1, 1)}, ' + f'aug_ratio={(0.5, 0.5)}, ' + f'frame_interval={1}, ' + f'test_interval={6}, ' + f'temporal_jitter={False}, ' + f'mode=train)') + + # Sample Frame with no temporal_jitter to + # get clip_offsets zero in val mode + # clip_len=1, frame_interval=1 + # body_segments=4, aug_segments=(2, 2) + proposal_result = copy.deepcopy(self.proposal_results) + proposal_result['total_frames'] = 3 + config = dict( + clip_len=1, + frame_interval=1, + body_segments=4, + aug_segments=(2, 2), + aug_ratio=0.5, + temporal_jitter=False, + mode='val') + sample_frames = SampleProposalFrames(**config) + sample_frames_results = sample_frames(proposal_result) + assert self.check_keys_contain(sample_frames_results.keys(), + target_keys) + assert len(sample_frames_results['frame_inds']) == 16 + assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}(' + f'clip_len={1}, ' + f'body_segments={4}, ' + f'aug_segments={(2, 2)}, ' + f'aug_ratio={(0.5, 0.5)}, ' + f'frame_interval={1}, ' + f'test_interval={6}, ' + f'temporal_jitter={False}, ' + f'mode=val)') + + def test_pyav_init(self): + target_keys = ['video_reader', 'total_frames'] + video_result = copy.deepcopy(self.video_results) + pyav_init = PyAVInit() + pyav_init_result = pyav_init(video_result) + assert self.check_keys_contain(pyav_init_result.keys(), target_keys) + assert pyav_init_result['total_frames'] == 300 + assert repr( + pyav_init) == f'{pyav_init.__class__.__name__}(io_backend=disk)' + + def test_pyav_decode(self): + target_keys = ['frame_inds', 'imgs', 'original_shape'] + + # test PyAV with 2 dim input and start_index = 0 + video_result = copy.deepcopy(self.video_results) + video_result['frame_inds'] = np.arange(0, self.total_frames, + 2)[:, np.newaxis] + pyav_init = PyAVInit() + pyav_init_result = pyav_init(video_result) + video_result['video_reader'] = pyav_init_result['video_reader'] + + pyav_decode = PyAVDecode() + pyav_decode_result = pyav_decode(video_result) + assert self.check_keys_contain(pyav_decode_result.keys(), target_keys) + assert pyav_decode_result['original_shape'] == (256, 340) + assert np.shape(pyav_decode_result['imgs']) == (len( + video_result['frame_inds']), 256, 340, 3) + assert repr(pyav_decode) == (f'{pyav_decode.__class__.__name__}(' + f'multi_thread={False})') + + # test PyAV with 1 dim input and start_index = 0 + video_result = copy.deepcopy(self.video_results) + video_result['frame_inds'] = np.arange(0, self.total_frames, 5) + pyav_init = PyAVInit() + 
pyav_init_result = pyav_init(video_result) + video_result['video_reader'] = pyav_init_result['video_reader'] + + pyav_decode = PyAVDecode() + pyav_decode_result = pyav_decode(video_result) + assert self.check_keys_contain(pyav_decode_result.keys(), target_keys) + assert pyav_decode_result['original_shape'] == (256, 340) + assert np.shape(pyav_decode_result['imgs']) == (len( + video_result['frame_inds']), 256, 340, 3) + + # PyAV with multi thread and start_index = 0 + video_result = copy.deepcopy(self.video_results) + video_result['frame_inds'] = np.arange(0, self.total_frames, 5) + pyav_init = PyAVInit() + pyav_init_result = pyav_init(video_result) + video_result['video_reader'] = pyav_init_result['video_reader'] + + pyav_decode = PyAVDecode(multi_thread=True) + pyav_decode_result = pyav_decode(video_result) + assert self.check_keys_contain(pyav_decode_result.keys(), target_keys) + assert pyav_decode_result['original_shape'] == (256, 340) + assert np.shape(pyav_decode_result['imgs']) == (len( + video_result['frame_inds']), 256, 340, 3) + assert repr(pyav_decode) == (f'{pyav_decode.__class__.__name__}(' + f'multi_thread={True})') + + # test PyAV with 2 dim input + video_result = copy.deepcopy(self.video_results) + video_result['frame_inds'] = np.arange(1, self.total_frames, + 2)[:, np.newaxis] + pyav_init = PyAVInit() + pyav_init_result = pyav_init(video_result) + video_result['video_reader'] = pyav_init_result['video_reader'] + + pyav_decode = PyAVDecode() + pyav_decode_result = pyav_decode(video_result) + assert self.check_keys_contain(pyav_decode_result.keys(), target_keys) + assert pyav_decode_result['original_shape'] == (256, 340) + assert np.shape(pyav_decode_result['imgs']) == (len( + video_result['frame_inds']), 256, 340, 3) + + # test PyAV with 1 dim input + video_result = copy.deepcopy(self.video_results) + video_result['frame_inds'] = np.arange(1, self.total_frames, 5) + pyav_init = PyAVInit() + pyav_init_result = pyav_init(video_result) + video_result['video_reader'] = pyav_init_result['video_reader'] + + pyav_decode = PyAVDecode() + pyav_decode_result = pyav_decode(video_result) + assert self.check_keys_contain(pyav_decode_result.keys(), target_keys) + assert pyav_decode_result['original_shape'] == (256, 340) + assert np.shape(pyav_decode_result['imgs']) == (len( + video_result['frame_inds']), 256, 340, 3) + + # PyAV with multi thread + video_result = copy.deepcopy(self.video_results) + video_result['frame_inds'] = np.arange(1, self.total_frames, 5) + pyav_init = PyAVInit() + pyav_init_result = pyav_init(video_result) + video_result['video_reader'] = pyav_init_result['video_reader'] + + pyav_decode = PyAVDecode(multi_thread=True) + pyav_decode_result = pyav_decode(video_result) + assert self.check_keys_contain(pyav_decode_result.keys(), target_keys) + assert pyav_decode_result['original_shape'] == (256, 340) + assert np.shape(pyav_decode_result['imgs']) == (len( + video_result['frame_inds']), 256, 340, 3) + + assert repr(pyav_decode) == pyav_decode.__class__.__name__ + \ + f'(multi_thread={True})' + + def test_decord_init(self): + target_keys = ['video_reader', 'total_frames'] + video_result = copy.deepcopy(self.video_results) + decord_init = DecordInit() + decord_init_result = decord_init(video_result) + assert self.check_keys_contain(decord_init_result.keys(), target_keys) + assert decord_init_result['total_frames'] == len( + decord_init_result['video_reader']) + assert repr(decord_init) == (f'{decord_init.__class__.__name__}(' + f'io_backend=disk, ' + f'num_threads={1})') + 
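+ # A minimal usage sketch of the two Decord ops exercised below, assuming
+ # only that 'my_video.mp4' stands in for a real path (the dict keys match
+ # the assertions in this test class):
+ #   results = dict(filename='my_video.mp4')
+ #   results = DecordInit()(results)    # adds 'video_reader', 'total_frames'
+ #   results['frame_inds'] = np.arange(0, results['total_frames'], 3)
+ #   results = DecordDecode()(results)  # adds 'imgs', 'original_shape'
+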
+ def test_decord_decode(self): + target_keys = ['frame_inds', 'imgs', 'original_shape'] + + # test Decord with 2 dim input and start_index = 0 + video_result = copy.deepcopy(self.video_results) + video_result['frame_inds'] = np.arange(0, self.total_frames, + 3)[:, np.newaxis] + decord_init = DecordInit() + decord_init_result = decord_init(video_result) + video_result['video_reader'] = decord_init_result['video_reader'] + + decord_decode = DecordDecode() + decord_decode_result = decord_decode(video_result) + assert self.check_keys_contain(decord_decode_result.keys(), + target_keys) + assert decord_decode_result['original_shape'] == (256, 340) + assert np.shape(decord_decode_result['imgs']) == (len( + video_result['frame_inds']), 256, 340, 3) + + # test Decord with 1 dim input and start_index = 0 + video_result = copy.deepcopy(self.video_results) + video_result['frame_inds'] = np.arange(0, self.total_frames, 3) + decord_init = DecordInit() + decord_init_result = decord_init(video_result) + video_result['video_reader'] = decord_init_result['video_reader'] + + decord_decode = DecordDecode() + decord_decode_result = decord_decode(video_result) + assert self.check_keys_contain(decord_decode_result.keys(), + target_keys) + assert decord_decode_result['original_shape'] == (256, 340) + assert np.shape(decord_decode_result['imgs']) == (len( + video_result['frame_inds']), 256, 340, 3) + + # test Decord with 2 dim input + video_result = copy.deepcopy(self.video_results) + video_result['frame_inds'] = np.arange(1, self.total_frames, + 3)[:, np.newaxis] + decord_init = DecordInit() + decord_init_result = decord_init(video_result) + video_result['video_reader'] = decord_init_result['video_reader'] + + decord_decode = DecordDecode() + decord_decode_result = decord_decode(video_result) + assert self.check_keys_contain(decord_decode_result.keys(), + target_keys) + assert decord_decode_result['original_shape'] == (256, 340) + assert np.shape(decord_decode_result['imgs']) == (len( + video_result['frame_inds']), 256, 340, 3) + + # test Decord with 1 dim input + video_result = copy.deepcopy(self.video_results) + video_result['frame_inds'] = np.arange(1, self.total_frames, 3) + decord_init = DecordInit() + decord_init_result = decord_init(video_result) + video_result['video_reader'] = decord_init_result['video_reader'] + + decord_decode = DecordDecode() + decord_decode_result = decord_decode(video_result) + assert self.check_keys_contain(decord_decode_result.keys(), + target_keys) + assert decord_decode_result['original_shape'] == (256, 340) + assert np.shape(decord_decode_result['imgs']) == (len( + video_result['frame_inds']), 256, 340, 3) + + def test_opencv_init(self): + target_keys = ['new_path', 'video_reader', 'total_frames'] + video_result = copy.deepcopy(self.video_results) + opencv_init = OpenCVInit() + opencv_init_result = opencv_init(video_result) + assert self.check_keys_contain(opencv_init_result.keys(), target_keys) + assert opencv_init_result['total_frames'] == len( + opencv_init_result['video_reader']) + assert repr(opencv_init) == (f'{opencv_init.__class__.__name__}(' + f'io_backend=disk)') + + def test_opencv_decode(self): + target_keys = ['frame_inds', 'imgs', 'original_shape'] + + # test OpenCV with 2 dim input when start_index = 0 + video_result = copy.deepcopy(self.video_results) + video_result['frame_inds'] = np.arange(0, self.total_frames, + 2)[:, np.newaxis] + opencv_init = OpenCVInit() + opencv_init_result = opencv_init(video_result) + video_result['video_reader'] = 
opencv_init_result['video_reader'] + + opencv_decode = OpenCVDecode() + opencv_decode_result = opencv_decode(video_result) + assert self.check_keys_contain(opencv_decode_result.keys(), + target_keys) + assert opencv_decode_result['original_shape'] == (256, 340) + assert np.shape(opencv_decode_result['imgs']) == (len( + video_result['frame_inds']), 256, 340, 3) + + # test OpenCV with 2 dim input + video_result = copy.deepcopy(self.video_results) + video_result['frame_inds'] = np.arange(1, self.total_frames, + 2)[:, np.newaxis] + opencv_init = OpenCVInit() + opencv_init_result = opencv_init(video_result) + video_result['video_reader'] = opencv_init_result['video_reader'] + + opencv_decode = OpenCVDecode() + opencv_decode_result = opencv_decode(video_result) + assert self.check_keys_contain(opencv_decode_result.keys(), + target_keys) + assert opencv_decode_result['original_shape'] == (256, 340) + assert np.shape(opencv_decode_result['imgs']) == (len( + video_result['frame_inds']), 256, 340, 3) + + # test OpenCV with 1 dim input when start_index = 0 + video_result = copy.deepcopy(self.video_results) + video_result['frame_inds'] = np.arange(0, self.total_frames, 3) + opencv_init = OpenCVInit() + opencv_init_result = opencv_init(video_result) + video_result['video_reader'] = opencv_init_result['video_reader'] + + opencv_decode = OpenCVDecode() + opencv_decode_result = opencv_decode(video_result) + assert self.check_keys_contain(opencv_decode_result.keys(), + target_keys) + assert opencv_decode_result['original_shape'] == (256, 340) + assert np.shape(opencv_decode_result['imgs']) == (len( + video_result['frame_inds']), 256, 340, 3) + + # test OpenCV with 1 dim input + video_result = copy.deepcopy(self.video_results) + video_result['frame_inds'] = np.arange(1, self.total_frames, 3) + opencv_init = OpenCVInit() + opencv_init_result = opencv_init(video_result) + video_result['video_reader'] = opencv_init_result['video_reader'] + + opencv_decode = OpenCVDecode() + opencv_decode_result = opencv_decode(video_result) + assert self.check_keys_contain(opencv_decode_result.keys(), + target_keys) + assert opencv_decode_result['original_shape'] == (256, 340) + assert np.shape(opencv_decode_result['imgs']) == (len( + video_result['frame_inds']), 256, 340, 3) + + def test_rawframe_selector(self): + + with pytest.warns(UserWarning): + FrameSelector(io_backend='disk') + + def test_rawframe_decode(self): + target_keys = ['frame_inds', 'imgs', 'original_shape', 'modality'] + + # test frame selector with 2 dim input + inputs = copy.deepcopy(self.frame_results) + inputs['frame_inds'] = np.arange(0, self.total_frames, 2)[:, + np.newaxis] + # since the test images start with index 1, we add 1 to frame_inds + # in order to pass the CI + inputs['frame_inds'] = inputs['frame_inds'] + 1 + frame_selector = RawFrameDecode(io_backend='disk') + results = frame_selector(inputs) + assert self.check_keys_contain(results.keys(), target_keys) + assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240, + 320, 3) + assert results['original_shape'] == (240, 320) + + # test frame selector with 2 dim input + inputs = copy.deepcopy(self.frame_results) + inputs['frame_inds'] = np.arange(1, self.total_frames, 2)[:, + np.newaxis] + frame_selector = RawFrameDecode(io_backend='disk') + results = frame_selector(inputs) + assert self.check_keys_contain(results.keys(), target_keys) + assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240, + 320, 3) + assert results['original_shape'] == (240, 320) + + # test frame selector with 1 dim input when start_index = 0 + inputs = copy.deepcopy(self.frame_results) + inputs['frame_inds'] = np.arange(0, self.total_frames, 5) + # since the test images start with index 1, we add 1 to frame_inds + # in order to pass the CI + inputs['frame_inds'] = inputs['frame_inds'] + 1 + frame_selector = 
RawFrameDecode(io_backend='disk') + results = frame_selector(inputs) + assert self.check_keys_contain(results.keys(), target_keys) + assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240, + 320, 3) + assert results['original_shape'] == (240, 320) + + # test frame selector with 1 dim input + inputs = copy.deepcopy(self.frame_results) + inputs['frame_inds'] = np.arange(1, self.total_frames, 5) + frame_selector = RawFrameDecode(io_backend='disk') + results = frame_selector(inputs) + assert self.check_keys_contain(results.keys(), target_keys) + assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240, + 320, 3) + assert results['original_shape'] == (240, 320) + + # test frame selector with 1 dim input + inputs = copy.deepcopy(self.frame_results) + inputs['frame_inds'] = np.arange(0, self.total_frames, 2) + # since the test images start with index 1, we add 1 to frame_inds + # in order to pass the CI + inputs['frame_inds'] = inputs['frame_inds'] + 1 + frame_selector = RawFrameDecode(io_backend='disk') + results = frame_selector(inputs) + assert self.check_keys_contain(results.keys(), target_keys) + assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240, + 320, 3) + assert results['original_shape'] == (240, 320) + + # test frame selector with 1 dim input + inputs = copy.deepcopy(self.frame_results) + inputs['frame_inds'] = np.arange(1, self.total_frames, 2) + frame_selector = RawFrameDecode(io_backend='disk') + results = frame_selector(inputs) + assert self.check_keys_contain(results.keys(), target_keys) + assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240, + 320, 3) + assert results['original_shape'] == (240, 320) + + # test frame selector with 1 dim input for flow images + inputs = copy.deepcopy(self.flow_frame_results) + inputs['frame_inds'] = np.arange(0, self.total_frames, 2) + # since the test images start with index 1, we add 1 to frame_inds + # in order to pass the CI + inputs['frame_inds'] = inputs['frame_inds'] + 1 + frame_selector = RawFrameDecode(io_backend='disk') + results = frame_selector(inputs) + assert self.check_keys_contain(results.keys(), target_keys) + assert np.shape(results['imgs']) == (len(inputs['frame_inds']) * 2, + 240, 320) + assert results['original_shape'] == (240, 320) + + # test frame selector with 1 dim input for flow images + inputs = copy.deepcopy(self.flow_frame_results) + inputs['frame_inds'] = np.arange(1, self.total_frames, 2) + frame_selector = RawFrameDecode(io_backend='disk') + results = frame_selector(inputs) + assert self.check_keys_contain(results.keys(), target_keys) + assert np.shape(results['imgs']) == (len(inputs['frame_inds']) * 2, + 240, 320) + assert results['original_shape'] == (240, 320) + + # test frame selector in turbojpeg decoding backend + # when start_index = 0 + inputs = copy.deepcopy(self.frame_results) + inputs['frame_inds'] = np.arange(0, self.total_frames, 5) + # since the test images start with index 1, we add 1 to frame_inds + # in order to pass the CI + inputs['frame_inds'] = inputs['frame_inds'] + 1 + frame_selector = RawFrameDecode( + io_backend='disk', decoding_backend='turbojpeg') + results = frame_selector(inputs) + assert self.check_keys_contain(results.keys(), target_keys) + assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240, + 320, 3) + assert results['original_shape'] == (240, 320) + + # test frame selector in turbojpeg decoding backend + inputs = copy.deepcopy(self.frame_results) + inputs['frame_inds'] = np.arange(1, self.total_frames, 5) + 
frame_selector = RawFrameDecode( + io_backend='disk', decoding_backend='turbojpeg') + results = frame_selector(inputs) + assert self.check_keys_contain(results.keys(), target_keys) + assert np.shape(results['imgs']) == (len(inputs['frame_inds']), 240, + 320, 3) + assert results['original_shape'] == (240, 320) + assert repr(frame_selector) == (f'{frame_selector.__class__.__name__}(' + f'io_backend=disk, ' + f'decoding_backend=turbojpeg)') + + def test_load_localization_feature(self): + target_keys = ['raw_feature'] + + action_result = copy.deepcopy(self.action_results) + + # test error cases + with pytest.raises(NotImplementedError): + load_localization_feature = LoadLocalizationFeature( + 'unsupport_ext') + + # test normal cases + load_localization_feature = LoadLocalizationFeature() + load_localization_feature_result = load_localization_feature( + action_result) + assert self.check_keys_contain(load_localization_feature_result.keys(), + target_keys) + assert load_localization_feature_result['raw_feature'].shape == (400, + 5) + assert repr(load_localization_feature) == ( + f'{load_localization_feature.__class__.__name__}(' + f'raw_feature_ext=.csv)') + + def test_generate_localization_label(self): + action_result = copy.deepcopy(self.action_results) + action_result['raw_feature'] = np.random.randn(400, 5) + + # test default setting + target_keys = ['gt_bbox'] + generate_localization_labels = GenerateLocalizationLabels() + generate_localization_labels_result = generate_localization_labels( + action_result) + assert self.check_keys_contain( + generate_localization_labels_result.keys(), target_keys) + + assert_array_almost_equal( + generate_localization_labels_result['gt_bbox'], [[0.375, 0.625]], + decimal=4) + + def test_load_proposals(self): + target_keys = [ + 'bsp_feature', 'tmin', 'tmax', 'tmin_score', 'tmax_score', + 'reference_temporal_iou' + ] + + action_result = copy.deepcopy(self.action_results) + + # test error cases + with pytest.raises(NotImplementedError): + load_proposals = LoadProposals(5, self.proposals_dir, + self.bsp_feature_dir, + 'unsupport_ext') + + with pytest.raises(NotImplementedError): + load_proposals = LoadProposals(5, self.proposals_dir, + self.bsp_feature_dir, '.csv', + 'unsupport_ext') + + # test normal cases + load_proposals = LoadProposals(5, self.proposals_dir, + self.bsp_feature_dir) + load_proposals_result = load_proposals(action_result) + assert self.check_keys_contain(load_proposals_result.keys(), + target_keys) + assert (load_proposals_result['bsp_feature'].shape[0] == 5) + assert load_proposals_result['tmin'].shape == (5, ) + assert_array_almost_equal( + load_proposals_result['tmin'], np.arange(0.1, 0.6, 0.1), decimal=4) + assert load_proposals_result['tmax'].shape == (5, ) + assert_array_almost_equal( + load_proposals_result['tmax'], np.arange(0.2, 0.7, 0.1), decimal=4) + assert load_proposals_result['tmin_score'].shape == (5, ) + assert_array_almost_equal( + load_proposals_result['tmin_score'], + np.arange(0.95, 0.90, -0.01), + decimal=4) + assert load_proposals_result['tmax_score'].shape == (5, ) + assert_array_almost_equal( + load_proposals_result['tmax_score'], + np.arange(0.96, 0.91, -0.01), + decimal=4) + assert load_proposals_result['reference_temporal_iou'].shape == (5, ) + assert_array_almost_equal( + load_proposals_result['reference_temporal_iou'], + np.arange(0.85, 0.80, -0.01), + decimal=4) + assert repr(load_proposals) == ( + f'{load_proposals.__class__.__name__}(' + f'top_k={5}, ' + f'pgm_proposals_dir={self.proposals_dir}, ' + 
f'pgm_features_dir={self.bsp_feature_dir}, ' + f'proposal_ext=.csv, ' + f'feature_ext=.npy)') + + def test_audio_decode_init(self): + target_keys = ['audios', 'length', 'sample_rate'] + inputs = copy.deepcopy(self.audio_results) + audio_decode_init = AudioDecodeInit() + results = audio_decode_init(inputs) + assert self.check_keys_contain(results.keys(), target_keys) + + # test when no audio file exists + inputs = copy.deepcopy(self.audio_results) + inputs['audio_path'] = 'foo/foo/bar.wav' + audio_decode_init = AudioDecodeInit() + results = audio_decode_init(inputs) + assert self.check_keys_contain(results.keys(), target_keys) + assert results['audios'].shape == (10.0 * + audio_decode_init.sample_rate, ) + assert repr(audio_decode_init) == ( + f'{audio_decode_init.__class__.__name__}(' + f'io_backend=disk, ' + f'sample_rate=16000, ' + f'pad_method=zero)') + + def test_audio_decode(self): + target_keys = ['frame_inds', 'audios'] + inputs = copy.deepcopy(self.audio_results) + inputs['frame_inds'] = np.arange(0, self.audio_total_frames, + 2)[:, np.newaxis] + inputs['num_clips'] = 1 + inputs['length'] = 1280 + audio_selector = AudioDecode() + results = audio_selector(inputs) + assert self.check_keys_contain(results.keys(), target_keys) + + def test_load_audio_feature(self): + target_keys = ['audios'] + inputs = copy.deepcopy(self.audio_feature_results) + load_audio_feature = LoadAudioFeature() + results = load_audio_feature(inputs) + assert self.check_keys_contain(results.keys(), target_keys) + + # test when no audio feature file exists + inputs = copy.deepcopy(self.audio_feature_results) + inputs['audio_path'] = 'foo/foo/bar.npy' + load_audio_feature = LoadAudioFeature() + results = load_audio_feature(inputs) + assert results['audios'].shape == (640, 80) + assert self.check_keys_contain(results.keys(), target_keys) + assert repr(load_audio_feature) == ( + f'{load_audio_feature.__class__.__name__}(' + f'pad_method=zero)') + + def test_audio_feature_selector(self): + target_keys = ['audios'] + # test frame selector with 2 dim input + inputs = copy.deepcopy(self.audio_feature_results) + inputs['frame_inds'] = np.arange(0, self.audio_total_frames, + 2)[:, np.newaxis] + inputs['num_clips'] = 1 + inputs['length'] = 1280 + audio_feature_selector = AudioFeatureSelector() + results = audio_feature_selector(inputs) + assert self.check_keys_contain(results.keys(), target_keys) + assert repr(audio_feature_selector) == ( + f'{audio_feature_selector.__class__.__name__}(' + f'fix_length={128})') + + def test_pyav_decode_motion_vector(self): + pyav_init = PyAVInit() + pyav = PyAVDecodeMotionVector() + + # test pyav with 2-dim input + results = { + 'filename': self.video_path, + 'frame_inds': np.arange(0, 32, 1)[:, np.newaxis] + } + results = pyav_init(results) + results = pyav(results) + target_keys = ['motion_vectors'] + assert self.check_keys_contain(results.keys(), target_keys) + + # test pyav with 1 dim input + results = { + 'filename': self.video_path, + 'frame_inds': np.arange(0, 32, 1) + } + pyav_init = PyAVInit() + results = pyav_init(results) + pyav = PyAVDecodeMotionVector() + results = pyav(results) + + assert self.check_keys_contain(results.keys(), target_keys) diff --git a/Downstream/Open-Set-Action-Recognition/tests/test_gradcam.py b/Downstream/Open-Set-Action-Recognition/tests/test_gradcam.py new file mode 100644 index 0000000..96b834d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/test_gradcam.py @@ -0,0 +1,268 @@ +import os.path as osp + +import mmcv +import numpy as np 
+import pytest +import torch + +from mmaction.models import build_recognizer +from mmaction.utils.gradcam_utils import GradCAM + + +def _get_cfg(fname): + """Grab configs necessary to create a recognizer. + + These are deep copied to allow for safe modification of parameters without + influencing other tests. + """ + repo_dpath = osp.dirname(osp.dirname(__file__)) + config_dpath = osp.join(repo_dpath, 'configs/recognition') + config_fpath = osp.join(config_dpath, fname) + if not osp.exists(config_dpath): + raise Exception('Cannot find config path') + config = mmcv.Config.fromfile(config_fpath) + return config + + +def _get_target_shapes(input_shape, num_classes=400, model_type='2D'): + if model_type not in ['2D', '3D']: + raise ValueError(f'Data type {model_type} is not available') + + preds_target_shape = (input_shape[0], num_classes) + if model_type == '3D': + # input shape (batch_size, num_crops*num_clips, C, clip_len, H, W) + # target shape (batch_size*num_crops*num_clips, clip_len, H, W, C) + blended_imgs_target_shape = (input_shape[0] * input_shape[1], + input_shape[3], input_shape[4], + input_shape[5], input_shape[2]) + else: + # input shape (batch_size, num_segments, C, H, W) + # target shape (batch_size, num_segments, H, W, C) + blended_imgs_target_shape = (input_shape[0], input_shape[1], + input_shape[3], input_shape[4], + input_shape[2]) + + return blended_imgs_target_shape, preds_target_shape + + +def _generate_gradcam_inputs(input_shape=(1, 3, 3, 224, 224), model_type='2D'): + """Create a superset of inputs needed to run gradcam. + + Args: + input_shape (tuple[int]): input batch dimensions. + Default: (1, 3, 3, 224, 224). + model_type (str): Model type for data generation, from {'2D', '3D'}. + Default:'2D' + return: + dict: model inputs, including two keys, ``imgs`` and ``label``. 
+ """ + imgs = np.random.random(input_shape) + + if model_type in ['2D', '3D']: + gt_labels = torch.LongTensor([2] * input_shape[0]) + else: + raise ValueError(f'Data type {model_type} is not available') + + inputs = { + 'imgs': torch.FloatTensor(imgs), + 'label': gt_labels, + } + return inputs + + +def _do_test_2D_models(recognizer, + target_layer_name, + input_shape, + num_classes=400, + device='cpu'): + demo_inputs = _generate_gradcam_inputs(input_shape) + demo_inputs['imgs'] = demo_inputs['imgs'].to(device) + demo_inputs['label'] = demo_inputs['label'].to(device) + + recognizer = recognizer.to(device) + gradcam = GradCAM(recognizer, target_layer_name) + + blended_imgs_target_shape, preds_target_shape = _get_target_shapes( + input_shape, num_classes=num_classes, model_type='2D') + + blended_imgs, preds = gradcam(demo_inputs) + assert blended_imgs.size() == blended_imgs_target_shape + assert preds.size() == preds_target_shape + + blended_imgs, preds = gradcam(demo_inputs, True) + assert blended_imgs.size() == blended_imgs_target_shape + assert preds.size() == preds_target_shape + + +def _do_test_3D_models(recognizer, + target_layer_name, + input_shape, + num_classes=400): + blended_imgs_target_shape, preds_target_shape = _get_target_shapes( + input_shape, num_classes=num_classes, model_type='3D') + demo_inputs = _generate_gradcam_inputs(input_shape, '3D') + + # parrots 3dconv is only implemented on gpu + if torch.__version__ == 'parrots': + if torch.cuda.is_available(): + recognizer = recognizer.cuda() + demo_inputs['imgs'] = demo_inputs['imgs'].cuda() + demo_inputs['label'] = demo_inputs['label'].cuda() + gradcam = GradCAM(recognizer, target_layer_name) + + blended_imgs, preds = gradcam(demo_inputs) + assert blended_imgs.size() == blended_imgs_target_shape + assert preds.size() == preds_target_shape + + blended_imgs, preds = gradcam(demo_inputs, True) + assert blended_imgs.size() == blended_imgs_target_shape + assert preds.size() == preds_target_shape + else: + gradcam = GradCAM(recognizer, target_layer_name) + + blended_imgs, preds = gradcam(demo_inputs) + assert blended_imgs.size() == blended_imgs_target_shape + assert preds.size() == preds_target_shape + + blended_imgs, preds = gradcam(demo_inputs, True) + assert blended_imgs.size() == blended_imgs_target_shape + assert preds.size() == preds_target_shape + + +def test_tsn(): + config = _get_cfg('tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py') + config.model['backbone']['pretrained'] = None + recognizer = build_recognizer(config.model, test_cfg=config.test_cfg) + recognizer.cfg = config + + input_shape = (1, 25, 3, 32, 32) + target_layer_name = 'backbone/layer4/1/relu' + + _do_test_2D_models(recognizer, target_layer_name, input_shape) + + +def test_i3d(): + config = _get_cfg('i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py') + config.model['backbone']['pretrained2d'] = False + config.model['backbone']['pretrained'] = None + + recognizer = build_recognizer(config.model, test_cfg=config.test_cfg) + recognizer.cfg = config + + input_shape = [1, 1, 3, 32, 32, 32] + target_layer_name = 'backbone/layer4/1/relu' + + _do_test_3D_models(recognizer, target_layer_name, input_shape) + + +def test_r2plus1d(): + config = _get_cfg('r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb.py') + config.model['backbone']['pretrained2d'] = False + config.model['backbone']['pretrained'] = None + config.model['backbone']['norm_cfg'] = dict(type='BN3d') + + recognizer = build_recognizer(config.model, test_cfg=config.test_cfg) + recognizer.cfg = config + + input_shape = 
(1, 3, 3, 8, 32, 32) + target_layer_name = 'backbone/layer4/1/relu' + + _do_test_3D_models(recognizer, target_layer_name, input_shape) + + +def test_slowfast(): + config = _get_cfg('slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb.py') + + recognizer = build_recognizer(config.model, test_cfg=config.test_cfg) + recognizer.cfg = config + + input_shape = (1, 1, 3, 32, 32, 32) + target_layer_name = 'backbone/slow_path/layer4/1/relu' + + _do_test_3D_models(recognizer, target_layer_name, input_shape) + + +def test_tsm(): + config = _get_cfg('tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.py') + config.model['backbone']['pretrained'] = None + target_layer_name = 'backbone/layer4/1/relu' + + # base config + recognizer = build_recognizer(config.model, test_cfg=config.test_cfg) + recognizer.cfg = config + input_shape = (1, 8, 3, 32, 32) + _do_test_2D_models(recognizer, target_layer_name, input_shape) + + # test twice sample + 3 crops, 2*3*8=48 + test_cfg = dict(average_clips='prob') + recognizer = build_recognizer(config.model, test_cfg=test_cfg) + recognizer.cfg = config + input_shape = (1, 48, 3, 32, 32) + _do_test_2D_models(recognizer, target_layer_name, input_shape) + + +def test_csn(): + config = _get_cfg( + 'csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py') + config.model['backbone']['pretrained2d'] = False + config.model['backbone']['pretrained'] = None + + recognizer = build_recognizer(config.model, test_cfg=config.test_cfg) + recognizer.cfg = config + input_shape = (1, 1, 3, 32, 32, 32) + target_layer_name = 'backbone/layer4/1/relu' + + _do_test_3D_models(recognizer, target_layer_name, input_shape) + + +def test_tpn(): + target_layer_name = 'backbone/layer4/1/relu' + + config = _get_cfg('tpn/tpn_tsm_r50_1x1x8_150e_sthv1_rgb.py') + config.model['backbone']['pretrained'] = None + recognizer = build_recognizer(config.model, test_cfg=config.test_cfg) + recognizer.cfg = config + + input_shape = (1, 8, 3, 32, 32) + _do_test_2D_models(recognizer, target_layer_name, input_shape, 174) + + config = _get_cfg('tpn/tpn_slowonly_r50_8x8x1_150e_kinetics_rgb.py') + config.model['backbone']['pretrained'] = None + recognizer = build_recognizer(config.model, test_cfg=config.test_cfg) + recognizer.cfg = config + input_shape = (1, 3, 3, 8, 32, 32) + _do_test_3D_models(recognizer, target_layer_name, input_shape) + + +def test_c3d(): + config = _get_cfg('c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py') + config.model['backbone']['pretrained'] = None + recognizer = build_recognizer(config.model, test_cfg=config.test_cfg) + recognizer.cfg = config + input_shape = (1, 1, 3, 16, 112, 112) + target_layer_name = 'backbone/conv5a/activate' + _do_test_3D_models(recognizer, target_layer_name, input_shape, 101) + + +@pytest.mark.skipif( + not torch.cuda.is_available(), reason='requires CUDA support') +def test_tin(): + config = _get_cfg('tin/tin_tsm_finetune_r50_1x1x8_50e_kinetics400_rgb.py') + config.model['backbone']['pretrained'] = None + target_layer_name = 'backbone/layer4/1/relu' + + recognizer = build_recognizer(config.model, test_cfg=config.test_cfg) + recognizer.cfg = config + input_shape = (1, 8, 3, 64, 64) + _do_test_2D_models( + recognizer, target_layer_name, input_shape, device='cuda:0') + + +def test_x3d(): + config = _get_cfg('x3d/x3d_s_13x6x1_facebook_kinetics400_rgb.py') + config.model['backbone']['pretrained'] = None + recognizer = build_recognizer(config.model, test_cfg=config.test_cfg) + recognizer.cfg = config + input_shape = (1, 1, 3, 13, 32, 32) + target_layer_name = 'backbone/layer4/1/relu' + 
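+ # 'backbone/layer4/1/relu' addresses the target module as a '/'-joined
+ # attribute path (backbone -> layer4 -> block index 1 -> relu); presumably
+ # GradCAM resolves it by splitting on '/' and walking getattr().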
_do_test_3D_models(recognizer, target_layer_name, input_shape) diff --git a/Downstream/Open-Set-Action-Recognition/tests/test_localization_utils.py b/Downstream/Open-Set-Action-Recognition/tests/test_localization_utils.py new file mode 100644 index 0000000..a938898 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/test_localization_utils.py @@ -0,0 +1,201 @@ +import os.path as osp + +import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal + +from mmaction.localization import (generate_bsp_feature, + generate_candidate_proposals, soft_nms, + temporal_iop, temporal_iou) + + +def test_temporal_iou(): + anchors_min = np.array([0.0, 0.5]) + anchors_max = np.array([1.0, 1.5]) + box_min = 0.5 + box_max = 1.0 + + iou = temporal_iou(anchors_min, anchors_max, box_min, box_max) + assert_array_equal(iou, np.array([0.5, 0.5])) + + +def test_temporal_iop(): + anchors_min = np.array([0.0, 0.5]) + anchors_max = np.array([1.0, 1.5]) + box_min = 0.4 + box_max = 1.1 + + ioa = temporal_iop(anchors_min, anchors_max, box_min, box_max) + assert_array_almost_equal(ioa, np.array([0.6, 0.6])) + + +def test_soft_nms(): + proposals = np.array([[0., 1., 1., 1., 0.5, 0.5], + [0., 0.4, 1., 1., 0.4, 0.4], + [0., 0.95, 1., 1., 0.6, 0.6]]) + proposal_list = soft_nms(proposals, 0.75, 0.65, 0.9, 1) + assert_array_equal(proposal_list, [[0., 0.95, 0.6], [0., 0.4, 0.4]]) + + +def test_generate_candidate_proposals(): + video_list = [0, 1] + video_infos = [ + dict( + video_name='v_test1', + duration_second=100, + duration_frame=1000, + annotations=[{ + 'segment': [30.0, 60.0], + 'label': 'Rock climbing' + }], + feature_frame=900), + dict( + video_name='v_test2', + duration_second=100, + duration_frame=1000, + annotations=[{ + 'segment': [6.0, 8.0], + 'label': 'Drinking beer' + }], + feature_frame=900) + ] + tem_results_dir = osp.join(osp.dirname(__file__), 'data/test_tem_results') + + # test when tem_result_ext is not valid + with pytest.raises(NotImplementedError): + result_dict = generate_candidate_proposals( + video_list, + video_infos, + tem_results_dir, + 5, + 0.5, + tem_results_ext='unsupport_ext') + # test without result_dict + assert_result1 = np.array([ + [0.1, 0.7, 0.58390868, 0.35708317, 0.20850396, 0.55555556, 0.55555556], + [0.1, 0.5, 0.58390868, 0.32605207, 0.19038463, 0.29411765, 0.41666667], + [0.1, 0.3, 0.58390868, 0.26221931, 0.15311213, 0., 0.], + [0.3, 0.7, 0.30626667, 0.35708317, 0.10936267, 0.83333333, 0.83333333], + [0.3, 0.5, 0.30626667, 0.32605207, 0.09985888, 0.45454545, 0.83333333] + ]) + assert_result2 = np.array( + [[0.1, 0.3, 0.78390867, 0.3622193, 0.28394685, 0., 0.], + [0.1, 0.7, 0.78390867, 0.35708317, 0.27992059, 0., 0.], + [0.1, 0.5, 0.78390867, 0.32605207, 0.25559504, 0., 0.]]) + result_dict = generate_candidate_proposals(video_list, video_infos, + tem_results_dir, 5, 0.5) + + assert_array_almost_equal(result_dict['v_test1'], assert_result1) + assert_array_almost_equal(result_dict['v_test2'], assert_result2) + + # test with result_dict + result_dict = {} + generate_candidate_proposals( + video_list, + video_infos, + tem_results_dir, + 5, + 0.5, + result_dict=result_dict) + + assert_array_almost_equal(result_dict['v_test1'], assert_result1) + assert_array_almost_equal(result_dict['v_test2'], assert_result2) + + +def test_generate_bsp_feature(): + video_list = [0, 1] + video_infos = [ + dict( + video_name='v_test1', + duration_second=100, + duration_frame=1000, + annotations=[{ + 'segment': [30.0, 60.0], + 'label': 'Rock climbing' 
+ }], + feature_frame=900), + dict( + video_name='v_test2', + duration_second=100, + duration_frame=1000, + annotations=[{ + 'segment': [6.0, 8.0], + 'label': 'Drinking beer' + }], + feature_frame=900) + ] + tem_results_dir = osp.join(osp.dirname(__file__), 'data/test_tem_results') + pgm_proposals_dir = osp.join(osp.dirname(__file__), 'data/test_proposals') + + # test when extension is not valid + with pytest.raises(NotImplementedError): + result_dict = generate_bsp_feature( + video_list, + video_infos, + tem_results_dir, + pgm_proposals_dir, + tem_results_ext='unsupport_ext') + + with pytest.raises(NotImplementedError): + result_dict = generate_bsp_feature( + video_list, + video_infos, + tem_results_dir, + pgm_proposals_dir, + pgm_proposal_ext='unsupport_ext') + + # test without result_dict + result_dict = generate_bsp_feature( + video_list, video_infos, tem_results_dir, pgm_proposals_dir, top_k=2) + assert_result1 = np.array( + [[ + 0.02633105, 0.02489364, 0.02345622, 0.0220188, 0.02058138, + 0.01914396, 0.01770654, 0.01626912, 0.01541432, 0.01514214, + 0.01486995, 0.01459776, 0.01432558, 0.01405339, 0.01378121, + 0.01350902, 0.03064331, 0.02941124, 0.02817916, 0.02694709, + 0.02571502, 0.02448295, 0.02325087, 0.0220188, 0.01432558, + 0.01409228, 0.01385897, 0.01362567, 0.01339237, 0.01315907, + 0.01292577, 0.01269246 + ], + [ + 0.01350902, 0.01323684, 0.01296465, 0.01269246, 0.01242028, + 0.01214809, 0.01187591, 0.01160372, 0.01154264, 0.01169266, + 0.01184269, 0.01199271, 0.01214273, 0.01229275, 0.01244278, + 0.0125928, 0.01432558, 0.01409228, 0.01385897, 0.01362567, + 0.01339237, 0.01315907, 0.01292577, 0.01269246, 0.01214273, + 0.01227132, 0.01239991, 0.0125285, 0.0126571, 0.01278569, + 0.01291428, 0.01304287 + ]]) + assert_result2 = np.array( + [[ + 0.04133105, 0.03922697, 0.03712288, 0.0350188, 0.03291471, + 0.03081063, 0.02870654, 0.02660246, 0.02541432, 0.02514214, + 0.02486995, 0.02459776, 0.02432558, 0.02405339, 0.02378121, + 0.02350902, 0.04764331, 0.04583981, 0.04403631, 0.04223281, + 0.0404293, 0.0386258, 0.0368223, 0.0350188, 0.02432558, 0.02409228, + 0.02385897, 0.02362567, 0.02339237, 0.02315907, 0.02292577, + 0.02269246 + ], + [ + 0.02350902, 0.02323684, 0.02296465, 0.02269246, 0.02242028, + 0.02214809, 0.02187591, 0.02160372, 0.02120931, 0.02069266, + 0.02017602, 0.01965937, 0.01914273, 0.01862609, 0.01810944, + 0.0175928, 0.02432558, 0.02409228, 0.02385897, 0.02362567, + 0.02339237, 0.02315907, 0.02292577, 0.02269246, 0.01914273, + 0.01869989, 0.01825706, 0.01781422, 0.01737138, 0.01692854, + 0.0164857, 0.01604287 + ]]) + assert_array_almost_equal(result_dict['v_test1'], assert_result1) + assert_array_almost_equal(result_dict['v_test2'], assert_result2) + + # test with result_dict + result_dict = {} + generate_bsp_feature( + video_list, + video_infos, + tem_results_dir, + pgm_proposals_dir, + top_k=2, + result_dict=result_dict) + assert_array_almost_equal(result_dict['v_test1'], assert_result1) + assert_array_almost_equal(result_dict['v_test2'], assert_result2) diff --git a/Downstream/Open-Set-Action-Recognition/tests/test_loss.py b/Downstream/Open-Set-Action-Recognition/tests/test_loss.py new file mode 100644 index 0000000..5f06dca --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/test_loss.py @@ -0,0 +1,292 @@ +import numpy as np +import pytest +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv import ConfigDict +from numpy.testing import assert_array_almost_equal +from torch.autograd import Variable + +from 
mmaction.models import (BCELossWithLogits, BinaryLogisticRegressionLoss, + BMNLoss, CrossEntropyLoss, HVULoss, NLLLoss, + OHEMHingeLoss, SSNLoss) + + +def test_hvu_loss(): + pred = torch.tensor([[-1.0525, -0.7085, 0.1819, -0.8011], + [0.1555, -1.5550, 0.5586, 1.9746]]) + gt = torch.tensor([[1., 0., 0., 0.], [0., 0., 1., 1.]]) + mask = torch.tensor([[1., 1., 0., 0.], [0., 0., 1., 1.]]) + category_mask = torch.tensor([[1., 0.], [0., 1.]]) + categories = ['action', 'scene'] + category_nums = (2, 2) + category_loss_weights = (1, 1) + loss_all_nomask_sum = HVULoss( + categories=categories, + category_nums=category_nums, + category_loss_weights=category_loss_weights, + loss_type='all', + with_mask=False, + reduction='sum') + loss = loss_all_nomask_sum(pred, gt, mask, category_mask) + loss1 = F.binary_cross_entropy_with_logits(pred, gt, reduction='none') + loss1 = torch.sum(loss1, dim=1) + assert torch.eq(loss['loss_cls'], torch.mean(loss1)) + + loss_all_mask = HVULoss( + categories=categories, + category_nums=category_nums, + category_loss_weights=category_loss_weights, + loss_type='all', + with_mask=True) + loss = loss_all_mask(pred, gt, mask, category_mask) + loss1 = F.binary_cross_entropy_with_logits(pred, gt, reduction='none') + loss1 = torch.sum(loss1 * mask, dim=1) / torch.sum(mask, dim=1) + loss1 = torch.mean(loss1) + assert torch.eq(loss['loss_cls'], loss1) + + loss_ind_mask = HVULoss( + categories=categories, + category_nums=category_nums, + category_loss_weights=category_loss_weights, + loss_type='individual', + with_mask=True) + loss = loss_ind_mask(pred, gt, mask, category_mask) + action_loss = F.binary_cross_entropy_with_logits(pred[:1, :2], gt[:1, :2]) + scene_loss = F.binary_cross_entropy_with_logits(pred[1:, 2:], gt[1:, 2:]) + loss1 = (action_loss + scene_loss) / 2 + assert torch.eq(loss['loss_cls'], loss1) + + loss_ind_nomask_sum = HVULoss( + categories=categories, + category_nums=category_nums, + category_loss_weights=category_loss_weights, + loss_type='individual', + with_mask=False, + reduction='sum') + loss = loss_ind_nomask_sum(pred, gt, mask, category_mask) + action_loss = F.binary_cross_entropy_with_logits( + pred[:, :2], gt[:, :2], reduction='none') + action_loss = torch.sum(action_loss, dim=1) + action_loss = torch.mean(action_loss) + + scene_loss = F.binary_cross_entropy_with_logits( + pred[:, 2:], gt[:, 2:], reduction='none') + scene_loss = torch.sum(scene_loss, dim=1) + scene_loss = torch.mean(scene_loss) + + loss1 = (action_loss + scene_loss) / 2 + assert torch.eq(loss['loss_cls'], loss1) + + +def test_cross_entropy_loss(): + cls_scores = torch.rand((3, 4)) + gt_labels = torch.LongTensor([2] * 3).squeeze() + + cross_entropy_loss = CrossEntropyLoss() + output_loss = cross_entropy_loss(cls_scores, gt_labels) + assert torch.equal(output_loss, F.cross_entropy(cls_scores, gt_labels)) + + +def test_bce_loss_with_logits(): + cls_scores = torch.rand((3, 4)) + gt_labels = torch.rand((3, 4)) + bce_loss_with_logits = BCELossWithLogits() + output_loss = bce_loss_with_logits(cls_scores, gt_labels) + assert torch.equal( + output_loss, F.binary_cross_entropy_with_logits(cls_scores, gt_labels)) + + +def test_nll_loss(): + cls_scores = torch.randn(3, 3) + gt_labels = torch.tensor([0, 2, 1]).squeeze() + + sm = nn.Softmax(dim=0) + nll_loss = NLLLoss() + cls_score_log = torch.log(sm(cls_scores)) + output_loss = nll_loss(cls_score_log, gt_labels) + assert torch.equal(output_loss, F.nll_loss(cls_score_log, gt_labels)) + + +def test_binary_logistic_loss(): + 
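+ # The expected values below are consistent with a ratio-weighted binary
+ # logistic loss as used in BSN/BMN; a rough sketch (not the library
+ # source):
+ #   pmask = (label > threshold)
+ #   ratio = clamp(len(label) / num_positive, ...)
+ #   loss = -mean(0.5 * ratio * pmask * log(score + eps)
+ #                + 0.5 * ratio / (ratio - 1) * (1 - pmask) * log(1 - score + eps))
+ # e.g. score=[0.3, 0.9], label=[0., 1.] gives ratio=2 and
+ # loss = -(log(0.7) + log(0.9)) / 2 ≈ 0.231.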
binary_logistic_regression_loss = BinaryLogisticRegressionLoss() + reg_score = torch.tensor([0., 1.]) + label = torch.tensor([0., 1.]) + output_loss = binary_logistic_regression_loss(reg_score, label, 0.5) + assert_array_almost_equal(output_loss.numpy(), np.array([0.]), decimal=4) + + reg_score = torch.tensor([0.3, 0.9]) + label = torch.tensor([0., 1.]) + output_loss = binary_logistic_regression_loss(reg_score, label, 0.5) + assert_array_almost_equal( + output_loss.numpy(), np.array([0.231]), decimal=4) + + +def test_bmn_loss(): + bmn_loss = BMNLoss() + + # test tem_loss + pred_start = torch.tensor([0.9, 0.1]) + pred_end = torch.tensor([0.1, 0.9]) + gt_start = torch.tensor([1., 0.]) + gt_end = torch.tensor([0., 1.]) + output_tem_loss = bmn_loss.tem_loss(pred_start, pred_end, gt_start, gt_end) + binary_logistic_regression_loss = BinaryLogisticRegressionLoss() + assert_loss = ( + binary_logistic_regression_loss(pred_start, gt_start) + + binary_logistic_regression_loss(pred_end, gt_end)) + assert_array_almost_equal( + output_tem_loss.numpy(), assert_loss.numpy(), decimal=4) + + # test pem_reg_loss + seed = 1 + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + + pred_bm_reg = torch.tensor([[0.1, 0.99], [0.5, 0.4]]) + gt_iou_map = torch.tensor([[0, 1.], [0, 1.]]) + mask = torch.tensor([[0.1, 0.4], [0.4, 0.1]]) + output_pem_reg_loss = bmn_loss.pem_reg_loss(pred_bm_reg, gt_iou_map, mask) + assert_array_almost_equal( + output_pem_reg_loss.numpy(), np.array([0.2140]), decimal=4) + + # test pem_cls_loss + pred_bm_cls = torch.tensor([[0.1, 0.99], [0.95, 0.2]]) + gt_iou_map = torch.tensor([[0., 1.], [0., 1.]]) + mask = torch.tensor([[0.1, 0.4], [0.4, 0.1]]) + output_pem_cls_loss = bmn_loss.pem_cls_loss(pred_bm_cls, gt_iou_map, mask) + assert_array_almost_equal( + output_pem_cls_loss.numpy(), np.array([1.6137]), decimal=4) + + # test bmn_loss + pred_bm = torch.tensor([[[[0.1, 0.99], [0.5, 0.4]], + [[0.1, 0.99], [0.95, 0.2]]]]) + pred_start = torch.tensor([[0.9, 0.1]]) + pred_end = torch.tensor([[0.1, 0.9]]) + gt_iou_map = torch.tensor([[[0., 2.5], [0., 10.]]]) + gt_start = torch.tensor([[1., 0.]]) + gt_end = torch.tensor([[0., 1.]]) + mask = torch.tensor([[0.1, 0.4], [0.4, 0.1]]) + output_loss = bmn_loss(pred_bm, pred_start, pred_end, gt_iou_map, gt_start, + gt_end, mask) + assert_array_almost_equal( + output_loss[0].numpy(), + output_tem_loss + 10 * output_pem_reg_loss + output_pem_cls_loss) + assert_array_almost_equal(output_loss[1].numpy(), output_tem_loss) + assert_array_almost_equal(output_loss[2].numpy(), output_pem_reg_loss) + assert_array_almost_equal(output_loss[3].numpy(), output_pem_cls_loss) + + +def test_ohem_hinge_loss(): + # test normal case + pred = torch.tensor([[ + 0.5161, 0.5228, 0.7748, 0.0573, 0.1113, 0.8862, 0.1752, 0.9448, 0.0253, + 0.1009, 0.4371, 0.2232, 0.0412, 0.3487, 0.3350, 0.9294, 0.7122, 0.3072, + 0.2942, 0.7679 + ]], + requires_grad=True) + gt = torch.tensor([8]) + num_video = 1 + loss = OHEMHingeLoss.apply(pred, gt, 1, 1.0, num_video) + assert_array_almost_equal( + loss.detach().numpy(), np.array([0.0552]), decimal=4) + loss.backward(Variable(torch.ones([1]))) + assert_array_almost_equal( + np.array(pred.grad), + np.array([[ + 0., 0., 0., 0., 0., 0., 0., -1., 0., 0., 0., 0., 0., 0., 0., 0., + 0., 0., 0., 0. 
+ ]]), + decimal=4) + + # test error case + with pytest.raises(ValueError): + gt = torch.tensor([8, 10]) + loss = OHEMHingeLoss.apply(pred, gt, 1, 1.0, num_video) + + +def test_ssn_loss(): + ssn_loss = SSNLoss() + + # test activity_loss + activity_score = torch.rand((8, 21)) + labels = torch.LongTensor([8] * 8).squeeze() + activity_indexer = torch.tensor([0, 7]) + output_activity_loss = ssn_loss.activity_loss(activity_score, labels, + activity_indexer) + assert torch.equal( + output_activity_loss, + F.cross_entropy(activity_score[activity_indexer, :], + labels[activity_indexer])) + + # test completeness_loss + completeness_score = torch.rand((8, 20), requires_grad=True) + labels = torch.LongTensor([8] * 8).squeeze() + completeness_indexer = torch.tensor([0, 1, 2, 3, 4, 5, 6]) + positive_per_video = 1 + incomplete_per_video = 6 + output_completeness_loss = ssn_loss.completeness_loss( + completeness_score, labels, completeness_indexer, positive_per_video, + incomplete_per_video) + + pred = completeness_score[completeness_indexer, :] + gt = labels[completeness_indexer] + pred_dim = pred.size(1) + pred = pred.view(-1, positive_per_video + incomplete_per_video, pred_dim) + gt = gt.view(-1, positive_per_video + incomplete_per_video) + # yapf:disable + positive_pred = pred[:, :positive_per_video, :].contiguous().view(-1, pred_dim) # noqa:E501 + incomplete_pred = pred[:, positive_per_video:, :].contiguous().view(-1, pred_dim) # noqa:E501 + # yapf:enable + ohem_ratio = 0.17 + positive_loss = OHEMHingeLoss.apply( + positive_pred, gt[:, :positive_per_video].contiguous().view(-1), 1, + 1.0, positive_per_video) + incomplete_loss = OHEMHingeLoss.apply( + incomplete_pred, gt[:, positive_per_video:].contiguous().view(-1), -1, + ohem_ratio, incomplete_per_video) + num_positives = positive_pred.size(0) + num_incompletes = int(incomplete_pred.size(0) * ohem_ratio) + assert_loss = ((positive_loss + incomplete_loss) / + float(num_positives + num_incompletes)) + assert torch.equal(output_completeness_loss, assert_loss) + + # test reg_loss + bbox_pred = torch.rand((8, 20, 2)) + labels = torch.LongTensor([8] * 8).squeeze() + bbox_targets = torch.rand((8, 2)) + regression_indexer = torch.tensor([0]) + output_reg_loss = ssn_loss.classwise_regression_loss( + bbox_pred, labels, bbox_targets, regression_indexer) + + pred = bbox_pred[regression_indexer, :, :] + gt = labels[regression_indexer] + reg_target = bbox_targets[regression_indexer, :] + class_idx = gt.data - 1 + classwise_pred = pred[:, class_idx, :] + classwise_reg_pred = torch.cat((torch.diag(classwise_pred[:, :, 0]).view( + -1, 1), torch.diag(classwise_pred[:, :, 1]).view(-1, 1)), + dim=1) + assert torch.equal( + output_reg_loss, + F.smooth_l1_loss(classwise_reg_pred.view(-1), reg_target.view(-1)) * 2) + + # test ssn_loss + proposal_type = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 2]]) + train_cfg = ConfigDict( + dict( + ssn=dict( + sampler=dict( + num_per_video=8, + positive_ratio=1, + background_ratio=1, + incomplete_ratio=6, + add_gt_as_proposals=True), + loss_weight=dict(comp_loss_weight=0.1, reg_loss_weight=0.1)))) + output_loss = ssn_loss(activity_score, completeness_score, bbox_pred, + proposal_type, labels, bbox_targets, train_cfg) + assert torch.equal(output_loss['loss_activity'], output_activity_loss) + assert torch.equal(output_loss['loss_completeness'], + output_completeness_loss * 0.1) + assert torch.equal(output_loss['loss_reg'], output_reg_loss * 0.1) diff --git a/Downstream/Open-Set-Action-Recognition/tests/test_models/test_backbone.py 
b/Downstream/Open-Set-Action-Recognition/tests/test_models/test_backbone.py new file mode 100644 index 0000000..f6931f7 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/test_models/test_backbone.py @@ -0,0 +1,1033 @@ +import copy + +import numpy as np +import pytest +import torch +import torch.nn as nn +from mmcv.utils import _BatchNorm + +from mmaction.models import (C3D, X3D, ResNet, ResNet2Plus1d, ResNet3d, + ResNet3dCSN, ResNet3dSlowFast, ResNet3dSlowOnly, + ResNetAudio, ResNetTIN, ResNetTSM) +from mmaction.models.backbones.resnet_tsm import NL3DWrapper + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_resnet_backbone(): + """Test resnet backbone.""" + with pytest.raises(KeyError): + # ResNet depth should be in [18, 34, 50, 101, 152] + ResNet(20) + + with pytest.raises(AssertionError): + # In ResNet: 1 <= num_stages <= 4 + ResNet(50, num_stages=0) + + with pytest.raises(AssertionError): + # In ResNet: 1 <= num_stages <= 4 + ResNet(50, num_stages=5) + + with pytest.raises(AssertionError): + # len(strides) == len(dilations) == num_stages + ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3) + + with pytest.raises(TypeError): + # pretrain must be a str + resnet50 = ResNet(50, pretrained=0) + resnet50.init_weights() + + with pytest.raises(AssertionError): + # style must be in ['pytorch', 'caffe'] + ResNet(18, style='tensorflow') + + with pytest.raises(AssertionError): + # assert not with_cp + ResNet(18, with_cp=True) + + # resnet with depth 18, norm_eval False, initial weights + resnet18 = ResNet(18) + resnet18.init_weights() + + # resnet with depth 50, norm_eval True + resnet50 = ResNet(50, norm_eval=True) + resnet50.init_weights() + resnet50.train() + assert check_norm_state(resnet50.modules(), False) + + # resnet with depth 50, norm_eval True, pretrained + resnet50_pretrain = ResNet( + pretrained='torchvision://resnet50', depth=50, norm_eval=True) + resnet50_pretrain.init_weights() + resnet50_pretrain.train() + assert check_norm_state(resnet50_pretrain.modules(), False) + + # resnet with depth 50, norm_eval True, frozen_stages 1 + frozen_stages = 1 + resnet50_frozen = ResNet(50, frozen_stages=frozen_stages) + resnet50_frozen.init_weights() + resnet50_frozen.train() + assert resnet50_frozen.conv1.bn.training is False + for layer in resnet50_frozen.conv1.modules(): + for param in layer.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(resnet50_frozen, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # resnet with depth 50, partial batchnorm + resnet_pbn = ResNet(50, partial_bn=True) + resnet_pbn.train() + count_bn = 0 + for m in resnet_pbn.modules(): + if isinstance(m, nn.BatchNorm2d): + count_bn += 1 + if count_bn >= 2: + assert m.weight.requires_grad is False + assert m.bias.requires_grad is False + assert m.training is False + else: + assert m.weight.requires_grad is True + assert m.bias.requires_grad is True + assert m.training is True + + input_shape = (1, 3, 64, 64) + imgs = _demo_inputs(input_shape) + + # resnet with depth 18 inference + resnet18 = ResNet(18, norm_eval=False) + resnet18.init_weights() + resnet18.train() + feat = resnet18(imgs) + assert feat.shape == torch.Size([1, 
512, 2, 2]) + + # resnet with depth 50 inference + resnet50 = ResNet(50, norm_eval=False) + resnet50.init_weights() + resnet50.train() + feat = resnet50(imgs) + assert feat.shape == torch.Size([1, 2048, 2, 2]) + + # resnet with depth 50 in caffe style inference + resnet50_caffe = ResNet(50, style='caffe', norm_eval=False) + resnet50_caffe.init_weights() + resnet50_caffe.train() + feat = resnet50_caffe(imgs) + assert feat.shape == torch.Size([1, 2048, 2, 2]) + + +def test_x3d_backbone(): + """Test resnet3d backbone.""" + with pytest.raises(AssertionError): + # In X3D: 1 <= num_stages <= 4 + X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, num_stages=0) + + with pytest.raises(AssertionError): + # In X3D: 1 <= num_stages <= 4 + X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, num_stages=5) + + with pytest.raises(AssertionError): + # len(spatial_strides) == num_stages + X3D(gamma_w=1.0, + gamma_b=2.25, + gamma_d=2.2, + spatial_strides=(1, 2), + num_stages=4) + + with pytest.raises(AssertionError): + # se_style in ['half', 'all'] + X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, se_style=None) + + with pytest.raises(AssertionError): + # se_ratio should be None or > 0 + X3D(gamma_w=1.0, + gamma_b=2.25, + gamma_d=2.2, + se_style='half', + se_ratio=0) + + # x3d_s, no pretrained, norm_eval True + x3d_s = X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, norm_eval=True) + x3d_s.init_weights() + x3d_s.train() + assert check_norm_state(x3d_s.modules(), False) + + # x3d_l, no pretrained, norm_eval True + x3d_l = X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=5.0, norm_eval=True) + x3d_l.init_weights() + x3d_l.train() + assert check_norm_state(x3d_l.modules(), False) + + # x3d_s, no pretrained, norm_eval False + x3d_s = X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, norm_eval=False) + x3d_s.init_weights() + x3d_s.train() + assert check_norm_state(x3d_s.modules(), True) + + # x3d_l, no pretrained, norm_eval False + x3d_l = X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=5.0, norm_eval=False) + x3d_l.init_weights() + x3d_l.train() + assert check_norm_state(x3d_l.modules(), True) + + # x3d_s, no pretrained, frozen_stages, norm_eval False + frozen_stages = 1 + x3d_s_frozen = X3D( + gamma_w=1.0, + gamma_b=2.25, + gamma_d=2.2, + norm_eval=False, + frozen_stages=frozen_stages) + + x3d_s_frozen.init_weights() + x3d_s_frozen.train() + assert x3d_s_frozen.conv1_t.bn.training is False + for param in x3d_s_frozen.conv1_s.parameters(): + assert param.requires_grad is False + for param in x3d_s_frozen.conv1_t.parameters(): + assert param.requires_grad is False + + for i in range(1, frozen_stages + 1): + layer = getattr(x3d_s_frozen, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # test zero_init_residual, zero_init_residual is True by default + for m in x3d_s_frozen.modules(): + if hasattr(m, 'conv3'): + assert torch.equal(m.conv3.bn.weight, + torch.zeros_like(m.conv3.bn.weight)) + assert torch.equal(m.conv3.bn.bias, + torch.zeros_like(m.conv3.bn.bias)) + + # x3d_s inference + input_shape = (1, 3, 13, 160, 160) + imgs = _demo_inputs(input_shape) + # parrots 3dconv is only implemented on gpu + if torch.__version__ == 'parrots': + if torch.cuda.is_available(): + x3d_s_frozen = x3d_s_frozen.cuda() + imgs_gpu = imgs.cuda() + feat = x3d_s_frozen(imgs_gpu) + assert feat.shape == torch.Size([1, 432, 13, 5, 5]) + else: + feat = x3d_s_frozen(imgs) + assert feat.shape == torch.Size([1, 432, 13, 5, 5]) + + # x3d_m inference + 
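# NOTE (assumption, consistent with the shape asserts in this test): X3D keeps the temporal
+ # length and downsamples space by 32 overall, so 13x160x160 -> 13x5x5 above and
+ # 16x224x224 -> 16x7x7 below, both with 432 output channels.
+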
input_shape = (1, 3, 16, 224, 224) + imgs = _demo_inputs(input_shape) + # parrots 3dconv is only implemented on gpu + if torch.__version__ == 'parrots': + if torch.cuda.is_available(): + x3d_s_frozen = x3d_s_frozen.cuda() + imgs_gpu = imgs.cuda() + feat = x3d_s_frozen(imgs_gpu) + assert feat.shape == torch.Size([1, 432, 16, 7, 7]) + else: + feat = x3d_s_frozen(imgs) + assert feat.shape == torch.Size([1, 432, 16, 7, 7]) + + +def test_resnet3d_backbone(): + """Test resnet3d backbone.""" + with pytest.raises(AssertionError): + # In ResNet3d: 1 <= num_stages <= 4 + ResNet3d(34, None, num_stages=0) + + with pytest.raises(AssertionError): + # In ResNet3d: 1 <= num_stages <= 4 + ResNet3d(34, None, num_stages=5) + + with pytest.raises(AssertionError): + # In ResNet3d: 1 <= num_stages <= 4 + ResNet3d(50, None, num_stages=0) + + with pytest.raises(AssertionError): + # In ResNet3d: 1 <= num_stages <= 4 + ResNet3d(50, None, num_stages=5) + + with pytest.raises(AssertionError): + # len(spatial_strides) == len(temporal_strides) + # == len(dilations) == num_stages + ResNet3d( + 50, + None, + spatial_strides=(1, ), + temporal_strides=(1, 1), + dilations=(1, 1, 1), + num_stages=4) + + with pytest.raises(AssertionError): + # len(spatial_strides) == len(temporal_strides) + # == len(dilations) == num_stages + ResNet3d( + 34, + None, + spatial_strides=(1, ), + temporal_strides=(1, 1), + dilations=(1, 1, 1), + num_stages=4) + + with pytest.raises(TypeError): + # pretrain must be str or None. + resnet3d_34 = ResNet3d(34, ['resnet', 'bninception']) + resnet3d_34.init_weights() + + with pytest.raises(TypeError): + # pretrain must be str or None. + resnet3d_50 = ResNet3d(50, ['resnet', 'bninception']) + resnet3d_50.init_weights() + + # resnet3d with depth 34, no pretrained, norm_eval True + resnet3d_34 = ResNet3d(34, None, pretrained2d=False, norm_eval=True) + resnet3d_34.init_weights() + resnet3d_34.train() + assert check_norm_state(resnet3d_34.modules(), False) + + # resnet3d with depth 50, no pretrained, norm_eval True + resnet3d_50 = ResNet3d(50, None, pretrained2d=False, norm_eval=True) + resnet3d_50.init_weights() + resnet3d_50.train() + assert check_norm_state(resnet3d_50.modules(), False) + + # resnet3d with depth 50, pretrained2d, norm_eval True + resnet3d_50_pretrain = ResNet3d( + 50, 'torchvision://resnet50', norm_eval=True) + resnet3d_50_pretrain.init_weights() + resnet3d_50_pretrain.train() + assert check_norm_state(resnet3d_50_pretrain.modules(), False) + from mmcv.runner import _load_checkpoint + chkp_2d = _load_checkpoint('torchvision://resnet50') + for name, module in resnet3d_50_pretrain.named_modules(): + if len(name.split('.')) == 4: + # layer.block.module.submodule + prefix = name.split('.')[:2] + module_type = name.split('.')[2] + submodule_type = name.split('.')[3] + + if module_type == 'downsample': + name2d = name.replace('conv', '0').replace('bn', '1') + else: + layer_id = name.split('.')[2][-1] + name2d = prefix[0] + '.' + prefix[1] + '.' 
+ \ + submodule_type + layer_id + + if isinstance(module, nn.Conv3d): + conv2d_weight = chkp_2d[name2d + '.weight'] + conv3d_weight = getattr(module, 'weight').data + assert torch.equal( + conv3d_weight, + conv2d_weight.data.unsqueeze(2).expand_as(conv3d_weight) / + conv3d_weight.shape[2]) + if getattr(module, 'bias') is not None: + conv2d_bias = chkp_2d[name2d + '.bias'] + conv3d_bias = getattr(module, 'bias').data + assert torch.equal(conv2d_bias, conv3d_bias) + + elif isinstance(module, nn.BatchNorm3d): + for pname in ['weight', 'bias', 'running_mean', 'running_var']: + param_2d = chkp_2d[name2d + '.' + pname] + param_3d = getattr(module, pname).data + assert torch.equal(param_2d, param_3d) + + conv3d = resnet3d_50_pretrain.conv1.conv + assert torch.equal( + conv3d.weight, + chkp_2d['conv1.weight'].unsqueeze(2).expand_as(conv3d.weight) / + conv3d.weight.shape[2]) + conv3d = resnet3d_50_pretrain.layer3[2].conv2.conv + assert torch.equal( + conv3d.weight, chkp_2d['layer3.2.conv2.weight'].unsqueeze(2).expand_as( + conv3d.weight) / conv3d.weight.shape[2]) + + # resnet3d with depth 34, no pretrained, norm_eval False + resnet3d_34_no_bn_eval = ResNet3d( + 34, None, pretrained2d=False, norm_eval=False) + resnet3d_34_no_bn_eval.init_weights() + resnet3d_34_no_bn_eval.train() + assert check_norm_state(resnet3d_34_no_bn_eval.modules(), True) + + # resnet3d with depth 50, no pretrained, norm_eval False + resnet3d_50_no_bn_eval = ResNet3d( + 50, None, pretrained2d=False, norm_eval=False) + resnet3d_50_no_bn_eval.init_weights() + resnet3d_50_no_bn_eval.train() + assert check_norm_state(resnet3d_50_no_bn_eval.modules(), True) + + # resnet3d with depth 34, no pretrained, frozen_stages, norm_eval False + frozen_stages = 1 + resnet3d_34_frozen = ResNet3d( + 34, None, pretrained2d=False, frozen_stages=frozen_stages) + resnet3d_34_frozen.init_weights() + resnet3d_34_frozen.train() + assert resnet3d_34_frozen.conv1.bn.training is False + for param in resnet3d_34_frozen.conv1.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(resnet3d_34_frozen, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + # test zero_init_residual + for m in resnet3d_34_frozen.modules(): + if hasattr(m, 'conv2'): + assert torch.equal(m.conv2.bn.weight, + torch.zeros_like(m.conv2.bn.weight)) + assert torch.equal(m.conv2.bn.bias, + torch.zeros_like(m.conv2.bn.bias)) + + # resnet3d with depth 50, no pretrained, frozen_stages, norm_eval False + frozen_stages = 1 + resnet3d_50_frozen = ResNet3d( + 50, None, pretrained2d=False, frozen_stages=frozen_stages) + resnet3d_50_frozen.init_weights() + resnet3d_50_frozen.train() + assert resnet3d_50_frozen.conv1.bn.training is False + for param in resnet3d_50_frozen.conv1.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(resnet3d_50_frozen, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + # test zero_init_residual + for m in resnet3d_50_frozen.modules(): + if hasattr(m, 'conv3'): + assert torch.equal(m.conv3.bn.weight, + torch.zeros_like(m.conv3.bn.weight)) + assert torch.equal(m.conv3.bn.bias, + torch.zeros_like(m.conv3.bn.bias)) + + # resnet3d frozen with depth 34 inference + input_shape = (1, 3, 6, 64, 64) + imgs = 
_demo_inputs(input_shape) + # parrots 3dconv is only implemented on gpu + if torch.__version__ == 'parrots': + if torch.cuda.is_available(): + resnet3d_34_frozen = resnet3d_34_frozen.cuda() + imgs_gpu = imgs.cuda() + feat = resnet3d_34_frozen(imgs_gpu) + assert feat.shape == torch.Size([1, 512, 1, 2, 2]) + else: + feat = resnet3d_34_frozen(imgs) + assert feat.shape == torch.Size([1, 512, 1, 2, 2]) + + # resnet3d with depth 50 inference + input_shape = (1, 3, 6, 64, 64) + imgs = _demo_inputs(input_shape) + # parrots 3dconv is only implemented on gpu + if torch.__version__ == 'parrots': + if torch.cuda.is_available(): + resnet3d_50_frozen = resnet3d_50_frozen.cuda() + imgs_gpu = imgs.cuda() + feat = resnet3d_50_frozen(imgs_gpu) + assert feat.shape == torch.Size([1, 2048, 1, 2, 2]) + else: + feat = resnet3d_50_frozen(imgs) + assert feat.shape == torch.Size([1, 2048, 1, 2, 2]) + + # resnet3d with depth 50 in caffe style inference + resnet3d_50_caffe = ResNet3d(50, None, pretrained2d=False, style='caffe') + resnet3d_50_caffe.init_weights() + resnet3d_50_caffe.train() + + # parrots 3dconv is only implemented on gpu + if torch.__version__ == 'parrots': + if torch.cuda.is_available(): + resnet3d_50_caffe = resnet3d_50_caffe.cuda() + imgs_gpu = imgs.cuda() + feat = resnet3d_50_caffe(imgs_gpu) + assert feat.shape == torch.Size([1, 2048, 1, 2, 2]) + else: + feat = resnet3d_50_caffe(imgs) + assert feat.shape == torch.Size([1, 2048, 1, 2, 2]) + + # resnet3d with depth 34 in caffe style inference + resnet3d_34_caffe = ResNet3d(34, None, pretrained2d=False, style='caffe') + resnet3d_34_caffe.init_weights() + resnet3d_34_caffe.train() + # parrots 3dconv is only implemented on gpu + if torch.__version__ == 'parrots': + if torch.cuda.is_available(): + resnet3d_34_caffe = resnet3d_34_caffe.cuda() + imgs_gpu = imgs.cuda() + feat = resnet3d_34_caffe(imgs_gpu) + assert feat.shape == torch.Size([1, 512, 1, 2, 2]) + else: + feat = resnet3d_34_caffe(imgs) + assert feat.shape == torch.Size([1, 512, 1, 2, 2]) + + # resnet3d with depth with 3x3x3 inflate_style inference + resnet3d_50_1x1x1 = ResNet3d( + 50, None, pretrained2d=False, inflate_style='3x3x3') + resnet3d_50_1x1x1.init_weights() + resnet3d_50_1x1x1.train() + # parrots 3dconv is only implemented on gpu + if torch.__version__ == 'parrots': + if torch.cuda.is_available(): + resnet3d_50_1x1x1 = resnet3d_50_1x1x1.cuda() + imgs_gpu = imgs.cuda() + feat = resnet3d_50_1x1x1(imgs_gpu) + assert feat.shape == torch.Size([1, 2048, 1, 2, 2]) + else: + feat = resnet3d_50_1x1x1(imgs) + assert feat.shape == torch.Size([1, 2048, 1, 2, 2]) + + resnet3d_34_1x1x1 = ResNet3d( + 34, None, pretrained2d=False, inflate_style='3x3x3') + resnet3d_34_1x1x1.init_weights() + resnet3d_34_1x1x1.train() + + # parrots 3dconv is only implemented on gpu + if torch.__version__ == 'parrots': + if torch.cuda.is_available(): + resnet3d_34_1x1x1 = resnet3d_34_1x1x1.cuda() + imgs_gpu = imgs.cuda() + feat = resnet3d_34_1x1x1(imgs_gpu) + assert feat.shape == torch.Size([1, 512, 1, 2, 2]) + else: + feat = resnet3d_34_1x1x1(imgs) + assert feat.shape == torch.Size([1, 512, 1, 2, 2]) + + # resnet3d with non-local module + non_local_cfg = dict( + sub_sample=True, + use_scale=False, + norm_cfg=dict(type='BN3d', requires_grad=True), + mode='embedded_gaussian') + non_local = ((0, 0, 0), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 0, 0)) + resnet3d_nonlocal = ResNet3d( + 50, + None, + pretrained2d=False, + non_local=non_local, + non_local_cfg=non_local_cfg) + resnet3d_nonlocal.init_weights() + for layer_name in 
['layer2', 'layer3']: + layer = getattr(resnet3d_nonlocal, layer_name) + for i, _ in enumerate(layer): + if i % 2 == 0: + assert hasattr(layer[i], 'non_local_block') + + feat = resnet3d_nonlocal(imgs) + assert feat.shape == torch.Size([1, 2048, 1, 2, 2]) + + +def test_resnet2plus1d_backbone(): + # Test r2+1d backbone + with pytest.raises(AssertionError): + # r2+1d does not support inflation + ResNet2Plus1d(50, None, pretrained2d=True) + + with pytest.raises(AssertionError): + # r2+1d requires conv(2+1)d module + ResNet2Plus1d( + 50, None, pretrained2d=False, conv_cfg=dict(type='Conv3d')) + + frozen_stages = 1 + r2plus1d_34_frozen = ResNet2Plus1d( + 34, + None, + conv_cfg=dict(type='Conv2plus1d'), + pretrained2d=False, + frozen_stages=frozen_stages, + conv1_kernel=(3, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(1, 1, 1, 1), + spatial_strides=(1, 2, 2, 2), + temporal_strides=(1, 2, 2, 2)) + r2plus1d_34_frozen.init_weights() + r2plus1d_34_frozen.train() + assert r2plus1d_34_frozen.conv1.conv.bn_s.training is False + assert r2plus1d_34_frozen.conv1.bn.training is False + for param in r2plus1d_34_frozen.conv1.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(r2plus1d_34_frozen, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + input_shape = (1, 3, 8, 64, 64) + imgs = _demo_inputs(input_shape) + # parrots 3dconv is only implemented on gpu + if torch.__version__ == 'parrots': + if torch.cuda.is_available(): + r2plus1d_34_frozen = r2plus1d_34_frozen.cuda() + imgs_gpu = imgs.cuda() + feat = r2plus1d_34_frozen(imgs_gpu) + assert feat.shape == torch.Size([1, 512, 1, 2, 2]) + else: + feat = r2plus1d_34_frozen(imgs) + assert feat.shape == torch.Size([1, 512, 1, 2, 2]) + + r2plus1d_50_frozen = ResNet2Plus1d( + 50, + None, + conv_cfg=dict(type='Conv2plus1d'), + pretrained2d=False, + conv1_kernel=(3, 7, 7), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(1, 1, 1, 1), + spatial_strides=(1, 2, 2, 2), + temporal_strides=(1, 2, 2, 2), + frozen_stages=frozen_stages) + r2plus1d_50_frozen.init_weights() + + r2plus1d_50_frozen.train() + assert r2plus1d_50_frozen.conv1.conv.bn_s.training is False + assert r2plus1d_50_frozen.conv1.bn.training is False + for param in r2plus1d_50_frozen.conv1.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(r2plus1d_50_frozen, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + input_shape = (1, 3, 8, 64, 64) + imgs = _demo_inputs(input_shape) + + # parrots 3dconv is only implemented on gpu + if torch.__version__ == 'parrots': + if torch.cuda.is_available(): + r2plus1d_50_frozen = r2plus1d_50_frozen.cuda() + imgs_gpu = imgs.cuda() + feat = r2plus1d_50_frozen(imgs_gpu) + assert feat.shape == torch.Size([1, 2048, 1, 2, 2]) + else: + feat = r2plus1d_50_frozen(imgs) + assert feat.shape == torch.Size([1, 2048, 1, 2, 2]) + + +def test_resnet_tsm_backbone(): + """Test resnet_tsm backbone.""" + with pytest.raises(NotImplementedError): + # shift_place must be block or blockres + resnet_tsm_50_block = ResNetTSM(50, shift_place='Block') + resnet_tsm_50_block.init_weights() + + from mmaction.models.backbones.resnet import Bottleneck + from mmaction.models.backbones.resnet_tsm import TemporalShift + + 
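# NOTE (illustrative sketch, not asserted by this test): TemporalShift wraps a conv and,
+ # before running it, rolls part of the channels across neighbouring segments, roughly
+ # (following the reference TSM code; indices are illustrative):
+ #   x = x.view(-1, num_segments, c, h, w); fold = c // shift_div
+ #   out[:, :-1, :fold] = x[:, 1:, :fold]                  # one chunk moves to earlier frames
+ #   out[:, 1:, fold:2 * fold] = x[:, :-1, fold:2 * fold]  # another chunk moves to later frames
+ # The checks below only verify this wrapping and its bookkeeping (num_segments, shift_div).
+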
input_shape = (8, 3, 64, 64) + imgs = _demo_inputs(input_shape) + + # resnet_tsm with depth 50 + resnet_tsm_50 = ResNetTSM(50) + resnet_tsm_50.init_weights() + for layer_name in resnet_tsm_50.res_layers: + layer = getattr(resnet_tsm_50, layer_name) + blocks = list(layer.children()) + for block in blocks: + assert isinstance(block.conv1.conv, TemporalShift) + assert block.conv1.conv.num_segments == resnet_tsm_50.num_segments + assert block.conv1.conv.shift_div == resnet_tsm_50.shift_div + assert isinstance(block.conv1.conv.net, nn.Conv2d) + + # resnet_tsm with depth 50, no pretrained, shift_place is block + resnet_tsm_50_block = ResNetTSM(50, shift_place='block') + resnet_tsm_50_block.init_weights() + for layer_name in resnet_tsm_50_block.res_layers: + layer = getattr(resnet_tsm_50_block, layer_name) + blocks = list(layer.children()) + for block in blocks: + assert isinstance(block, TemporalShift) + assert block.num_segments == resnet_tsm_50_block.num_segments + assert block.num_segments == resnet_tsm_50_block.num_segments + assert block.shift_div == resnet_tsm_50_block.shift_div + assert isinstance(block.net, Bottleneck) + + # resnet_tsm with depth 50, no pretrained, use temporal_pool + resnet_tsm_50_temporal_pool = ResNetTSM(50, temporal_pool=True) + resnet_tsm_50_temporal_pool.init_weights() + for layer_name in resnet_tsm_50_temporal_pool.res_layers: + layer = getattr(resnet_tsm_50_temporal_pool, layer_name) + blocks = list(layer.children()) + + if layer_name == 'layer2': + assert len(blocks) == 2 + assert isinstance(blocks[1], nn.MaxPool3d) + blocks = copy.deepcopy(blocks[0]) + + for block in blocks: + assert isinstance(block.conv1.conv, TemporalShift) + if layer_name == 'layer1': + assert block.conv1.conv.num_segments == \ + resnet_tsm_50_temporal_pool.num_segments + else: + assert block.conv1.conv.num_segments == \ + resnet_tsm_50_temporal_pool.num_segments // 2 + assert block.conv1.conv.shift_div == resnet_tsm_50_temporal_pool.shift_div # noqa: E501 + assert isinstance(block.conv1.conv.net, nn.Conv2d) + + # resnet_tsm with non-local module + non_local_cfg = dict( + sub_sample=True, + use_scale=False, + norm_cfg=dict(type='BN3d', requires_grad=True), + mode='embedded_gaussian') + non_local = ((0, 0, 0), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 0, 0)) + resnet_tsm_nonlocal = ResNetTSM( + 50, non_local=non_local, non_local_cfg=non_local_cfg) + resnet_tsm_nonlocal.init_weights() + for layer_name in ['layer2', 'layer3']: + layer = getattr(resnet_tsm_nonlocal, layer_name) + for i, _ in enumerate(layer): + if i % 2 == 0: + assert isinstance(layer[i], NL3DWrapper) + + resnet_tsm_50_full = ResNetTSM( + 50, + non_local=non_local, + non_local_cfg=non_local_cfg, + temporal_pool=True) + resnet_tsm_50_full.init_weights() + + # TSM forword + feat = resnet_tsm_50(imgs) + assert feat.shape == torch.Size([8, 2048, 2, 2]) + + # TSM with non-local forward + feat = resnet_tsm_nonlocal(imgs) + assert feat.shape == torch.Size([8, 2048, 2, 2]) + + # TSM with temporal pool forward + feat = resnet_tsm_50_temporal_pool(imgs) + assert feat.shape == torch.Size([4, 2048, 2, 2]) + + # TSM with temporal pool + non-local forward + input_shape = (16, 3, 32, 32) + imgs = _demo_inputs(input_shape) + feat = resnet_tsm_50_full(imgs) + assert feat.shape == torch.Size([8, 2048, 1, 1]) + + +def test_slowfast_backbone(): + """Test SlowFast backbone.""" + with pytest.raises(TypeError): + # cfg should be a dict + ResNet3dSlowFast(None, slow_pathway=list(['foo', 'bar'])) + with pytest.raises(TypeError): + # pretrained should be a 
str + sf_50 = ResNet3dSlowFast(dict(foo='bar')) + sf_50.init_weights() + with pytest.raises(KeyError): + # pathway type should be implemented + ResNet3dSlowFast(None, slow_pathway=dict(type='resnext')) + + # test slowfast with slow inflated + sf_50_inflate = ResNet3dSlowFast( + None, + slow_pathway=dict( + type='resnet3d', + depth=50, + pretrained='torchvision://resnet50', + pretrained2d=True, + lateral=True, + conv1_kernel=(1, 7, 7), + dilations=(1, 1, 1, 1), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1))) + sf_50_inflate.init_weights() + sf_50_inflate.train() + + # test slowfast with no lateral connection + sf_50_wo_lateral = ResNet3dSlowFast( + None, + slow_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + lateral=False, + conv1_kernel=(1, 7, 7), + dilations=(1, 1, 1, 1), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1))) + sf_50_wo_lateral.init_weights() + sf_50_wo_lateral.train() + + # slowfast w/o lateral connection inference test + input_shape = (1, 3, 8, 64, 64) + imgs = _demo_inputs(input_shape) + # parrots 3dconv is only implemented on gpu + if torch.__version__ == 'parrots': + if torch.cuda.is_available(): + sf_50_wo_lateral = sf_50_wo_lateral.cuda() + imgs_gpu = imgs.cuda() + feat = sf_50_wo_lateral(imgs_gpu) + else: + feat = sf_50_wo_lateral(imgs) + + assert isinstance(feat, tuple) + assert feat[0].shape == torch.Size([1, 2048, 1, 2, 2]) + assert feat[1].shape == torch.Size([1, 256, 8, 2, 2]) + + # test slowfast with frozen stages config + frozen_slow = 3 + sf_50 = ResNet3dSlowFast( + None, + slow_pathway=dict( + type='resnet3d', + depth=50, + pretrained=None, + pretrained2d=True, + lateral=True, + conv1_kernel=(1, 7, 7), + dilations=(1, 1, 1, 1), + conv1_stride_t=1, + pool1_stride_t=1, + inflate=(0, 0, 1, 1), + frozen_stages=frozen_slow)) + sf_50.init_weights() + sf_50.train() + + for stage in range(1, sf_50.slow_path.num_stages): + lateral_name = sf_50.slow_path.lateral_connections[stage - 1] + conv_lateral = getattr(sf_50.slow_path, lateral_name) + for mod in conv_lateral.modules(): + if isinstance(mod, _BatchNorm): + if stage <= frozen_slow: + assert mod.training is False + else: + assert mod.training is True + for param in conv_lateral.parameters(): + if stage <= frozen_slow: + assert param.requires_grad is False + else: + assert param.requires_grad is True + + # test slowfast with normal config + sf_50 = ResNet3dSlowFast(None) + sf_50.init_weights() + sf_50.train() + + # slowfast inference test + input_shape = (1, 3, 8, 64, 64) + imgs = _demo_inputs(input_shape) + # parrots 3dconv is only implemented on gpu + if torch.__version__ == 'parrots': + if torch.cuda.is_available(): + sf_50 = sf_50.cuda() + imgs_gpu = imgs.cuda() + feat = sf_50(imgs_gpu) + else: + feat = sf_50(imgs) + + assert isinstance(feat, tuple) + assert feat[0].shape == torch.Size([1, 2048, 1, 2, 2]) + assert feat[1].shape == torch.Size([1, 256, 8, 2, 2]) + + +def test_slowonly_backbone(): + """Test SlowOnly backbone.""" + with pytest.raises(AssertionError): + # SlowOnly should contain no lateral connection + ResNet3dSlowOnly(50, None, lateral=True) + + # test SlowOnly with normal config + so_50 = ResNet3dSlowOnly(50, None) + so_50.init_weights() + so_50.train() + + # SlowOnly inference test + input_shape = (1, 3, 8, 64, 64) + imgs = _demo_inputs(input_shape) + # parrots 3dconv is only implemented on gpu + if torch.__version__ == 'parrots': + if torch.cuda.is_available(): + so_50 = so_50.cuda() + imgs_gpu = imgs.cuda() + feat = so_50(imgs_gpu) + else: + feat = 
so_50(imgs) + assert feat.shape == torch.Size([1, 2048, 8, 2, 2]) + + +def test_resnet_csn_backbone(): + """Test resnet_csn backbone.""" + with pytest.raises(ValueError): + # Bottleneck mode must be "ip" or "ir" + ResNet3dCSN(152, None, bottleneck_mode='id') + + input_shape = (2, 3, 6, 64, 64) + imgs = _demo_inputs(input_shape) + + resnet3d_csn_frozen = ResNet3dCSN( + 152, None, bn_frozen=True, norm_eval=True) + resnet3d_csn_frozen.train() + for m in resnet3d_csn_frozen.modules(): + if isinstance(m, _BatchNorm): + for param in m.parameters(): + assert param.requires_grad is False + + # Interaction-preserved channel-separated bottleneck block + resnet3d_csn_ip = ResNet3dCSN(152, None, bottleneck_mode='ip') + resnet3d_csn_ip.init_weights() + resnet3d_csn_ip.train() + for i, layer_name in enumerate(resnet3d_csn_ip.res_layers): + layers = getattr(resnet3d_csn_ip, layer_name) + num_blocks = resnet3d_csn_ip.stage_blocks[i] + assert len(layers) == num_blocks + for layer in layers: + assert isinstance(layer.conv2, nn.Sequential) + assert len(layer.conv2) == 2 + assert layer.conv2[1].groups == layer.planes + if torch.__version__ == 'parrots': + if torch.cuda.is_available(): + resnet3d_csn_ip = resnet3d_csn_ip.cuda() + imgs_gpu = imgs.cuda() + feat = resnet3d_csn_ip(imgs_gpu) + assert feat.shape == torch.Size([2, 2048, 1, 2, 2]) + else: + feat = resnet3d_csn_ip(imgs) + assert feat.shape == torch.Size([2, 2048, 1, 2, 2]) + + # Interaction-reduced channel-separated bottleneck block + resnet3d_csn_ir = ResNet3dCSN(152, None, bottleneck_mode='ir') + resnet3d_csn_ir.init_weights() + resnet3d_csn_ir.train() + for i, layer_name in enumerate(resnet3d_csn_ir.res_layers): + layers = getattr(resnet3d_csn_ir, layer_name) + num_blocks = resnet3d_csn_ir.stage_blocks[i] + assert len(layers) == num_blocks + for layer in layers: + assert isinstance(layer.conv2, nn.Sequential) + assert len(layer.conv2) == 1 + assert layer.conv2[0].groups == layer.planes + if torch.__version__ == 'parrots': + if torch.cuda.is_available(): + resnet3d_csn_ir = resnet3d_csn_ir.cuda() + imgs_gpu = imgs.cuda() + feat = resnet3d_csn_ir(imgs_gpu) + assert feat.shape == torch.Size([2, 2048, 1, 2, 2]) + else: + feat = resnet3d_csn_ir(imgs) + assert feat.shape == torch.Size([2, 2048, 1, 2, 2]) + + # Set training status = False + resnet3d_csn_ip = ResNet3dCSN(152, None, bottleneck_mode='ip') + resnet3d_csn_ip.init_weights() + resnet3d_csn_ip.train(False) + for module in resnet3d_csn_ip.children(): + assert module.training is False + + +def test_c3d_backbone(): + """Test c3d backbone.""" + input_shape = (1, 3, 16, 112, 112) + imgs = _demo_inputs(input_shape) + + # c3d inference test + c3d = C3D() + c3d.init_weights() + c3d.train() + feat = c3d(imgs) + assert feat.shape == torch.Size([1, 4096]) + + # c3d with bn inference test + c3d_bn = C3D(norm_cfg=dict(type='BN3d')) + c3d_bn.init_weights() + c3d_bn.train() + feat = c3d_bn(imgs) + assert feat.shape == torch.Size([1, 4096]) + + +def test_resnet_audio_backbone(): + """Test ResNetAudio backbone.""" + input_shape = (1, 1, 16, 16) + spec = _demo_inputs(input_shape) + # inference + audioonly = ResNetAudio(50, None) + audioonly.init_weights() + audioonly.train() + feat = audioonly(spec) + assert feat.shape == torch.Size([1, 1024, 2, 2]) + + +@pytest.mark.skipif( + not torch.cuda.is_available(), reason='requires CUDA support') +def test_resnet_tin_backbone(): + """Test resnet_tin backbone.""" + with pytest.raises(AssertionError): + # num_segments should be positive + resnet_tin = ResNetTIN(50, 
num_segments=-1) + resnet_tin.init_weights() + + from mmaction.models.backbones.resnet_tin import (CombineNet, + TemporalInterlace) + + # resnet_tin with normal config + resnet_tin = ResNetTIN(50) + resnet_tin.init_weights() + for layer_name in resnet_tin.res_layers: + layer = getattr(resnet_tin, layer_name) + blocks = list(layer.children()) + for block in blocks: + assert isinstance(block.conv1.conv, CombineNet) + assert isinstance(block.conv1.conv.net1, TemporalInterlace) + assert ( + block.conv1.conv.net1.num_segments == resnet_tin.num_segments) + assert block.conv1.conv.net1.shift_div == resnet_tin.shift_div + + # resnet_tin with partial batchnorm + resnet_tin_pbn = ResNetTIN(50, partial_bn=True) + resnet_tin_pbn.train() + count_bn = 0 + for m in resnet_tin_pbn.modules(): + if isinstance(m, nn.BatchNorm2d): + count_bn += 1 + if count_bn >= 2: + assert m.training is False + assert m.weight.requires_grad is False + assert m.bias.requires_grad is False + else: + assert m.training is True + assert m.weight.requires_grad is True + assert m.bias.requires_grad is True + + input_shape = (8, 3, 64, 64) + imgs = _demo_inputs(input_shape).cuda() + resnet_tin = resnet_tin.cuda() + + # resnet_tin with normal cfg inference + feat = resnet_tin(imgs) + assert feat.shape == torch.Size([8, 2048, 2, 2]) + + +def _demo_inputs(input_shape=(1, 3, 64, 64)): + """Create a superset of inputs needed to run backbone. + + Args: + input_shape (tuple): input batch dimensions. + Default: (1, 3, 64, 64). + """ + imgs = np.random.random(input_shape) + imgs = torch.FloatTensor(imgs) + + return imgs diff --git a/Downstream/Open-Set-Action-Recognition/tests/test_models/test_common_modules.py b/Downstream/Open-Set-Action-Recognition/tests/test_models/test_common_modules.py new file mode 100644 index 0000000..1691a50 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/test_models/test_common_modules.py @@ -0,0 +1,35 @@ +import pytest +import torch + +from mmaction.models import Conv2plus1d, ConvAudio + + +def test_conv2plus1d(): + with pytest.raises(AssertionError): + # Length of kernel size, stride and padding must be the same + Conv2plus1d(3, 8, (2, 2)) + + conv_2plus1d = Conv2plus1d(3, 8, 2) + conv_2plus1d.init_weights() + + assert torch.equal(conv_2plus1d.bn_s.weight, + torch.ones_like(conv_2plus1d.bn_s.weight)) + assert torch.equal(conv_2plus1d.bn_s.bias, + torch.zeros_like(conv_2plus1d.bn_s.bias)) + + x = torch.rand(1, 3, 8, 256, 256) + output = conv_2plus1d(x) + assert output.shape == torch.Size([1, 8, 7, 255, 255]) + + +def test_conv_audio(): + conv_audio = ConvAudio(3, 8, 3) + conv_audio.init_weights() + + x = torch.rand(1, 3, 8, 8) + output = conv_audio(x) + assert output.shape == torch.Size([1, 16, 8, 8]) + + conv_audio_sum = ConvAudio(3, 8, 3, op='sum') + output = conv_audio_sum(x) + assert output.shape == torch.Size([1, 8, 8, 8]) diff --git a/Downstream/Open-Set-Action-Recognition/tests/test_models/test_head.py b/Downstream/Open-Set-Action-Recognition/tests/test_models/test_head.py new file mode 100644 index 0000000..b05d85f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/test_models/test_head.py @@ -0,0 +1,283 @@ +import torch +import torch.nn as nn + +from mmaction.models import (AudioTSNHead, BaseHead, I3DHead, SlowFastHead, + TPNHead, TSMHead, TSNHead, X3DHead) + + +class ExampleHead(BaseHead): + # use a ExampleHead to success BaseHead + def init_weights(self): + pass + + def forward(self, x): + pass + + +def test_base_head(): + head = ExampleHead(3, 400, 
dict(type='CrossEntropyLoss')) + + cls_scores = torch.rand((3, 4)) + # When truth is non-empty then cls loss should be nonzero for random inputs + gt_labels = torch.LongTensor([2] * 3).squeeze() + losses = head.loss(cls_scores, gt_labels) + assert 'loss_cls' in losses.keys() + assert losses.get('loss_cls') > 0, 'cls loss should be non-zero' + + +def test_i3d_head(): + """Test loss method, layer construction, attributes and forward function in + i3d head.""" + i3d_head = I3DHead(num_classes=4, in_channels=2048) + i3d_head.init_weights() + + assert i3d_head.num_classes == 4 + assert i3d_head.dropout_ratio == 0.5 + assert i3d_head.in_channels == 2048 + assert i3d_head.init_std == 0.01 + + assert isinstance(i3d_head.dropout, nn.Dropout) + assert i3d_head.dropout.p == i3d_head.dropout_ratio + + assert isinstance(i3d_head.fc_cls, nn.Linear) + assert i3d_head.fc_cls.in_features == i3d_head.in_channels + assert i3d_head.fc_cls.out_features == i3d_head.num_classes + + assert isinstance(i3d_head.avg_pool, nn.AdaptiveAvgPool3d) + assert i3d_head.avg_pool.output_size == (1, 1, 1) + + input_shape = (3, 2048, 4, 7, 7) + feat = torch.rand(input_shape) + + # i3d head inference + cls_scores = i3d_head(feat) + assert cls_scores.shape == torch.Size([3, 4]) + + +def test_x3d_head(): + """Test loss method, layer construction, attributes and forward function in + x3d head.""" + x3d_head = X3DHead(in_channels=432, num_classes=4, fc1_bias=False) + x3d_head.init_weights() + + assert x3d_head.num_classes == 4 + assert x3d_head.dropout_ratio == 0.5 + assert x3d_head.in_channels == 432 + assert x3d_head.init_std == 0.01 + + assert isinstance(x3d_head.dropout, nn.Dropout) + assert x3d_head.dropout.p == x3d_head.dropout_ratio + + assert isinstance(x3d_head.fc1, nn.Linear) + assert x3d_head.fc1.in_features == x3d_head.in_channels + assert x3d_head.fc1.out_features == x3d_head.mid_channels + assert x3d_head.fc1.bias is None + + assert isinstance(x3d_head.fc2, nn.Linear) + assert x3d_head.fc2.in_features == x3d_head.mid_channels + assert x3d_head.fc2.out_features == x3d_head.num_classes + + assert isinstance(x3d_head.pool, nn.AdaptiveAvgPool3d) + assert x3d_head.pool.output_size == (1, 1, 1) + + input_shape = (3, 432, 4, 7, 7) + feat = torch.rand(input_shape) + + # i3d head inference + cls_scores = x3d_head(feat) + assert cls_scores.shape == torch.Size([3, 4]) + + +def test_slowfast_head(): + """Test loss method, layer construction, attributes and forward function in + slowfast head.""" + sf_head = SlowFastHead(num_classes=4, in_channels=2304) + sf_head.init_weights() + + assert sf_head.num_classes == 4 + assert sf_head.dropout_ratio == 0.8 + assert sf_head.in_channels == 2304 + assert sf_head.init_std == 0.01 + + assert isinstance(sf_head.dropout, nn.Dropout) + assert sf_head.dropout.p == sf_head.dropout_ratio + + assert isinstance(sf_head.fc_cls, nn.Linear) + assert sf_head.fc_cls.in_features == sf_head.in_channels + assert sf_head.fc_cls.out_features == sf_head.num_classes + + assert isinstance(sf_head.avg_pool, nn.AdaptiveAvgPool3d) + assert sf_head.avg_pool.output_size == (1, 1, 1) + + input_shape = (3, 2048, 32, 7, 7) + feat_slow = torch.rand(input_shape) + + input_shape = (3, 256, 4, 7, 7) + feat_fast = torch.rand(input_shape) + + sf_head = SlowFastHead(num_classes=4, in_channels=2304) + cls_scores = sf_head((feat_slow, feat_fast)) + assert cls_scores.shape == torch.Size([3, 4]) + + +def test_tsn_head(): + """Test loss method, layer construction, attributes and forward function in + tsn head.""" + tsn_head = 
TSNHead(num_classes=4, in_channels=2048) + tsn_head.init_weights() + + assert tsn_head.num_classes == 4 + assert tsn_head.dropout_ratio == 0.4 + assert tsn_head.in_channels == 2048 + assert tsn_head.init_std == 0.01 + assert tsn_head.consensus.dim == 1 + assert tsn_head.spatial_type == 'avg' + + assert isinstance(tsn_head.dropout, nn.Dropout) + assert tsn_head.dropout.p == tsn_head.dropout_ratio + + assert isinstance(tsn_head.fc_cls, nn.Linear) + assert tsn_head.fc_cls.in_features == tsn_head.in_channels + assert tsn_head.fc_cls.out_features == tsn_head.num_classes + + assert isinstance(tsn_head.avg_pool, nn.AdaptiveAvgPool2d) + assert tsn_head.avg_pool.output_size == (1, 1) + + input_shape = (8, 2048, 7, 7) + feat = torch.rand(input_shape) + + # tsn head inference + num_segs = input_shape[0] + cls_scores = tsn_head(feat, num_segs) + assert cls_scores.shape == torch.Size([1, 4]) + + # Test multi-class recognition + multi_tsn_head = TSNHead( + num_classes=4, + in_channels=2048, + loss_cls=dict(type='BCELossWithLogits', loss_weight=160.0), + multi_class=True, + label_smooth_eps=0.01) + multi_tsn_head.init_weights() + assert multi_tsn_head.num_classes == 4 + assert multi_tsn_head.dropout_ratio == 0.4 + assert multi_tsn_head.in_channels == 2048 + assert multi_tsn_head.init_std == 0.01 + assert multi_tsn_head.consensus.dim == 1 + + assert isinstance(multi_tsn_head.dropout, nn.Dropout) + assert multi_tsn_head.dropout.p == multi_tsn_head.dropout_ratio + + assert isinstance(multi_tsn_head.fc_cls, nn.Linear) + assert multi_tsn_head.fc_cls.in_features == multi_tsn_head.in_channels + assert multi_tsn_head.fc_cls.out_features == multi_tsn_head.num_classes + + assert isinstance(multi_tsn_head.avg_pool, nn.AdaptiveAvgPool2d) + assert multi_tsn_head.avg_pool.output_size == (1, 1) + + input_shape = (8, 2048, 7, 7) + feat = torch.rand(input_shape) + + # multi-class tsn head inference + num_segs = input_shape[0] + cls_scores = tsn_head(feat, num_segs) + assert cls_scores.shape == torch.Size([1, 4]) + + +def test_tsn_head_audio(): + """Test loss method, layer construction, attributes and forward function in + tsn head.""" + tsn_head_audio = AudioTSNHead(num_classes=4, in_channels=5) + tsn_head_audio.init_weights() + + assert tsn_head_audio.num_classes == 4 + assert tsn_head_audio.dropout_ratio == 0.4 + assert tsn_head_audio.in_channels == 5 + assert tsn_head_audio.init_std == 0.01 + assert tsn_head_audio.spatial_type == 'avg' + + assert isinstance(tsn_head_audio.dropout, nn.Dropout) + assert tsn_head_audio.dropout.p == tsn_head_audio.dropout_ratio + + assert isinstance(tsn_head_audio.fc_cls, nn.Linear) + assert tsn_head_audio.fc_cls.in_features == tsn_head_audio.in_channels + assert tsn_head_audio.fc_cls.out_features == tsn_head_audio.num_classes + + assert isinstance(tsn_head_audio.avg_pool, nn.AdaptiveAvgPool2d) + assert tsn_head_audio.avg_pool.output_size == (1, 1) + + input_shape = (8, 5, 7, 7) + feat = torch.rand(input_shape) + + # tsn head inference + cls_scores = tsn_head_audio(feat) + assert cls_scores.shape == torch.Size([8, 4]) + + +def test_tsm_head(): + """Test loss method, layer construction, attributes and forward function in + tsm head.""" + tsm_head = TSMHead(num_classes=4, in_channels=2048) + tsm_head.init_weights() + + assert tsm_head.num_classes == 4 + assert tsm_head.dropout_ratio == 0.8 + assert tsm_head.in_channels == 2048 + assert tsm_head.init_std == 0.001 + assert tsm_head.consensus.dim == 1 + assert tsm_head.spatial_type == 'avg' + + assert isinstance(tsm_head.dropout, nn.Dropout) + 
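# NOTE (assumption, explains the shape checks further below): with is_shift enabled the head
+ # folds the segment axis out of the batch and averages the per-segment scores with its
+ # consensus, so an (8, 2048, 7, 7) input yields [1, 4]; with temporal_pool=True only
+ # num_segments // 2 segments are folded, giving [2, 4].
+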
assert tsm_head.dropout.p == tsm_head.dropout_ratio + + assert isinstance(tsm_head.fc_cls, nn.Linear) + assert tsm_head.fc_cls.in_features == tsm_head.in_channels + assert tsm_head.fc_cls.out_features == tsm_head.num_classes + + assert isinstance(tsm_head.avg_pool, nn.AdaptiveAvgPool2d) + assert tsm_head.avg_pool.output_size == 1 + + input_shape = (8, 2048, 7, 7) + feat = torch.rand(input_shape) + + # tsm head inference with no init + num_segs = input_shape[0] + cls_scores = tsm_head(feat, num_segs) + assert cls_scores.shape == torch.Size([1, 4]) + + # tsm head inference with init + tsm_head = TSMHead(num_classes=4, in_channels=2048, temporal_pool=True) + tsm_head.init_weights() + cls_scores = tsm_head(feat, num_segs) + assert cls_scores.shape == torch.Size([2, 4]) + + +def test_tpn_head(): + """Test loss method, layer construction, attributes and forward function in + tpn head.""" + tpn_head = TPNHead(num_classes=4, in_channels=2048) + tpn_head.init_weights() + + assert hasattr(tpn_head, 'avg_pool2d') + assert hasattr(tpn_head, 'avg_pool3d') + assert isinstance(tpn_head.avg_pool3d, nn.AdaptiveAvgPool3d) + assert tpn_head.avg_pool3d.output_size == (1, 1, 1) + assert tpn_head.avg_pool2d is None + + input_shape = (4, 2048, 7, 7) + feat = torch.rand(input_shape) + + # tpn head inference with num_segs + num_segs = 2 + cls_scores = tpn_head(feat, num_segs) + assert isinstance(tpn_head.avg_pool2d, nn.AvgPool3d) + assert tpn_head.avg_pool2d.kernel_size == (1, 7, 7) + assert cls_scores.shape == torch.Size([2, 4]) + + # tpn head inference with no num_segs + input_shape = (2, 2048, 3, 7, 7) + feat = torch.rand(input_shape) + cls_scores = tpn_head(feat) + assert isinstance(tpn_head.avg_pool2d, nn.AvgPool3d) + assert tpn_head.avg_pool2d.kernel_size == (1, 7, 7) + assert cls_scores.shape == torch.Size([2, 4]) diff --git a/Downstream/Open-Set-Action-Recognition/tests/test_models/test_inference.py b/Downstream/Open-Set-Action-Recognition/tests/test_models/test_inference.py new file mode 100644 index 0000000..b6f94bf --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/test_models/test_inference.py @@ -0,0 +1,72 @@ +import mmcv +import pytest +import torch +import torch.nn as nn + +from mmaction.apis import inference_recognizer, init_recognizer + +video_config_file = 'configs/recognition/tsn/tsn_r50_video_inference_1x1x3_100e_kinetics400_rgb.py' # noqa: E501 +frame_config_file = 'configs/recognition/tsn/tsn_r50_inference_1x1x3_100e_kinetics400_rgb.py' # noqa: E501 +label_path = 'demo/label_map.txt' +video_path = 'demo/demo.mp4' + + +def test_init_recognizer(): + with pytest.raises(TypeError): + # config must be a filename or Config object + init_recognizer(dict(config_file=None)) + + with pytest.raises(RuntimeError): + # input data type should be consist with the dataset type + init_recognizer(frame_config_file) + + with pytest.raises(RuntimeError): + # input data type should be consist with the dataset type + init_recognizer(video_config_file, use_frames=True) + + if torch.cuda.is_available(): + device = 'cuda:0' + else: + device = 'cpu' + + model = init_recognizer(video_config_file, None, device) + + config = mmcv.Config.fromfile(video_config_file) + config.model.backbone.pretrained = None + + isinstance(model, nn.Module) + if torch.cuda.is_available(): + assert next(model.parameters()).is_cuda is True + else: + assert next(model.parameters()).is_cuda is False + assert model.cfg.model.backbone.pretrained is None + + +def test_inference_recognizer(): + if torch.cuda.is_available(): + device = 
'cuda:0' + else: + device = 'cpu' + model = init_recognizer(video_config_file, None, device) + + with pytest.raises(RuntimeError): + # video path doesn't exist + inference_recognizer(model, 'missing.mp4', label_path) + + with pytest.raises(RuntimeError): + # ``video_path`` should be consist with the ``use_frames`` + inference_recognizer(model, video_path, label_path, use_frames=True) + + with pytest.raises(RuntimeError): + # ``video_path`` should be consist with the ``use_frames`` + inference_recognizer(model, 'demo/', label_path) + + for ops in model.cfg.data.test.pipeline: + if ops['type'] in ('TenCrop', 'ThreeCrop'): + # Use CenterCrop to reduce memory in order to pass CI + ops['type'] = 'CenterCrop' + + top5_label = inference_recognizer(model, video_path, label_path) + scores = [item[1] for item in top5_label] + assert len(top5_label) == 5 + assert scores == sorted(scores, reverse=True) diff --git a/Downstream/Open-Set-Action-Recognition/tests/test_models/test_localizers.py b/Downstream/Open-Set-Action-Recognition/tests/test_models/test_localizers.py new file mode 100644 index 0000000..658dec8 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/test_models/test_localizers.py @@ -0,0 +1,366 @@ +import copy + +import mmcv +import numpy as np +import pytest +import torch + +from mmaction.models import build_localizer +from mmaction.models.localizers.utils import post_processing + + +def test_tem(): + model_cfg = dict( + type='TEM', + temporal_dim=100, + boundary_ratio=0.1, + tem_feat_dim=400, + tem_hidden_dim=512, + tem_match_threshold=0.5) + + localizer_tem = build_localizer(model_cfg) + raw_feature = torch.rand(8, 400, 100) + gt_bbox = torch.Tensor([[[1.0, 3.0], [3.0, 5.0]]] * 8) + losses = localizer_tem(raw_feature, gt_bbox) + assert isinstance(losses, dict) + + # Test forward test + video_meta = [{'video_name': 'v_test'}] + with torch.no_grad(): + for one_raw_feature in raw_feature: + one_raw_feature = one_raw_feature.reshape(1, 400, 100) + localizer_tem( + one_raw_feature, video_meta=video_meta, return_loss=False) + + +def test_pem(): + model_cfg = dict( + type='PEM', + pem_feat_dim=32, + pem_hidden_dim=256, + pem_u_ratio_m=1, + pem_u_ratio_l=2, + pem_high_temporal_iou_threshold=0.6, + pem_low_temporal_iou_threshold=2.2, + soft_nms_alpha=0.75, + soft_nms_low_threshold=0.65, + soft_nms_high_threshold=0.9, + post_process_top_k=100) + + localizer_pem = build_localizer(model_cfg) + bsp_feature = torch.rand(8, 100, 32) + reference_temporal_iou = torch.rand(8, 100) + losses = localizer_pem(bsp_feature, reference_temporal_iou) + assert isinstance(losses, dict) + + # Test forward test + tmin = torch.rand(100) + tmax = torch.rand(100) + tmin_score = torch.rand(100) + tmax_score = torch.rand(100) + + video_meta = [ + dict( + video_name='v_test', + duration_second=100, + duration_frame=1000, + annotations=[{ + 'segment': [0.3, 0.6], + 'label': 'Rock climbing' + }], + feature_frame=900) + ] + with torch.no_grad(): + for one_bsp_feature in bsp_feature: + one_bsp_feature = one_bsp_feature.reshape(1, 100, 32) + localizer_pem( + one_bsp_feature, + tmin=tmin, + tmax=tmax, + tmin_score=tmin_score, + tmax_score=tmax_score, + video_meta=video_meta, + return_loss=False) + + +def test_bmn(): + model_cfg = dict( + type='BMN', + temporal_dim=100, + boundary_ratio=0.5, + num_samples=32, + num_samples_per_bin=3, + feat_dim=400, + soft_nms_alpha=0.4, + soft_nms_low_threshold=0.5, + soft_nms_high_threshold=0.9, + post_process_top_k=100) + if torch.cuda.is_available(): + localizer_bmn = 
build_localizer(model_cfg).cuda() + raw_feature = torch.rand(8, 400, 100).cuda() + gt_bbox = np.array([[[0.1, 0.3], [0.375, 0.625]]] * 8) + losses = localizer_bmn(raw_feature, gt_bbox) + assert isinstance(losses, dict) + + # Test forward test + video_meta = [ + dict( + video_name='v_test', + duration_second=100, + duration_frame=960, + feature_frame=960) + ] + with torch.no_grad(): + one_raw_feature = torch.rand(1, 400, 100).cuda() + localizer_bmn( + one_raw_feature, + gt_bbox=None, + video_meta=video_meta, + return_loss=False) + else: + localizer_bmn = build_localizer(model_cfg) + raw_feature = torch.rand(8, 400, 100) + gt_bbox = torch.Tensor([[[0.1, 0.3], [0.375, 0.625]]] * 8) + losses = localizer_bmn(raw_feature, gt_bbox) + assert isinstance(losses, dict) + + # Test forward test + video_meta = [ + dict( + video_name='v_test', + duration_second=100, + duration_frame=960, + feature_frame=960) + ] + with torch.no_grad(): + one_raw_feature = torch.rand(1, 400, 100) + localizer_bmn( + one_raw_feature, + gt_bbox=None, + video_meta=video_meta, + return_loss=False) + + +def test_post_processing(): + # test with multiple results + result = np.array([[0., 1., 1., 1., 0.5, 0.5], [0., 0.4, 1., 1., 0.4, 0.4], + [0., 0.95, 1., 1., 0.6, 0.6]]) + video_info = dict( + video_name='v_test', + duration_second=100, + duration_frame=960, + feature_frame=960) + proposal_list = post_processing(result, video_info, 0.75, 0.65, 0.9, 2, 16) + assert isinstance(proposal_list[0], dict) + assert proposal_list[0]['score'] == 0.6 + assert proposal_list[0]['segment'] == [0., 95.0] + assert isinstance(proposal_list[1], dict) + assert proposal_list[1]['score'] == 0.4 + assert proposal_list[1]['segment'] == [0., 40.0] + + # test with only result + result = np.array([[0., 1., 1., 1., 0.5, 0.5]]) + video_info = dict( + video_name='v_test', + duration_second=100, + duration_frame=960, + feature_frame=960) + proposal_list = post_processing(result, video_info, 0.75, 0.65, 0.9, 1, 16) + assert isinstance(proposal_list[0], dict) + assert proposal_list[0]['score'] == 0.5 + assert proposal_list[0]['segment'] == [0., 100.0] + + +def test_ssn_train(): + train_cfg = mmcv.ConfigDict( + dict( + ssn=dict( + assigner=dict( + positive_iou_threshold=0.7, + background_iou_threshold=0.01, + incomplete_iou_threshold=0.3, + background_coverage_threshold=0.02, + incomplete_overlap_threshold=0.01), + sampler=dict( + num_per_video=8, + positive_ratio=1, + background_ratio=1, + incomplete_ratio=6, + add_gt_as_proposals=True), + loss_weight=dict(comp_loss_weight=0.1, reg_loss_weight=0.1), + debug=False))) + base_model_cfg = dict( + type='SSN', + backbone=dict( + type='ResNet', pretrained=None, depth=18, norm_eval=True), + spatial_type='avg', + dropout_ratio=0.8, + loss_cls=dict(type='SSNLoss'), + cls_head=dict( + type='SSNHead', + dropout_ratio=0., + in_channels=512, + num_classes=20, + consensus=dict( + type='STPPTrain', + stpp_stage=(1, 1, 1), + num_segments_list=(2, 5, 2)), + use_regression=True), + train_cfg=train_cfg) + dropout_cfg = copy.deepcopy(base_model_cfg) + dropout_cfg['dropout_ratio'] = 0 + dropout_cfg['cls_head']['dropout_ratio'] = 0.5 + non_regression_cfg = copy.deepcopy(base_model_cfg) + non_regression_cfg['cls_head']['use_regression'] = False + + imgs = torch.rand(1, 8, 9, 3, 224, 224) + proposal_scale_factor = torch.Tensor([[[1.0345, 1.0345], [1.0028, 0.0028], + [1.0013, 1.0013], [1.0008, 1.0008], + [0.3357, 1.0006], [1.0006, 1.0006], + [0.0818, 1.0005], [1.0030, + 1.0030]]]) + proposal_type = torch.Tensor([[0, 1, 1, 1, 1, 1, 1, 
2]]) + proposal_labels = torch.LongTensor([[8, 8, 8, 8, 8, 8, 8, 0]]) + reg_targets = torch.Tensor([[[0.2929, 0.2694], [0.0000, 0.0000], + [0.0000, 0.0000], [0.0000, 0.0000], + [0.0000, 0.0000], [0.0000, 0.0000], + [0.0000, 0.0000], [0.0000, 0.0000]]]) + + localizer_ssn = build_localizer(base_model_cfg) + localizer_ssn_dropout = build_localizer(dropout_cfg) + localizer_ssn_non_regression = build_localizer(non_regression_cfg) + + if torch.cuda.is_available(): + localizer_ssn = localizer_ssn.cuda() + localizer_ssn_dropout = localizer_ssn_dropout.cuda() + localizer_ssn_non_regression = localizer_ssn_non_regression.cuda() + imgs = imgs.cuda() + proposal_scale_factor = proposal_scale_factor.cuda() + proposal_type = proposal_type.cuda() + proposal_labels = proposal_labels.cuda() + reg_targets = reg_targets.cuda() + + # Train normal case + losses = localizer_ssn( + imgs, + proposal_scale_factor=proposal_scale_factor, + proposal_type=proposal_type, + proposal_labels=proposal_labels, + reg_targets=reg_targets) + assert isinstance(losses, dict) + + # Train SSN without dropout in model, with dropout in head + losses = localizer_ssn_dropout( + imgs, + proposal_scale_factor=proposal_scale_factor, + proposal_type=proposal_type, + proposal_labels=proposal_labels, + reg_targets=reg_targets) + assert isinstance(losses, dict) + + # Train SSN model without regression + losses = localizer_ssn_non_regression( + imgs, + proposal_scale_factor=proposal_scale_factor, + proposal_type=proposal_type, + proposal_labels=proposal_labels, + reg_targets=reg_targets) + assert isinstance(losses, dict) + + +def test_ssn_test(): + test_cfg = mmcv.ConfigDict( + dict( + ssn=dict( + sampler=dict(test_interval=6, batch_size=16), + evaluater=dict( + top_k=2000, + nms=0.2, + softmax_before_filter=True, + cls_score_dict=None, + cls_top_k=2)))) + base_model_cfg = dict( + type='SSN', + backbone=dict( + type='ResNet', pretrained=None, depth=18, norm_eval=True), + spatial_type='avg', + dropout_ratio=0.8, + cls_head=dict( + type='SSNHead', + dropout_ratio=0., + in_channels=512, + num_classes=20, + consensus=dict(type='STPPTest', stpp_stage=(1, 1, 1)), + use_regression=True), + test_cfg=test_cfg) + maxpool_model_cfg = copy.deepcopy(base_model_cfg) + maxpool_model_cfg['spatial_type'] = 'max' + non_regression_cfg = copy.deepcopy(base_model_cfg) + non_regression_cfg['cls_head']['use_regression'] = False + non_regression_cfg['cls_head']['consensus']['use_regression'] = False + tuple_stage_cfg = copy.deepcopy(base_model_cfg) + tuple_stage_cfg['cls_head']['consensus']['stpp_stage'] = (1, (1, 2), 1) + str_stage_cfg = copy.deepcopy(base_model_cfg) + str_stage_cfg['cls_head']['consensus']['stpp_stage'] = ('error', ) + + imgs = torch.rand(1, 8, 3, 224, 224) + relative_proposal_list = torch.Tensor([[[0.2500, 0.6250], [0.3750, + 0.7500]]]) + scale_factor_list = torch.Tensor([[[1.0000, 1.0000], [1.0000, 0.2661]]]) + proposal_tick_list = torch.LongTensor([[[1, 2, 5, 7], [20, 30, 60, 80]]]) + reg_norm_consts = torch.Tensor([[[-0.0603, 0.0325], [0.0752, 0.1596]]]) + + localizer_ssn = build_localizer(base_model_cfg) + localizer_ssn_maxpool = build_localizer(maxpool_model_cfg) + localizer_ssn_non_regression = build_localizer(non_regression_cfg) + localizer_ssn_tuple_stage_cfg = build_localizer(tuple_stage_cfg) + with pytest.raises(ValueError): + build_localizer(str_stage_cfg) + + if torch.cuda.is_available(): + localizer_ssn = localizer_ssn.cuda() + localizer_ssn_maxpool = localizer_ssn_maxpool.cuda() + localizer_ssn_non_regression = 
localizer_ssn_non_regression.cuda() + localizer_ssn_tuple_stage_cfg = localizer_ssn_tuple_stage_cfg.cuda() + imgs = imgs.cuda() + relative_proposal_list = relative_proposal_list.cuda() + scale_factor_list = scale_factor_list.cuda() + proposal_tick_list = proposal_tick_list.cuda() + reg_norm_consts = reg_norm_consts.cuda() + + with torch.no_grad(): + # Test normal case + localizer_ssn( + imgs, + relative_proposal_list=relative_proposal_list, + scale_factor_list=scale_factor_list, + proposal_tick_list=proposal_tick_list, + reg_norm_consts=reg_norm_consts, + return_loss=False) + + # Test SSN model with max spatial pooling + localizer_ssn_maxpool( + imgs, + relative_proposal_list=relative_proposal_list, + scale_factor_list=scale_factor_list, + proposal_tick_list=proposal_tick_list, + reg_norm_consts=reg_norm_consts, + return_loss=False) + + # Test SSN model without regression + localizer_ssn_non_regression( + imgs, + relative_proposal_list=relative_proposal_list, + scale_factor_list=scale_factor_list, + proposal_tick_list=proposal_tick_list, + reg_norm_consts=reg_norm_consts, + return_loss=False) + + # Test SSN model with tuple stage cfg. + localizer_ssn_tuple_stage_cfg( + imgs, + relative_proposal_list=relative_proposal_list, + scale_factor_list=scale_factor_list, + proposal_tick_list=proposal_tick_list, + reg_norm_consts=reg_norm_consts, + return_loss=False) diff --git a/Downstream/Open-Set-Action-Recognition/tests/test_models/test_neck.py b/Downstream/Open-Set-Action-Recognition/tests/test_models/test_neck.py new file mode 100644 index 0000000..2fd014e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/test_models/test_neck.py @@ -0,0 +1,99 @@ +import copy + +import numpy as np +import pytest +import torch + +from mmaction.models import TPN + + +def test_tpn(): + """Test TPN backbone.""" + + tpn_cfg = dict( + in_channels=(1024, 2048), + out_channels=1024, + spatial_modulation_cfg=dict( + in_channels=(1024, 2048), out_channels=2048), + temporal_modulation_cfg=dict(downsample_scales=(8, 8)), + upsample_cfg=dict(scale_factor=(1, 1, 1)), + downsample_cfg=dict(downsample_scale=(1, 1, 1)), + level_fusion_cfg=dict( + in_channels=(1024, 1024), + mid_channels=(1024, 1024), + out_channels=2048, + downsample_scales=((1, 1, 1), (1, 1, 1))), + aux_head_cfg=dict(out_channels=400, loss_weight=0.5)) + + with pytest.raises(AssertionError): + tpn_cfg_ = copy.deepcopy(tpn_cfg) + tpn_cfg_['in_channels'] = list(tpn_cfg_['in_channels']) + TPN(**tpn_cfg_) + + with pytest.raises(AssertionError): + tpn_cfg_ = copy.deepcopy(tpn_cfg) + tpn_cfg_['out_channels'] = float(tpn_cfg_['out_channels']) + TPN(**tpn_cfg_) + + with pytest.raises(AssertionError): + tpn_cfg_ = copy.deepcopy(tpn_cfg) + tpn_cfg_['downsample_cfg']['downsample_position'] = 'unsupport' + TPN(**tpn_cfg_) + + for k in tpn_cfg: + if not k.endswith('_cfg'): + continue + tpn_cfg_ = copy.deepcopy(tpn_cfg) + tpn_cfg_[k] = list() + with pytest.raises(AssertionError): + TPN(**tpn_cfg_) + + with pytest.raises(ValueError): + tpn_cfg_ = copy.deepcopy(tpn_cfg) + tpn_cfg_['flow_type'] = 'unsupport' + TPN(**tpn_cfg_) + + target_shape = (32, 1) + target = _demo_inputs(target_shape).long().squeeze() + x0_shape = (32, 1024, 1, 4, 4) + x1_shape = (32, 2048, 1, 2, 2) + x0 = _demo_inputs(x0_shape) + x1 = _demo_inputs(x1_shape) + x = [x0, x1] + + # ResNetTPN with 'cascade' flow_type + tpn_cfg_ = copy.deepcopy(tpn_cfg) + tpn_cascade = TPN(**tpn_cfg_) + feat, loss_aux = tpn_cascade(x, target) + assert feat.shape == torch.Size([32, 2048, 1, 2, 2]) + assert 
len(loss_aux) == 1 + + # ResNetTPN with 'parallel' flow_type + tpn_cfg_ = copy.deepcopy(tpn_cfg) + tpn_parallel = TPN(flow_type='parallel', **tpn_cfg_) + feat, loss_aux = tpn_parallel(x, target) + assert feat.shape == torch.Size([32, 2048, 1, 2, 2]) + assert len(loss_aux) == 1 + + # ResNetTPN with 'cascade' flow_type and target is None + feat, loss_aux = tpn_cascade(x, None) + assert feat.shape == torch.Size([32, 2048, 1, 2, 2]) + assert len(loss_aux) == 0 + + # ResNetTPN with 'parallel' flow_type and target is None + feat, loss_aux = tpn_parallel(x, None) + assert feat.shape == torch.Size([32, 2048, 1, 2, 2]) + assert len(loss_aux) == 0 + + +def _demo_inputs(input_shape=(1, 3, 64, 64)): + """Create a superset of inputs needed to run backbone. + + Args: + input_shape (tuple): input batch dimensions. + Default: (1, 3, 64, 64). + """ + imgs = np.random.random(input_shape) + imgs = torch.FloatTensor(imgs) + + return imgs diff --git a/Downstream/Open-Set-Action-Recognition/tests/test_models/test_recognizers.py b/Downstream/Open-Set-Action-Recognition/tests/test_models/test_recognizers.py new file mode 100644 index 0000000..b173170 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/test_models/test_recognizers.py @@ -0,0 +1,504 @@ +import os.path as osp + +import mmcv +import numpy as np +import pytest +import torch +import torch.nn.functional as F + +from mmaction.models import BaseRecognizer, build_recognizer + + +class ExampleRecognizer(BaseRecognizer): + + def __init__(self, train_cfg, test_cfg): + super(BaseRecognizer, self).__init__() + # reconstruct `__init__()` method in BaseRecognizer to avoid building + # backbone and head which are useless to ExampleRecognizer, + # since ExampleRecognizer is only used for model-unrelated methods + # (like `average_clip`) testing. + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + def forward_train(self, imgs, labels): + pass + + def forward_test(self, imgs): + pass + + def forward_gradcam(self, imgs): + pass + + +def _get_recognizer_cfg(fname): + """Grab configs necessary to create a recognizer. + + These are deep copied to allow for safe modification of parameters without + influencing other tests. + """ + repo_dpath = osp.dirname(osp.dirname(osp.dirname(__file__))) + config_dpath = osp.join(repo_dpath, 'configs/recognition') + config_fpath = osp.join(config_dpath, fname) + if not osp.exists(config_dpath): + raise Exception('Cannot find config path') + config = mmcv.Config.fromfile(config_fpath) + return config.model, config.train_cfg, config.test_cfg + + +def _get_audio_recognizer_cfg(fname): + """Grab configs necessary to create a audio recognizer. + + These are deep copied to allow for safe modification of parameters without + influencing other tests. 
+ """ + repo_dpath = osp.dirname(osp.dirname(osp.dirname(__file__))) + config_dpath = osp.join(repo_dpath, 'configs/recognition_audio/') + config_fpath = osp.join(config_dpath, fname) + if not osp.exists(config_dpath): + raise Exception('Cannot find config path') + config = mmcv.Config.fromfile(config_fpath) + return config.model, config.train_cfg, config.test_cfg + + +def test_base_recognizer(): + cls_score = torch.rand(5, 400) + with pytest.raises(KeyError): + # "average_clips" must defined in test_cfg keys + wrong_test_cfg = dict(clip='score') + recognizer = ExampleRecognizer(None, wrong_test_cfg) + recognizer.average_clip(cls_score) + + with pytest.raises(ValueError): + # unsupported average clips type + wrong_test_cfg = dict(average_clips='softmax') + recognizer = ExampleRecognizer(None, wrong_test_cfg) + recognizer.average_clip(cls_score) + + with pytest.raises(ValueError): + # Label should not be None + recognizer = ExampleRecognizer(None, None) + recognizer(torch.tensor(0)) + + # average_clips='score' + test_cfg = dict(average_clips='score') + recognizer = ExampleRecognizer(None, test_cfg) + score = recognizer.average_clip(cls_score, num_segs=5) + assert torch.equal(score, cls_score.mean(dim=0, keepdim=True)) + + # average_clips='prob' + test_cfg = dict(average_clips='prob') + recognizer = ExampleRecognizer(None, test_cfg) + score = recognizer.average_clip(cls_score, num_segs=5) + assert torch.equal(score, + F.softmax(cls_score, dim=1).mean(dim=0, keepdim=True)) + + +def test_tsn(): + model, train_cfg, test_cfg = _get_recognizer_cfg( + 'tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py') + model['backbone']['pretrained'] = None + + recognizer = build_recognizer( + model, train_cfg=train_cfg, test_cfg=test_cfg) + + input_shape = (1, 3, 3, 32, 32) + demo_inputs = generate_demo_inputs(input_shape) + + imgs = demo_inputs['imgs'] + gt_labels = demo_inputs['gt_labels'] + + losses = recognizer(imgs, gt_labels) + assert isinstance(losses, dict) + + # Test forward test + with torch.no_grad(): + img_list = [img[None, :] for img in imgs] + for one_img in img_list: + recognizer(one_img, None, return_loss=False) + + # Test forward gradcam + recognizer(imgs, gradcam=True) + for one_img in img_list: + recognizer(one_img, gradcam=True) + + +def test_i3d(): + model, train_cfg, test_cfg = _get_recognizer_cfg( + 'i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py') + model['backbone']['pretrained2d'] = False + model['backbone']['pretrained'] = None + + recognizer = build_recognizer( + model, train_cfg=train_cfg, test_cfg=test_cfg) + + input_shape = (1, 3, 3, 8, 32, 32) + demo_inputs = generate_demo_inputs(input_shape, '3D') + + imgs = demo_inputs['imgs'] + gt_labels = demo_inputs['gt_labels'] + + # parrots 3dconv is only implemented on gpu + if torch.__version__ == 'parrots': + if torch.cuda.is_available(): + recognizer = recognizer.cuda() + imgs = imgs.cuda() + gt_labels = gt_labels.cuda() + losses = recognizer(imgs, gt_labels) + assert isinstance(losses, dict) + + # Test forward test + with torch.no_grad(): + img_list = [img[None, :] for img in imgs] + for one_img in img_list: + recognizer(one_img, None, return_loss=False) + + # Test forward gradcam + recognizer(imgs, gradcam=True) + for one_img in img_list: + recognizer(one_img, gradcam=True) + + else: + losses = recognizer(imgs, gt_labels) + assert isinstance(losses, dict) + + # Test forward test + with torch.no_grad(): + img_list = [img[None, :] for img in imgs] + for one_img in img_list: + recognizer(one_img, None, return_loss=False) + + # Test forward gradcam 
+ recognizer(imgs, gradcam=True) + for one_img in img_list: + recognizer(one_img, gradcam=True) + + +def test_r2plus1d(): + model, train_cfg, test_cfg = _get_recognizer_cfg( + 'r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb.py') + model['backbone']['pretrained2d'] = False + model['backbone']['pretrained'] = None + model['backbone']['norm_cfg'] = dict(type='BN3d') + + recognizer = build_recognizer( + model, train_cfg=train_cfg, test_cfg=test_cfg) + + input_shape = (1, 3, 3, 8, 32, 32) + demo_inputs = generate_demo_inputs(input_shape, '3D') + + imgs = demo_inputs['imgs'] + gt_labels = demo_inputs['gt_labels'] + + # parrots 3dconv is only implemented on gpu + if torch.__version__ == 'parrots': + if torch.cuda.is_available(): + recognizer = recognizer.cuda() + imgs = imgs.cuda() + gt_labels = gt_labels.cuda() + losses = recognizer(imgs, gt_labels) + assert isinstance(losses, dict) + + # Test forward test + with torch.no_grad(): + img_list = [img[None, :] for img in imgs] + for one_img in img_list: + recognizer(one_img, None, return_loss=False) + + # Test forward gradcam + recognizer(imgs, gradcam=True) + for one_img in img_list: + recognizer(one_img, gradcam=True) + else: + losses = recognizer(imgs, gt_labels) + assert isinstance(losses, dict) + + # Test forward test + with torch.no_grad(): + img_list = [img[None, :] for img in imgs] + for one_img in img_list: + recognizer(one_img, None, return_loss=False) + + # Test forward gradcam + recognizer(imgs, gradcam=True) + for one_img in img_list: + recognizer(one_img, gradcam=True) + + +def test_slowfast(): + model, train_cfg, test_cfg = _get_recognizer_cfg( + 'slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb.py') + + recognizer = build_recognizer( + model, train_cfg=train_cfg, test_cfg=test_cfg) + + input_shape = (1, 3, 3, 8, 32, 32) + demo_inputs = generate_demo_inputs(input_shape, '3D') + + imgs = demo_inputs['imgs'] + gt_labels = demo_inputs['gt_labels'] + + # parrots 3dconv is only implemented on gpu + if torch.__version__ == 'parrots': + if torch.cuda.is_available(): + recognizer = recognizer.cuda() + imgs = imgs.cuda() + gt_labels = gt_labels.cuda() + losses = recognizer(imgs, gt_labels) + assert isinstance(losses, dict) + + # Test forward test + with torch.no_grad(): + img_list = [img[None, :] for img in imgs] + for one_img in img_list: + recognizer(one_img, None, return_loss=False) + + # Test forward gradcam + recognizer(imgs, gradcam=True) + for one_img in img_list: + recognizer(one_img, gradcam=True) + else: + losses = recognizer(imgs, gt_labels) + assert isinstance(losses, dict) + + # Test forward test + with torch.no_grad(): + img_list = [img[None, :] for img in imgs] + for one_img in img_list: + recognizer(one_img, None, return_loss=False) + + # Test forward gradcam + recognizer(imgs, gradcam=True) + for one_img in img_list: + recognizer(one_img, gradcam=True) + + +def test_tsm(): + model, train_cfg, test_cfg = _get_recognizer_cfg( + 'tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.py') + model['backbone']['pretrained'] = None + + recognizer = build_recognizer( + model, train_cfg=train_cfg, test_cfg=test_cfg) + + input_shape = (1, 8, 3, 32, 32) + demo_inputs = generate_demo_inputs(input_shape) + + imgs = demo_inputs['imgs'] + gt_labels = demo_inputs['gt_labels'] + + losses = recognizer(imgs, gt_labels) + assert isinstance(losses, dict) + + # Test forward test + with torch.no_grad(): + img_list = [img[None, :] for img in imgs] + for one_img in img_list: + recognizer(one_img, None, return_loss=False) + + # test twice sample + 3 crops + 
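The 48 in the "twice sample + 3 crops" input shape used just below presumably comes from the 1x1x8 sampling of the TSM config; a rough sketch of the assumed arithmetic:

```python
# Assumed test-time sampling for tsm_r50_1x1x8: 8 segments per clip,
# each clip sampled twice, with 3 spatial crops per sample.
num_segments = 8
num_clips = 2      # "twice sample"
num_crops = 3      # three-crop testing
frames_per_video = num_segments * num_clips * num_crops
assert frames_per_video == 48  # hence input_shape = (2, 48, 3, 32, 32)
```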
input_shape = (2, 48, 3, 32, 32) + demo_inputs = generate_demo_inputs(input_shape) + imgs = demo_inputs['imgs'] + + test_cfg = dict(average_clips='prob') + recognizer = build_recognizer( + model, train_cfg=train_cfg, test_cfg=test_cfg) + + # Test forward test + with torch.no_grad(): + img_list = [img[None, :] for img in imgs] + for one_img in img_list: + recognizer(one_img, None, return_loss=False) + + # Test forward gradcam + recognizer(imgs, gradcam=True) + for one_img in img_list: + recognizer(one_img, gradcam=True) + + +def test_csn(): + model, train_cfg, test_cfg = _get_recognizer_cfg( + 'csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py') + model['backbone']['pretrained2d'] = False + model['backbone']['pretrained'] = None + + recognizer = build_recognizer( + model, train_cfg=train_cfg, test_cfg=test_cfg) + + input_shape = (1, 3, 3, 8, 32, 32) + demo_inputs = generate_demo_inputs(input_shape, '3D') + + imgs = demo_inputs['imgs'] + gt_labels = demo_inputs['gt_labels'] + + # parrots 3dconv is only implemented on gpu + if torch.__version__ == 'parrots': + if torch.cuda.is_available(): + recognizer = recognizer.cuda() + imgs = imgs.cuda() + gt_labels = gt_labels.cuda() + losses = recognizer(imgs, gt_labels) + assert isinstance(losses, dict) + + # Test forward test + with torch.no_grad(): + img_list = [img[None, :] for img in imgs] + for one_img in img_list: + recognizer(one_img, None, return_loss=False) + + # Test forward gradcam + recognizer(imgs, gradcam=True) + for one_img in img_list: + recognizer(one_img, gradcam=True) + else: + losses = recognizer(imgs, gt_labels) + assert isinstance(losses, dict) + + # Test forward test + with torch.no_grad(): + img_list = [img[None, :] for img in imgs] + for one_img in img_list: + recognizer(one_img, None, return_loss=False) + + # Test forward gradcam + recognizer(imgs, gradcam=True) + for one_img in img_list: + recognizer(one_img, gradcam=True) + + +def test_tpn(): + model, train_cfg, test_cfg = _get_recognizer_cfg( + 'tpn/tpn_tsm_r50_1x1x8_150e_sthv1_rgb.py') + model['backbone']['pretrained'] = None + + recognizer = build_recognizer( + model, train_cfg=train_cfg, test_cfg=test_cfg) + + input_shape = (1, 8, 3, 224, 224) + demo_inputs = generate_demo_inputs(input_shape) + + imgs = demo_inputs['imgs'] + gt_labels = demo_inputs['gt_labels'] + + losses = recognizer(imgs, gt_labels) + assert isinstance(losses, dict) + + # Test forward test + with torch.no_grad(): + img_list = [img[None, :] for img in imgs] + for one_img in img_list: + recognizer(one_img, None, return_loss=False) + + # Test forward gradcam + recognizer(imgs, gradcam=True) + for one_img in img_list: + recognizer(one_img, gradcam=True) + + model, train_cfg, test_cfg = _get_recognizer_cfg( + 'tpn/tpn_slowonly_r50_8x8x1_150e_kinetics_rgb.py') + model['backbone']['pretrained'] = None + + recognizer = build_recognizer( + model, train_cfg=train_cfg, test_cfg=test_cfg) + + input_shape = (1, 8, 3, 1, 224, 224) + demo_inputs = generate_demo_inputs(input_shape, '3D') + + imgs = demo_inputs['imgs'] + gt_labels = demo_inputs['gt_labels'] + + losses = recognizer(imgs, gt_labels) + assert isinstance(losses, dict) + + # Test forward test + with torch.no_grad(): + img_list = [img[None, :] for img in imgs] + for one_img in img_list: + recognizer(one_img, None, return_loss=False) + + # Test forward gradcam + recognizer(imgs, gradcam=True) + for one_img in img_list: + recognizer(one_img, gradcam=True) + + +def test_audio_recognizer(): + model, train_cfg, test_cfg = _get_audio_recognizer_cfg( + 
'resnet/tsn_r18_64x1x1_100e_kinetics400_audio_feature.py') + model['backbone']['pretrained'] = None + + recognizer = build_recognizer( + model, train_cfg=train_cfg, test_cfg=test_cfg) + + input_shape = (1, 3, 1, 128, 80) + demo_inputs = generate_demo_inputs(input_shape, model_type='audio') + + audios = demo_inputs['imgs'] + gt_labels = demo_inputs['gt_labels'] + + losses = recognizer(audios, gt_labels) + assert isinstance(losses, dict) + + # Test forward test + with torch.no_grad(): + audio_list = [audio[None, :] for audio in audios] + for one_spectro in audio_list: + recognizer(one_spectro, None, return_loss=False) + + +def test_c3d(): + model, train_cfg, test_cfg = _get_recognizer_cfg( + 'c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py') + model['backbone']['pretrained'] = None + + recognizer = build_recognizer( + model, train_cfg=train_cfg, test_cfg=test_cfg) + + input_shape = (1, 3, 3, 16, 112, 112) + demo_inputs = generate_demo_inputs(input_shape, '3D') + + imgs = demo_inputs['imgs'] + gt_labels = demo_inputs['gt_labels'] + + losses = recognizer(imgs, gt_labels) + assert isinstance(losses, dict) + + # Test forward test + with torch.no_grad(): + img_list = [img[None, :] for img in imgs] + for one_img in img_list: + recognizer(one_img, None, return_loss=False) + + # Test forward gradcam + recognizer(imgs, gradcam=True) + for one_img in img_list: + recognizer(one_img, gradcam=True) + + +def generate_demo_inputs(input_shape=(1, 3, 3, 224, 224), model_type='2D'): + """Create a superset of inputs needed to run test or train batches. + + Args: + input_shape (tuple): input batch dimensions. + Default: (1, 250, 3, 224, 224). + model_type (str): Model type for data generation, from {'2D', '3D'}. + Default:'2D' + """ + if len(input_shape) == 5: + (N, L, C, H, W) = input_shape + elif len(input_shape) == 6: + (N, M, C, L, H, W) = input_shape + + imgs = np.random.random(input_shape) + + if model_type == '2D': + gt_labels = torch.LongTensor([2] * N) + elif model_type == '3D': + gt_labels = torch.LongTensor([2] * M) + elif model_type == 'audio': + gt_labels = torch.LongTensor([2] * L) + else: + raise ValueError(f'Data type {model_type} is not available') + + inputs = {'imgs': torch.FloatTensor(imgs), 'gt_labels': gt_labels} + return inputs diff --git a/Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_apis_test.py b/Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_apis_test.py new file mode 100644 index 0000000..29bb376 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_apis_test.py @@ -0,0 +1,106 @@ +import sys +from unittest.mock import MagicMock, Mock, patch + +import pytest +import torch +import torch.nn as nn +from torch.utils.data import DataLoader, Dataset + +from mmaction.apis.test import (collect_results_cpu, multi_gpu_test, + single_gpu_test) + + +class OldStyleModel(nn.Module): + + def __init__(self): + super().__init__() + self.conv = nn.Conv2d(3, 3, 1) + self.cnt = 0 + + def forward(self, return_loss, **kwargs): + result = [self.cnt] + self.cnt += 1 + return result + + +class Model(OldStyleModel): + + def train_step(self): + pass + + def val_step(self): + pass + + +class ExampleDataset(Dataset): + + def __init__(self): + self.index = 0 + self.eval_result = [1, 4, 3, 7, 2, -3, 4, 6] + + def __getitem__(self, idx): + results = dict(imgs=torch.tensor([1])) + return results + + def __len__(self): + return len(self.eval_result) + + +def test_single_gpu_test(): + test_dataset = ExampleDataset() + loader = DataLoader(test_dataset, 
batch_size=1) + model = Model() + + results = single_gpu_test(model, loader) + assert results == list(range(8)) + + +def mock_tensor_without_cuda(*args, **kwargs): + if 'device' not in kwargs: + return torch.Tensor(*args) + return torch.IntTensor(*args, device='cpu') + + +@patch('mmaction.apis.test.collect_results_gpu', + Mock(return_value=list(range(8)))) +@patch('mmaction.apis.test.collect_results_cpu', + Mock(return_value=list(range(8)))) +def test_multi_gpu_test(): + test_dataset = ExampleDataset() + loader = DataLoader(test_dataset, batch_size=1) + model = Model() + + results = multi_gpu_test(model, loader) + assert results == list(range(8)) + + results = multi_gpu_test(model, loader, gpu_collect=False) + assert results == list(range(8)) + + +@patch('mmcv.runner.get_dist_info', Mock(return_value=(0, 1))) +@patch('torch.distributed.broadcast', MagicMock) +@patch('torch.distributed.barrier', Mock) +@pytest.mark.skipif( + sys.version_info[:2] == (3, 8), reason='Not for python 3.8') +def test_collect_results_cpu(): + + def content_for_unittest(): + results_part = list(range(8)) + size = 8 + + results = collect_results_cpu(results_part, size) + assert results == list(range(8)) + + results = collect_results_cpu(results_part, size, 'unittest') + assert results == list(range(8)) + + if not torch.cuda.is_available(): + with patch( + 'torch.full', + Mock( + return_value=torch.full( + (512, ), 32, dtype=torch.uint8, device='cpu'))): + with patch('torch.tensor', mock_tensor_without_cuda): + content_for_unittest() + else: + content_for_unittest() diff --git a/Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_config.py b/Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_config.py new file mode 100644 index 0000000..7045b52 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_config.py @@ -0,0 +1,76 @@ +import glob +import os +import os.path as osp + +import mmcv +import torch.nn as nn + +from mmaction.models import build_localizer, build_recognizer + + +def _get_config_path(): + """Find the predefined recognizer config path.""" + repo_dir = osp.dirname(osp.dirname(osp.dirname(__file__))) + config_dpath = osp.join(repo_dir, 'configs') + if not osp.exists(config_dpath): + raise Exception('Cannot find config path') + config_fpaths = list(glob.glob(osp.join(config_dpath, '*.py'))) + config_names = [os.path.relpath(p, config_dpath) for p in config_fpaths] + print(f'Using {len(config_names)} config files') + config_fpaths = [ + osp.join(config_dpath, config_fpath) for config_fpath in config_fpaths + ] + return config_fpaths + + +def test_config_build_recognizer(): + """Test that all mmaction models defined in the configs can be + initialized.""" + repo_dir = osp.dirname(osp.dirname(osp.dirname(__file__))) + config_dpath = osp.join(repo_dir, 'configs/recognition') + if not osp.exists(config_dpath): + raise Exception('Cannot find config path') + config_fpaths = list(glob.glob(osp.join(config_dpath, '*.py'))) + # test all config file in `configs` directory + for config_fpath in config_fpaths: + config_mod = mmcv.Config.fromfile(config_fpath) + print(f'Building recognizer, config_fpath = {config_fpath!r}') + + # Remove pretrained keys to allow for testing in an offline environment + if 'pretrained' in config_mod.model['backbone']: + config_mod.model['backbone']['pretrained'] = None + + recognizer = build_recognizer( + config_mod.model, + train_cfg=config_mod.train_cfg, + test_cfg=config_mod.test_cfg) + assert isinstance(recognizer, nn.Module) + + +def 
_get_config_path_for_localizer(): + """Find the predefined localizer config path for localizer.""" + repo_dir = osp.dirname(osp.dirname(osp.dirname(__file__))) + config_dpath = osp.join(repo_dir, 'configs/localization') + if not osp.exists(config_dpath): + raise Exception('Cannot find config path') + config_fpaths = list(glob.glob(osp.join(config_dpath, '*.py'))) + config_names = [os.path.relpath(p, config_dpath) for p in config_fpaths] + print(f'Using {len(config_names)} config files') + config_fpaths = [ + osp.join(config_dpath, config_fpath) for config_fpath in config_fpaths + ] + return config_fpaths + + +def test_config_build_localizer(): + """Test that all mmaction models defined in the configs can be + initialized.""" + config_fpaths = _get_config_path_for_localizer() + + # test all config file in `configs/localization` directory + for config_fpath in config_fpaths: + config_mod = mmcv.Config.fromfile(config_fpath) + print(f'Building localizer, config_fpath = {config_fpath!r}') + if config_mod.get('model', None): + localizer = build_localizer(config_mod.model) + assert isinstance(localizer, nn.Module) diff --git a/Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_eval_hook.py b/Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_eval_hook.py new file mode 100644 index 0000000..af02be5 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_eval_hook.py @@ -0,0 +1,379 @@ +import os.path as osp +import tempfile +import unittest.mock as mock +from unittest.mock import MagicMock, patch + +import mmcv +import pytest +import torch +import torch.nn as nn +from mmcv.runner import EpochBasedRunner, build_optimizer +from mmcv.utils import get_logger +from torch.utils.data import DataLoader, Dataset + +from mmaction.core import DistEpochEvalHook, EpochEvalHook + + +class ExampleDataset(Dataset): + + def __init__(self): + self.index = 0 + self.eval_result = [1, 4, 3, 7, 2, -3, 4, 6] + + def __getitem__(self, idx): + results = dict(imgs=torch.tensor([1])) + return results + + def __len__(self): + return 1 + + @mock.create_autospec + def evaluate(self, results, logger=None): + pass + + +class EvalDataset(ExampleDataset): + + def evaluate(self, results, logger=None): + acc = self.eval_result[self.index] + output = dict(acc=acc, index=self.index, score=acc) + self.index += 1 + return output + + +class ExampleModel(nn.Module): + + def __init__(self): + super().__init__() + self.conv = nn.Linear(1, 1) + self.test_cfg = None + + def forward(self, imgs, return_loss=False): + return imgs + + def train_step(self, data_batch, optimizer, **kwargs): + outputs = { + 'loss': 0.5, + 'log_vars': { + 'accuracy': 0.98 + }, + 'num_samples': 1 + } + return outputs + + +def _build_demo_runner(): + + class Model(nn.Module): + + def __init__(self): + super().__init__() + self.linear = nn.Linear(2, 1) + + def forward(self, x): + return self.linear(x) + + def train_step(self, x, optimizer, **kwargs): + return dict(loss=self(x)) + + def val_step(self, x, optimizer, **kwargs): + return dict(loss=self(x)) + + model = Model() + tmp_dir = tempfile.mkdtemp() + + runner = EpochBasedRunner( + model=model, work_dir=tmp_dir, logger=get_logger('demo')) + return runner + + +def test_eval_hook(): + with pytest.raises(TypeError): + # `save_best` should be a boolean + test_dataset = ExampleModel() + data_loader = DataLoader( + test_dataset, + batch_size=1, + sampler=None, + num_workers=0, + shuffle=False) + EpochEvalHook(data_loader, save_best='True') + + with 
pytest.raises(TypeError): + # dataloader must be a pytorch DataLoader + test_dataset = ExampleModel() + data_loader = [ + DataLoader( + test_dataset, + batch_size=1, + sampler=None, + num_worker=0, + shuffle=False) + ] + EpochEvalHook(data_loader) + + with pytest.raises(ValueError): + # when `save_best` is True, `key_indicator` should not be None + test_dataset = ExampleModel() + data_loader = DataLoader( + test_dataset, + batch_size=1, + sampler=None, + num_workers=0, + shuffle=False) + EpochEvalHook(data_loader, key_indicator=None) + + with pytest.raises(KeyError): + # rule must be in keys of rule_map + test_dataset = ExampleModel() + data_loader = DataLoader( + test_dataset, + batch_size=1, + sampler=None, + num_workers=0, + shuffle=False) + EpochEvalHook(data_loader, save_best=False, rule='unsupport') + + with pytest.raises(ValueError): + # key_indicator must be valid when rule_map is None + test_dataset = ExampleModel() + data_loader = DataLoader( + test_dataset, + batch_size=1, + sampler=None, + num_workers=0, + shuffle=False) + EpochEvalHook(data_loader, key_indicator='unsupport') + + optimizer_cfg = dict( + type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) + + test_dataset = ExampleDataset() + loader = DataLoader(test_dataset, batch_size=1) + model = ExampleModel() + optimizer = build_optimizer(model, optimizer_cfg) + + data_loader = DataLoader(test_dataset, batch_size=1) + eval_hook = EpochEvalHook(data_loader, save_best=False) + with tempfile.TemporaryDirectory() as tmpdir: + logger = get_logger('test_eval') + runner = EpochBasedRunner( + model=model, + batch_processor=None, + optimizer=optimizer, + work_dir=tmpdir, + logger=logger) + runner.register_hook(eval_hook) + runner.run([loader], [('train', 1)], 1) + test_dataset.evaluate.assert_called_with( + test_dataset, [torch.tensor([1])], logger=runner.logger) + + best_json_path = osp.join(tmpdir, 'best.json') + assert not osp.exists(best_json_path) + + loader = DataLoader(EvalDataset(), batch_size=1) + model = ExampleModel() + data_loader = DataLoader(EvalDataset(), batch_size=1) + eval_hook = EpochEvalHook( + data_loader, interval=1, save_best=True, key_indicator='acc') + + with tempfile.TemporaryDirectory() as tmpdir: + logger = get_logger('test_eval') + runner = EpochBasedRunner( + model=model, + batch_processor=None, + optimizer=optimizer, + work_dir=tmpdir, + logger=logger) + runner.register_checkpoint_hook(dict(interval=1)) + runner.register_hook(eval_hook) + runner.run([loader], [('train', 1)], 8) + + best_json_path = osp.join(tmpdir, 'best.json') + best_json = mmcv.load(best_json_path) + real_path = osp.join(tmpdir, 'epoch_4.pth') + + assert best_json['best_ckpt'] == osp.realpath(real_path) + assert best_json['best_score'] == 7 + assert best_json['key_indicator'] == 'acc' + + data_loader = DataLoader(EvalDataset(), batch_size=1) + eval_hook = EpochEvalHook( + data_loader, + interval=1, + save_best=True, + key_indicator='score', + rule='greater') + with tempfile.TemporaryDirectory() as tmpdir: + logger = get_logger('test_eval') + runner = EpochBasedRunner( + model=model, + batch_processor=None, + optimizer=optimizer, + work_dir=tmpdir, + logger=logger) + runner.register_checkpoint_hook(dict(interval=1)) + runner.register_hook(eval_hook) + runner.run([loader], [('train', 1)], 8) + + best_json_path = osp.join(tmpdir, 'best.json') + best_json = mmcv.load(best_json_path) + real_path = osp.join(tmpdir, 'epoch_4.pth') + + assert best_json['best_ckpt'] == osp.realpath(real_path) + assert best_json['best_score'] == 7 + assert 
best_json['key_indicator'] == 'score' + + data_loader = DataLoader(EvalDataset(), batch_size=1) + eval_hook = EpochEvalHook(data_loader, rule='less', key_indicator='acc') + with tempfile.TemporaryDirectory() as tmpdir: + logger = get_logger('test_eval') + runner = EpochBasedRunner( + model=model, + batch_processor=None, + optimizer=optimizer, + work_dir=tmpdir, + logger=logger) + runner.register_checkpoint_hook(dict(interval=1)) + runner.register_hook(eval_hook) + runner.run([loader], [('train', 1)], 8) + + best_json_path = osp.join(tmpdir, 'best.json') + best_json = mmcv.load(best_json_path) + real_path = osp.join(tmpdir, 'epoch_6.pth') + + assert best_json['best_ckpt'] == osp.realpath(real_path) + assert best_json['best_score'] == -3 + assert best_json['key_indicator'] == 'acc' + + data_loader = DataLoader(EvalDataset(), batch_size=1) + eval_hook = EpochEvalHook(data_loader, key_indicator='acc') + with tempfile.TemporaryDirectory() as tmpdir: + logger = get_logger('test_eval') + runner = EpochBasedRunner( + model=model, + batch_processor=None, + optimizer=optimizer, + work_dir=tmpdir, + logger=logger) + runner.register_checkpoint_hook(dict(interval=1)) + runner.register_hook(eval_hook) + runner.run([loader], [('train', 1)], 2) + + best_json_path = osp.join(tmpdir, 'best.json') + best_json = mmcv.load(best_json_path) + real_path = osp.join(tmpdir, 'epoch_2.pth') + + assert best_json['best_ckpt'] == osp.realpath(real_path) + assert best_json['best_score'] == 4 + assert best_json['key_indicator'] == 'acc' + + resume_from = osp.join(tmpdir, 'latest.pth') + loader = DataLoader(ExampleDataset(), batch_size=1) + eval_hook = EpochEvalHook(data_loader, key_indicator='acc') + runner = EpochBasedRunner( + model=model, + batch_processor=None, + optimizer=optimizer, + work_dir=tmpdir, + logger=logger) + runner.register_checkpoint_hook(dict(interval=1)) + runner.register_hook(eval_hook) + runner.resume(resume_from) + runner.run([loader], [('train', 1)], 8) + + best_json_path = osp.join(tmpdir, 'best.json') + best_json = mmcv.load(best_json_path) + real_path = osp.join(tmpdir, 'epoch_4.pth') + + assert best_json['best_ckpt'] == osp.realpath(real_path) + assert best_json['best_score'] == 7 + assert best_json['key_indicator'] == 'acc' + + +@patch('mmaction.apis.single_gpu_test', MagicMock) +@patch('mmaction.apis.multi_gpu_test', MagicMock) +@pytest.mark.parametrize('EpochEvalHookParam', + (EpochEvalHook, DistEpochEvalHook)) +def test_start_param(EpochEvalHookParam): + # create dummy data + dataloader = DataLoader(torch.ones((5, 2))) + + # 0.1. dataloader is not a DataLoader object + with pytest.raises(TypeError): + EpochEvalHookParam(dataloader=MagicMock(), interval=-1) + + # 0.2. negative interval + with pytest.raises(ValueError): + EpochEvalHookParam(dataloader, interval=-1) + + # 1. start=None, interval=1: perform evaluation after each epoch. + runner = _build_demo_runner() + evalhook = EpochEvalHookParam(dataloader, interval=1, save_best=False) + evalhook.evaluate = MagicMock() + runner.register_hook(evalhook) + runner.run([dataloader], [('train', 1)], 2) + assert evalhook.evaluate.call_count == 2 # after epoch 1 & 2 + + # 2. start=1, interval=1: perform evaluation after each epoch. + runner = _build_demo_runner() + evalhook = EpochEvalHookParam( + dataloader, start=1, interval=1, save_best=False) + evalhook.evaluate = MagicMock() + runner.register_hook(evalhook) + runner.run([dataloader], [('train', 1)], 2) + assert evalhook.evaluate.call_count == 2 # after epoch 1 & 2 + + # 3. 
start=None, interval=2: perform evaluation after epoch 2, 4, 6, etc + runner = _build_demo_runner() + evalhook = EpochEvalHookParam(dataloader, interval=2, save_best=False) + evalhook.evaluate = MagicMock() + runner.register_hook(evalhook) + runner.run([dataloader], [('train', 1)], 2) + assert evalhook.evaluate.call_count == 1 # after epoch 2 + + # 4. start=1, interval=2: perform evaluation after epoch 1, 3, 5, etc + runner = _build_demo_runner() + evalhook = EpochEvalHookParam( + dataloader, start=1, interval=2, save_best=False) + evalhook.evaluate = MagicMock() + runner.register_hook(evalhook) + runner.run([dataloader], [('train', 1)], 3) + assert evalhook.evaluate.call_count == 2 # after epoch 1 & 3 + + # 5. start=0/negative, interval=1: perform evaluation after each epoch and + # before epoch 1. + runner = _build_demo_runner() + evalhook = EpochEvalHookParam(dataloader, start=0, save_best=False) + evalhook.evaluate = MagicMock() + runner.register_hook(evalhook) + runner.run([dataloader], [('train', 1)], 2) + assert evalhook.evaluate.call_count == 3 # before epoch1 and after e1 & e2 + + runner = _build_demo_runner() + with pytest.warns(UserWarning): + evalhook = EpochEvalHookParam(dataloader, start=-2, save_best=False) + evalhook.evaluate = MagicMock() + runner.register_hook(evalhook) + runner.run([dataloader], [('train', 1)], 2) + assert evalhook.evaluate.call_count == 3 # before epoch1 and after e1 & e2 + + # 6. resuming from epoch i, start = x (x<=i), interval =1: perform + # evaluation after each epoch and before the first epoch. + runner = _build_demo_runner() + evalhook = EpochEvalHookParam(dataloader, start=1, save_best=False) + evalhook.evaluate = MagicMock() + runner.register_hook(evalhook) + runner._epoch = 2 + runner.run([dataloader], [('train', 1)], 3) + assert evalhook.evaluate.call_count == 2 # before & after epoch 3 + + # 7. resuming from epoch i, start = i+1/None, interval =1: perform + # evaluation after each epoch. 
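The start/interval cases enumerated in these comments follow a simple schedule; a hedged sketch of the rule they imply (not the mmcv implementation, and ignoring the extra pre-run evaluation triggered when `start` is at or below the current epoch):

```python
def should_evaluate(epoch, start=None, interval=1):
    """Evaluation schedule implied by cases 1-4 above.

    `epoch` is 1-based and checked after the epoch finishes.
    """
    if start is None:
        # evaluate every `interval` epochs: 2, 4, 6, ... for interval=2
        return epoch % interval == 0
    if epoch < start:
        return False
    # evaluate at start, start+interval, ...: 1, 3, 5, ... for start=1, interval=2
    return (epoch - start) % interval == 0


assert [e for e in range(1, 7) if should_evaluate(e, interval=2)] == [2, 4, 6]
assert [e for e in range(1, 7) if should_evaluate(e, start=1, interval=2)] == [1, 3, 5]
```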
+ runner = _build_demo_runner() + evalhook = EpochEvalHookParam(dataloader, start=2, save_best=False) + evalhook.evaluate = MagicMock() + runner.register_hook(evalhook) + runner._epoch = 1 + runner.run([dataloader], [('train', 1)], 3) + assert evalhook.evaluate.call_count == 2 # after epoch 2 & 3 diff --git a/Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_lr.py b/Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_lr.py new file mode 100644 index 0000000..f2f6a81 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_lr.py @@ -0,0 +1,117 @@ +import logging +import shutil +import sys +import tempfile +from unittest.mock import MagicMock, call + +import torch +import torch.nn as nn +from mmcv.runner import IterTimerHook, PaviLoggerHook, build_runner +from torch.utils.data import DataLoader + + +def test_tin_lr_updater_hook(): + sys.modules['pavi'] = MagicMock() + loader = DataLoader(torch.ones((10, 2))) + runner = _build_demo_runner() + + hook_cfg = dict(type='TINLrUpdaterHook', min_lr=0.1) + runner.register_hook_from_cfg(hook_cfg) + + hook_cfg = dict( + type='TINLrUpdaterHook', + by_epoch=False, + min_lr=0.1, + warmup='exp', + warmup_iters=2, + warmup_ratio=0.9) + runner.register_hook_from_cfg(hook_cfg) + runner.register_hook_from_cfg(dict(type='IterTimerHook')) + runner.register_hook(IterTimerHook()) + + hook_cfg = dict( + type='TINLrUpdaterHook', + by_epoch=False, + min_lr=0.1, + warmup='constant', + warmup_iters=2, + warmup_ratio=0.9) + runner.register_hook_from_cfg(hook_cfg) + runner.register_hook_from_cfg(dict(type='IterTimerHook')) + runner.register_hook(IterTimerHook()) + + hook_cfg = dict( + type='TINLrUpdaterHook', + by_epoch=False, + min_lr=0.1, + warmup='linear', + warmup_iters=2, + warmup_ratio=0.9) + runner.register_hook_from_cfg(hook_cfg) + runner.register_hook_from_cfg(dict(type='IterTimerHook')) + runner.register_hook(IterTimerHook()) + # add pavi hook + hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True) + runner.register_hook(hook) + runner.run([loader], [('train', 1)]) + shutil.rmtree(runner.work_dir) + + assert hasattr(hook, 'writer') + calls = [ + call('train', { + 'learning_rate': 0.028544155877284292, + 'momentum': 0.95 + }, 1), + call('train', { + 'learning_rate': 0.04469266270539641, + 'momentum': 0.95 + }, 6), + call('train', { + 'learning_rate': 0.09695518130045147, + 'momentum': 0.95 + }, 10) + ] + hook.writer.add_scalars.assert_has_calls(calls, any_order=True) + + +def _build_demo_runner(runner_type='EpochBasedRunner', + max_epochs=1, + max_iters=None): + + class Model(nn.Module): + + def __init__(self): + super().__init__() + self.linear = nn.Linear(2, 1) + + def forward(self, x): + return self.linear(x) + + def train_step(self, x, optimizer, **kwargs): + return dict(loss=self(x)) + + def val_step(self, x, optimizer, **kwargs): + return dict(loss=self(x)) + + model = Model() + + optimizer = torch.optim.SGD(model.parameters(), lr=0.02, momentum=0.95) + + log_config = dict( + interval=1, hooks=[ + dict(type='TextLoggerHook'), + ]) + + tmp_dir = tempfile.mkdtemp() + runner = build_runner( + dict(type=runner_type), + default_args=dict( + model=model, + work_dir=tmp_dir, + optimizer=optimizer, + logger=logging.getLogger(), + max_epochs=max_epochs, + max_iters=max_iters)) + runner.register_checkpoint_hook(dict(interval=1)) + runner.register_logger_hooks(log_config) + return runner diff --git a/Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_onnx.py 
b/Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_onnx.py new file mode 100644 index 0000000..22fc097 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_onnx.py @@ -0,0 +1,28 @@ +import os.path as osp +import tempfile + +import torch.nn as nn +from tools.pytorch2onnx import _convert_batchnorm, pytorch2onnx + + +class TestModel(nn.Module): + + def __init__(self): + super().__init__() + self.conv = nn.Conv3d(1, 2, 1) + self.bn = nn.SyncBatchNorm(2) + + def forward(self, x): + return self.bn(self.conv(x)) + + def forward_dummy(self, x): + return (self.forward(x), ) + + +def test_onnx_exporting(): + with tempfile.TemporaryDirectory() as tmpdir: + out_file = osp.join(tmpdir, 'tmp.onnx') + model = TestModel() + model = _convert_batchnorm(model) + # test exporting + pytorch2onnx(model, (1, 1, 1, 1, 1), output_file=out_file) diff --git a/Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_optimizer.py b/Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_optimizer.py new file mode 100644 index 0000000..0ff8ef9 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_optimizer.py @@ -0,0 +1,213 @@ +import torch +import torch.nn as nn +from mmcv.runner import build_optimizer_constructor + + +class SubModel(nn.Module): + + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(2, 2, kernel_size=1, groups=2) + self.gn = nn.GroupNorm(2, 2) + self.fc = nn.Linear(2, 2) + self.param1 = nn.Parameter(torch.ones(1)) + + def forward(self, x): + return x + + +class ExampleModel(nn.Module): + + def __init__(self): + super().__init__() + self.param1 = nn.Parameter(torch.ones(1)) + self.conv1 = nn.Conv2d(3, 4, kernel_size=1, bias=False) + self.conv2 = nn.Conv2d(4, 2, kernel_size=1) + self.bn = nn.BatchNorm2d(2) + self.sub = SubModel() + self.fc = nn.Linear(2, 1) + + def forward(self, x): + return x + + +class PseudoDataParallel(nn.Module): + + def __init__(self): + super().__init__() + self.module = ExampleModel() + + def forward(self, x): + return x + + +base_lr = 0.01 +base_wd = 0.0001 +momentum = 0.9 + + +def check_optimizer(optimizer, + model, + prefix='', + bias_lr_mult=1, + bias_decay_mult=1, + norm_decay_mult=1, + dwconv_decay_mult=1): + param_groups = optimizer.param_groups + assert isinstance(optimizer, torch.optim.SGD) + assert optimizer.defaults['lr'] == base_lr + assert optimizer.defaults['momentum'] == momentum + assert optimizer.defaults['weight_decay'] == base_wd + model_parameters = list(model.parameters()) + assert len(param_groups) == len(model_parameters) + for i, param in enumerate(model_parameters): + param_group = param_groups[i] + assert torch.equal(param_group['params'][0], param) + assert param_group['momentum'] == momentum + # param1 + param1 = param_groups[0] + assert param1['lr'] == base_lr + assert param1['weight_decay'] == base_wd + # conv1.weight + conv1_weight = param_groups[1] + assert conv1_weight['lr'] == base_lr + assert conv1_weight['weight_decay'] == base_wd + # conv2.weight + conv2_weight = param_groups[2] + assert conv2_weight['lr'] == base_lr + assert conv2_weight['weight_decay'] == base_wd + # conv2.bias + conv2_bias = param_groups[3] + assert conv2_bias['lr'] == base_lr * bias_lr_mult + assert conv2_bias['weight_decay'] == base_wd * bias_decay_mult + # bn.weight + bn_weight = param_groups[4] + assert bn_weight['lr'] == base_lr + assert bn_weight['weight_decay'] == base_wd * norm_decay_mult + # bn.bias + bn_bias = param_groups[5] + assert bn_bias['lr'] == base_lr + 
assert bn_bias['weight_decay'] == base_wd * norm_decay_mult + # sub.param1 + sub_param1 = param_groups[6] + assert sub_param1['lr'] == base_lr + assert sub_param1['weight_decay'] == base_wd + # sub.conv1.weight + sub_conv1_weight = param_groups[7] + assert sub_conv1_weight['lr'] == base_lr + assert sub_conv1_weight['weight_decay'] == base_wd * dwconv_decay_mult + # sub.conv1.bias + sub_conv1_bias = param_groups[8] + assert sub_conv1_bias['lr'] == base_lr * bias_lr_mult + assert sub_conv1_bias['weight_decay'] == base_wd * dwconv_decay_mult + # sub.gn.weight + sub_gn_weight = param_groups[9] + assert sub_gn_weight['lr'] == base_lr + assert sub_gn_weight['weight_decay'] == base_wd * norm_decay_mult + # sub.gn.bias + sub_gn_bias = param_groups[10] + assert sub_gn_bias['lr'] == base_lr + assert sub_gn_bias['weight_decay'] == base_wd * norm_decay_mult + # sub.fc1.weight + sub_fc_weight = param_groups[11] + assert sub_fc_weight['lr'] == base_lr + assert sub_fc_weight['weight_decay'] == base_wd + # sub.fc1.bias + sub_fc_bias = param_groups[12] + assert sub_fc_bias['lr'] == base_lr * bias_lr_mult + assert sub_fc_bias['weight_decay'] == base_wd * bias_decay_mult + # fc1.weight + fc_weight = param_groups[13] + assert fc_weight['lr'] == base_lr + assert fc_weight['weight_decay'] == base_wd + # fc1.bias + fc_bias = param_groups[14] + assert fc_bias['lr'] == base_lr * bias_lr_mult + assert fc_bias['weight_decay'] == base_wd * bias_decay_mult + + +def check_tsm_optimizer(optimizer, model, fc_lr5=True): + param_groups = optimizer.param_groups + assert isinstance(optimizer, torch.optim.SGD) + assert optimizer.defaults['lr'] == base_lr + assert optimizer.defaults['momentum'] == momentum + assert optimizer.defaults['weight_decay'] == base_wd + model_parameters = list(model.parameters()) + # first_conv_weight + first_conv_weight = param_groups[0] + assert torch.equal(first_conv_weight['params'][0], model_parameters[1]) + assert first_conv_weight['lr'] == base_lr + assert first_conv_weight['weight_decay'] == base_wd + # first_conv_bias + first_conv_bias = param_groups[1] + assert first_conv_bias['params'] == [] + assert first_conv_bias['lr'] == base_lr * 2 + assert first_conv_bias['weight_decay'] == 0 + # normal_weight + normal_weight = param_groups[2] + assert torch.equal(normal_weight['params'][0], model_parameters[2]) + assert torch.equal(normal_weight['params'][1], model_parameters[7]) + assert normal_weight['lr'] == base_lr + assert normal_weight['weight_decay'] == base_wd + # normal_bias + normal_bias = param_groups[3] + assert torch.equal(normal_bias['params'][0], model_parameters[3]) + assert torch.equal(normal_bias['params'][1], model_parameters[8]) + assert normal_bias['lr'] == base_lr * 2 + assert normal_bias['weight_decay'] == 0 + # bn + bn = param_groups[4] + assert torch.equal(bn['params'][0], model_parameters[4]) + assert torch.equal(bn['params'][1], model_parameters[5]) + assert torch.equal(bn['params'][2], model_parameters[9]) + assert torch.equal(bn['params'][3], model_parameters[10]) + assert bn['lr'] == base_lr + assert bn['weight_decay'] == 0 + # normal linear weight + assert torch.equal(normal_weight['params'][2], model_parameters[11]) + # normal linear bias + assert torch.equal(normal_bias['params'][2], model_parameters[12]) + # fc_lr5 + lr5_weight = param_groups[5] + lr10_bias = param_groups[6] + assert lr5_weight['lr'] == base_lr * 5 + assert lr5_weight['weight_decay'] == base_wd + assert lr10_bias['lr'] == base_lr * 10 + assert lr10_bias['weight_decay'] == 0 + if fc_lr5: + # 
lr5_weight + assert torch.equal(lr5_weight['params'][0], model_parameters[13]) + # lr10_bias + assert torch.equal(lr10_bias['params'][0], model_parameters[14]) + else: + # lr5_weight + assert lr5_weight['params'] == [] + # lr10_bias + assert lr10_bias['params'] == [] + assert torch.equal(normal_weight['params'][3], model_parameters[13]) + assert torch.equal(normal_bias['params'][3], model_parameters[14]) + + +def test_tsm_optimizer_constructor(): + model = ExampleModel() + optimizer_cfg = dict( + type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum) + # fc_lr5 is True + paramwise_cfg = dict(fc_lr5=True) + optim_constructor_cfg = dict( + type='TSMOptimizerConstructor', + optimizer_cfg=optimizer_cfg, + paramwise_cfg=paramwise_cfg) + optim_constructor = build_optimizer_constructor(optim_constructor_cfg) + optimizer = optim_constructor(model) + check_tsm_optimizer(optimizer, model, **paramwise_cfg) + + # fc_lr5 is False + paramwise_cfg = dict(fc_lr5=False) + optim_constructor_cfg = dict( + type='TSMOptimizerConstructor', + optimizer_cfg=optimizer_cfg, + paramwise_cfg=paramwise_cfg) + optim_constructor = build_optimizer_constructor(optim_constructor_cfg) + optimizer = optim_constructor(model) + check_tsm_optimizer(optimizer, model, **paramwise_cfg) diff --git a/Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_train.py b/Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_train.py new file mode 100644 index 0000000..856a7f6 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_train.py @@ -0,0 +1,111 @@ +import tempfile + +import pytest +import torch +import torch.nn as nn +from mmcv import Config +from torch.utils.data import Dataset + +from mmaction.apis import train_model +from mmaction.datasets.registry import DATASETS + + +@DATASETS.register_module() +class ExampleDataset(Dataset): + + def __init__(self, test_mode=False): + self.test_mode = test_mode + + def evaluate(self, results, logger=None): + eval_results = dict() + eval_results['acc'] = 1 + return eval_results + + def __getitem__(self, idx): + results = dict(imgs=torch.tensor([1])) + return results + + def __len__(self): + return 1 + + +class ExampleModel(nn.Module): + + def __init__(self): + super().__init__() + self.test_cfg = None + self.conv1 = nn.Conv2d(3, 8, kernel_size=1) + self.norm1 = nn.BatchNorm1d(2) + + def forward(self, imgs, return_loss=False): + self.norm1(torch.rand(3, 2).cuda()) + losses = dict() + losses['test_loss'] = torch.tensor([0.5], requires_grad=True) + return losses + + def train_step(self, data_batch, optimizer, **kwargs): + imgs = data_batch['imgs'] + losses = self.forward(imgs, True) + loss = torch.tensor([0.5], requires_grad=True) + outputs = dict(loss=loss, log_vars=losses, num_samples=3) + return outputs + + def val_step(self, data_batch, optimizer, **kwargs): + imgs = data_batch['imgs'] + self.forward(imgs, False) + outputs = dict(results=0.5) + return outputs + + +@pytest.mark.skipif( + not torch.cuda.is_available(), reason='requires CUDA support') +def test_train_model(): + model = ExampleModel() + dataset = ExampleDataset() + datasets = [ExampleDataset(), ExampleDataset()] + cfg = dict( + seed=0, + gpus=1, + gpu_ids=[0], + resume_from=None, + load_from=None, + workflow=[('train', 1)], + total_epochs=5, + evaluation=dict(interval=1, key_indicator='acc'), + data=dict( + videos_per_gpu=1, + workers_per_gpu=0, + val=dict(type='ExampleDataset')), + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001), + 
optimizer_config=dict(grad_clip=dict(max_norm=40, norm_type=2)), + lr_config=dict(policy='step', step=[40, 80]), + omnisource=False, + checkpoint_config=dict(interval=1), + log_level='INFO', + log_config=dict(interval=20, hooks=[dict(type='TextLoggerHook')])) + + with tempfile.TemporaryDirectory() as tmpdir: + # normal train + cfg['work_dir'] = tmpdir + config = Config(cfg) + train_model(model, dataset, config) + + with tempfile.TemporaryDirectory() as tmpdir: + # train with validation + cfg['work_dir'] = tmpdir + config = Config(cfg) + train_model(model, dataset, config, validate=True) + + with tempfile.TemporaryDirectory() as tmpdir: + # train with Fp16OptimizerHook + cfg['work_dir'] = tmpdir + cfg['fp16'] = dict(loss_scale=512.) + config = Config(cfg) + model.fp16_enabled = None + train_model(model, dataset, config) + + with tempfile.TemporaryDirectory() as tmpdir: + cfg['work_dir'] = tmpdir + cfg['omnisource'] = True + config = Config(cfg) + train_model(model, datasets, config) diff --git a/Downstream/Open-Set-Action-Recognition/tools/__init__.py b/Downstream/Open-Set-Action-Recognition/tools/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Downstream/Open-Set-Action-Recognition/tools/analysis/analyze_logs.py b/Downstream/Open-Set-Action-Recognition/tools/analysis/analyze_logs.py new file mode 100644 index 0000000..84380d2 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/analysis/analyze_logs.py @@ -0,0 +1,166 @@ +import argparse +import json +from collections import defaultdict + +import matplotlib.pyplot as plt +import numpy as np +import seaborn as sns + + +def cal_train_time(log_dicts, args): + for i, log_dict in enumerate(log_dicts): + print(f'{"-" * 5}Analyze train time of {args.json_logs[i]}{"-" * 5}') + all_times = [] + for epoch in log_dict.keys(): + if args.include_outliers: + all_times.append(log_dict[epoch]['time']) + else: + all_times.append(log_dict[epoch]['time'][1:]) + all_times = np.array(all_times) + epoch_ave_time = all_times.mean(-1) + slowest_epoch = epoch_ave_time.argmax() + fastest_epoch = epoch_ave_time.argmin() + std_over_epoch = epoch_ave_time.std() + print(f'slowest epoch {slowest_epoch + 1}, ' + f'average time is {epoch_ave_time[slowest_epoch]:.4f}') + print(f'fastest epoch {fastest_epoch + 1}, ' + f'average time is {epoch_ave_time[fastest_epoch]:.4f}') + print(f'time std over epochs is {std_over_epoch:.4f}') + print(f'average iter time: {np.mean(all_times):.4f} s/iter') + print() + + +def plot_curve(log_dicts, args): + if args.backend is not None: + plt.switch_backend(args.backend) + sns.set_style(args.style) + # if legend is None, use {filename}_{key} as legend + legend = args.legend + if legend is None: + legend = [] + for json_log in args.json_logs: + for metric in args.keys: + legend.append(f'{json_log}_{metric}') + assert len(legend) == (len(args.json_logs) * len(args.keys)) + metrics = args.keys + + num_metrics = len(metrics) + for i, log_dict in enumerate(log_dicts): + epochs = list(log_dict.keys()) + for j, metric in enumerate(metrics): + print(f'plot curve of {args.json_logs[i]}, metric is {metric}') + if metric not in log_dict[epochs[0]]: + raise KeyError( + f'{args.json_logs[i]} does not contain metric {metric}') + xs = [] + ys = [] + num_iters_per_epoch = log_dict[epochs[0]]['iter'][-1] + for epoch in epochs: + iters = log_dict[epoch]['iter'] + if log_dict[epoch]['mode'][-1] == 'val': + iters = iters[:-1] + xs.append(np.array(iters) + (epoch - 1) * num_iters_per_epoch) + 
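The per-epoch dict indexed here is built by `load_json_logs` further down; a small sketch of the assumed log-line format it parses, using a hypothetical record (the keys mirror the ones this script references: `epoch`, `iter`, `mode`, and metric names such as `top1_acc`):

```python
import json
from collections import defaultdict

# Hypothetical line from a training json log; real logs hold one dict per line.
line = '{"mode": "train", "epoch": 1, "iter": 20, "lr": 0.01, "top1_acc": 0.25}'

log = json.loads(line)
epoch = log.pop('epoch')          # lines without 'epoch' are skipped
log_dict = {epoch: defaultdict(list)}
for k, v in log.items():
    log_dict[epoch][k].append(v)  # e.g. log_dict[1]['top1_acc'] == [0.25]

assert log_dict[1]['iter'] == [20]
```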
ys.append(np.array(log_dict[epoch][metric][:len(iters)])) + xs = np.concatenate(xs) + ys = np.concatenate(ys) + plt.xlabel('iter') + plt.plot(xs, ys, label=legend[i * num_metrics + j], linewidth=0.5) + plt.legend() + if args.title is not None: + plt.title(args.title) + if args.out is None: + plt.show() + else: + print(f'save curve to: {args.out}') + plt.savefig(args.out) + plt.cla() + + +def add_plot_parser(subparsers): + parser_plt = subparsers.add_parser( + 'plot_curve', help='parser for plotting curves') + parser_plt.add_argument( + 'json_logs', + type=str, + nargs='+', + help='path of train log in json format') + parser_plt.add_argument( + '--keys', + type=str, + nargs='+', + default=['top1_acc'], + help='the metric that you want to plot') + parser_plt.add_argument('--title', type=str, help='title of figure') + parser_plt.add_argument( + '--legend', + type=str, + nargs='+', + default=None, + help='legend of each plot') + parser_plt.add_argument( + '--backend', type=str, default=None, help='backend of plt') + parser_plt.add_argument( + '--style', type=str, default='dark', help='style of plt') + parser_plt.add_argument('--out', type=str, default=None) + + +def add_time_parser(subparsers): + parser_time = subparsers.add_parser( + 'cal_train_time', + help='parser for computing the average time per training iteration') + parser_time.add_argument( + 'json_logs', + type=str, + nargs='+', + help='path of train log in json format') + parser_time.add_argument( + '--include-outliers', + action='store_true', + help='include the first value of every epoch when computing ' + 'the average time') + + +def parse_args(): + parser = argparse.ArgumentParser(description='Analyze Json Log') + # currently only support plot curve and calculate average train time + subparsers = parser.add_subparsers(dest='task', help='task parser') + add_plot_parser(subparsers) + add_time_parser(subparsers) + args = parser.parse_args() + return args + + +def load_json_logs(json_logs): + # load and convert json_logs to log_dict, key is epoch, value is a sub dict + # keys of sub dict is different metrics, e.g. memory, top1_acc + # value of sub dict is a list of corresponding values of all iterations + log_dicts = [dict() for _ in json_logs] + for json_log, log_dict in zip(json_logs, log_dicts): + with open(json_log, 'r') as log_file: + for line in log_file: + log = json.loads(line.strip()) + # skip lines without `epoch` field + if 'epoch' not in log: + continue + epoch = log.pop('epoch') + if epoch not in log_dict: + log_dict[epoch] = defaultdict(list) + for k, v in log.items(): + log_dict[epoch][k].append(v) + return log_dicts + + +def main(): + args = parse_args() + + json_logs = args.json_logs + for json_log in json_logs: + assert json_log.endswith('.json') + + log_dicts = load_json_logs(json_logs) + + eval(args.task)(log_dicts, args) + + +if __name__ == '__main__': + main() diff --git a/Downstream/Open-Set-Action-Recognition/tools/analysis/bench_processing.py b/Downstream/Open-Set-Action-Recognition/tools/analysis/bench_processing.py new file mode 100644 index 0000000..3a44603 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/analysis/bench_processing.py @@ -0,0 +1,63 @@ +"""This file is for benchmark dataloading process. 
The command line to run this +file is: + +$ python -m cProfile -o program.prof tools/analysis/bench_processing.py +configs/task/method/[config filename] + +It use cProfile to record cpu running time and output to program.prof +To visualize cProfile output program.prof, use Snakeviz and run: +$ snakeviz program.prof +""" +import argparse +import os + +import mmcv +from mmcv import Config + +from mmaction import __version__ +from mmaction.datasets import build_dataloader, build_dataset +from mmaction.utils import get_root_logger + + +def main(): + parser = argparse.ArgumentParser(description='Benchmark dataloading') + parser.add_argument('config', help='train config file path') + args = parser.parse_args() + cfg = Config.fromfile(args.config) + + # init logger before other steps + logger = get_root_logger() + logger.info(f'MMAction2 Version: {__version__}') + logger.info(f'Config: {cfg.text}') + + # create bench data list + ann_file_bench = 'benchlist.txt' + if not os.path.exists(ann_file_bench): + with open(cfg.ann_file_train) as f: + lines = f.readlines()[:256] + with open(ann_file_bench, 'w') as f1: + f1.writelines(lines) + cfg.data.train.ann_file = ann_file_bench + + dataset = build_dataset(cfg.data.train) + data_loader = build_dataloader( + dataset, + videos_per_gpu=cfg.data.videos_per_gpu, + workers_per_gpu=0, + num_gpus=1, + dist=False) + + # Start progress bar after first 5 batches + prog_bar = mmcv.ProgressBar( + len(dataset) - 5 * cfg.data.videos_per_gpu, start=False) + for i, data in enumerate(data_loader): + if i == 5: + prog_bar.start() + for _ in data['imgs']: + if i < 5: + continue + prog_bar.update() + + +if __name__ == '__main__': + main() diff --git a/Downstream/Open-Set-Action-Recognition/tools/analysis/benchmark.py b/Downstream/Open-Set-Action-Recognition/tools/analysis/benchmark.py new file mode 100644 index 0000000..2981335 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/analysis/benchmark.py @@ -0,0 +1,91 @@ +import argparse +import time + +import torch +from mmcv import Config +from mmcv.cnn import fuse_conv_bn +from mmcv.parallel import MMDataParallel +from mmcv.runner.fp16_utils import wrap_fp16_model + +from mmaction.datasets import build_dataloader, build_dataset +from mmaction.models import build_model + + +def parse_args(): + parser = argparse.ArgumentParser( + description='MMAction2 benchmark a recognizer') + parser.add_argument('config', help='test config file path') + parser.add_argument( + '--log-interval', default=10, help='interval of logging') + parser.add_argument( + '--fuse-conv-bn', + action='store_true', + help='Whether to fuse conv and bn, this will slightly increase' + 'the inference speed') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + # set cudnn_benchmark + if cfg.get('cudnn_benchmark', False): + torch.backends.cudnn.benchmark = True + cfg.model.backbone.pretrained = None + cfg.data.test.test_mode = True + + # build the dataloader + dataset = build_dataset(cfg.data.test, dict(test_mode=True)) + data_loader = build_dataloader( + dataset, + videos_per_gpu=1, + workers_per_gpu=cfg.data.workers_per_gpu, + dist=False, + shuffle=False) + + # build the model and load checkpoint + model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg) + fp16_cfg = cfg.get('fp16', None) + if fp16_cfg is not None: + wrap_fp16_model(model) + if args.fuse_conv_bn: + model = fuse_conv_bn(model) + + model = MMDataParallel(model, device_ids=[0]) + + model.eval() + + # 
the first several iterations may be very slow so skip them + num_warmup = 5 + pure_inf_time = 0 + + # benchmark with 200 videos and take the average + for i, data in enumerate(data_loader): + + torch.cuda.synchronize() + start_time = time.perf_counter() + + with torch.no_grad(): + model(return_loss=False, **data) + + torch.cuda.synchronize() + elapsed = time.perf_counter() - start_time + + if i >= num_warmup: + pure_inf_time += elapsed + if (i + 1) % args.log_interval == 0: + fps = (i + 1 - num_warmup) / pure_inf_time + print( + f'Done video [{i + 1:<3}/ 200], fps: {fps:.1f} video / s') + + if (i + 1) == 200: + fps = (i + 1 - num_warmup) / pure_inf_time + print(f'Overall fps: {fps:.1f} video / s') + break + + +if __name__ == '__main__': + main() diff --git a/Downstream/Open-Set-Action-Recognition/tools/analysis/eval_metric.py b/Downstream/Open-Set-Action-Recognition/tools/analysis/eval_metric.py new file mode 100644 index 0000000..ca7d0b1 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/analysis/eval_metric.py @@ -0,0 +1,65 @@ +import argparse + +import mmcv +from mmcv import Config, DictAction + +from mmaction.datasets import build_dataset + + +def parse_args(): + parser = argparse.ArgumentParser(description='Evaluate metric of the ' + 'results saved in pkl/yaml/json format') + parser.add_argument('config', help='Config of the model') + parser.add_argument('results', help='Results in pkl/yaml/json format') + parser.add_argument( + '--eval', + type=str, + nargs='+', + help='evaluation metrics, which depend on the dataset, e.g.,' + ' "top_k_accuracy", "mean_class_accuracy" for video dataset') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + default={}, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. 
For example, ' + "'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'") + parser.add_argument( + '--eval-options', + nargs='+', + action=DictAction, + help='custom options for evaluation, the key-value pair in xxx=yyy ' + 'format will be kwargs for dataset.evaluate() function') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + + assert args.eval is not None + + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + cfg.data.test.test_mode = True + + dataset = build_dataset(cfg.data.test) + outputs = mmcv.load(args.results) + + kwargs = {} if args.eval_options is None else args.eval_options + eval_kwargs = cfg.get('evaluation', {}).copy() + # hard-code way to remove EpochEvalHook args + for key in [ + 'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', 'rule', + 'key_indicator' + ]: + eval_kwargs.pop(key, None) + eval_kwargs.update(dict(metrics=args.eval, **kwargs)) + print(dataset.evaluate(outputs, **eval_kwargs)) + + +if __name__ == '__main__': + main() diff --git a/Downstream/Open-Set-Action-Recognition/tools/analysis/get_flops.py b/Downstream/Open-Set-Action-Recognition/tools/analysis/get_flops.py new file mode 100644 index 0000000..b9cddf3 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/analysis/get_flops.py @@ -0,0 +1,69 @@ +import argparse + +from mmcv import Config + +from mmaction.models import build_recognizer + +try: + from mmcv.cnn import get_model_complexity_info +except ImportError: + raise ImportError('Please upgrade mmcv to >0.6.2') + + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a recognizer') + parser.add_argument('config', help='train config file path') + parser.add_argument( + '--shape', + type=int, + nargs='+', + default=[340, 256], + help='input image size') + args = parser.parse_args() + return args + + +def main(): + + args = parse_args() + + if len(args.shape) == 1: + input_shape = (1, 3, args.shape[0], args.shape[0]) + elif len(args.shape) == 2: + input_shape = ( + 1, + 3, + ) + tuple(args.shape) + elif len(args.shape) == 4: + # n, c, h, w = args.shape + input_shape = tuple(args.shape) + elif len(args.shape) == 5: + # n, c, t, h, w = args.shape + input_shape = tuple(args.shape) + else: + raise ValueError('invalid input shape') + + cfg = Config.fromfile(args.config) + model = build_recognizer( + cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg) + model = model.cuda() + model.eval() + + if hasattr(model, 'forward_dummy'): + model.forward = model.forward_dummy + else: + raise NotImplementedError( + 'FLOPs counter is currently not currently supported with {}'. + format(model.__class__.__name__)) + + flops, params = get_model_complexity_info(model, input_shape) + split_line = '=' * 30 + print(f'{split_line}\nInput shape: {input_shape}\n' + f'Flops: {flops}\nParams: {params}\n{split_line}') + print('!!!Please be cautious if you use the results in papers. 
' + 'You may need to check if all ops are supported and verify that the ' + 'flops computation is correct.') + + +if __name__ == '__main__': + main() diff --git a/Downstream/Open-Set-Action-Recognition/tools/analysis/print_config.py b/Downstream/Open-Set-Action-Recognition/tools/analysis/print_config.py new file mode 100644 index 0000000..2ba994f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/analysis/print_config.py @@ -0,0 +1,26 @@ +import argparse + +from mmcv import Config, DictAction + + +def parse_args(): + parser = argparse.ArgumentParser(description='Print the whole config') + parser.add_argument('config', help='config file path') + parser.add_argument( + '--options', nargs='+', action=DictAction, help='arguments in dict') + args = parser.parse_args() + + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + if args.options is not None: + cfg.merge_from_dict(args.options) + print(f'Config:\n{cfg.pretty_text}') + + +if __name__ == '__main__': + main() diff --git a/Downstream/Open-Set-Action-Recognition/tools/analysis/report_accuracy.py b/Downstream/Open-Set-Action-Recognition/tools/analysis/report_accuracy.py new file mode 100644 index 0000000..fc42eb1 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/analysis/report_accuracy.py @@ -0,0 +1,56 @@ +import argparse + +from mmcv import load +from scipy.special import softmax + +from mmaction.core.evaluation import (get_weighted_score, mean_class_accuracy, + top_k_accuracy) + + +def parse_args(): + parser = argparse.ArgumentParser(description='Fusing multiple scores') + parser.add_argument( + '--scores', + nargs='+', + help='list of scores', + default=['demo/fuse/rgb.pkl', 'demo/fuse/flow.pkl']) + parser.add_argument( + '--coefficients', + nargs='+', + type=float, + help='coefficients of each score file', + default=[1.0, 1.0]) + parser.add_argument( + '--datalist', + help='list of testing data', + default='demo/fuse/data_list.txt') + parser.add_argument('--apply-softmax', action='store_true') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + assert len(args.scores) == len(args.coefficients) + score_list = args.scores + score_list = [load(f) for f in score_list] + if args.apply_softmax: + + def apply_softmax(scores): + return [softmax(score) for score in scores] + + score_list = [apply_softmax(scores) for scores in score_list] + + weighted_scores = get_weighted_score(score_list, args.coefficients) + data = open(args.datalist).readlines() + labels = [int(x.strip().split()[-1]) for x in data] + + mean_class_acc = mean_class_accuracy(weighted_scores, labels) + top_1_acc, top_5_acc = top_k_accuracy(weighted_scores, labels, (1, 5)) + print(f'Mean Class Accuracy: {mean_class_acc:.04f}') + print(f'Top 1 Accuracy: {top_1_acc:.04f}') + print(f'Top 5 Accuracy: {top_5_acc:.04f}') + + +if __name__ == '__main__': + main() diff --git a/Downstream/Open-Set-Action-Recognition/tools/analysis/report_map.py b/Downstream/Open-Set-Action-Recognition/tools/analysis/report_map.py new file mode 100644 index 0000000..32e49ef --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/analysis/report_map.py @@ -0,0 +1,86 @@ +import argparse +import os +import os.path as osp + +import mmcv +import numpy as np + +from mmaction.core import ActivityNetDetection + +args = None + + +def cuhk17_top1(): + """Assign label for each proposal with the cuhk17 result, which is the #2 + entry in http://activity-net.org/challenges/2017/evaluation.html.""" + if not 
osp.exists('cuhk_anet17_pred.json'): + os.system('wget https://download.openmmlab.com/' + 'mmaction/localization/cuhk_anet17_pred.json') + proposal = mmcv.load(args.proposal) + results = proposal['results'] + cuhk_pred = mmcv.load('cuhk_anet17_pred.json')['results'] + + def get_topk(preds, k): + preds.sort(key=lambda x: x['score']) + return preds[-k:] + + for k, v in results.items(): + action_pred = cuhk_pred[k] + top1 = get_topk(action_pred, 1) + top1_label = top1[0]['label'] + new_value = [] + for item in v: + x = dict(label=top1_label) + x.update(item) + new_value.append(x) + results[k] = new_value + proposal['results'] = results + mmcv.dump(proposal, args.det_output) + + +cls_funcs = {'cuhk17_top1': cuhk17_top1} + + +def parse_args(): + parser = argparse.ArgumentParser(description='Report detection mAP for' + 'ActivityNet proposal file') + parser.add_argument('--proposal', type=str, help='proposal file') + parser.add_argument( + '--gt', + type=str, + default='data/ActivityNet/' + 'anet_anno_val.json', + help='groundtruth file') + parser.add_argument( + '--cls', + type=str, + default='cuhk17_top1', + choices=['cuhk17_top1'], + help='the way to assign label for each ' + 'proposal') + parser.add_argument( + '--det-output', + type=str, + default='det_result.json', + help='the path to store detection results') + args = parser.parse_args() + return args + + +def main(): + global args, cls_funcs + args = parse_args() + func = cls_funcs[args.cls] + func() + anet_detection = ActivityNetDetection( + args.gt, + args.det_output, + tiou_thresholds=np.linspace(0.5, 0.95, 10), + verbose=True) + mAP, average_mAP = anet_detection.evaluate() + print('[RESULTS] Performance on ActivityNet detection task.\n' + f'mAP: {mAP}\nAverage-mAP: {average_mAP}') + + +if __name__ == '__main__': + main() diff --git a/Downstream/Open-Set-Action-Recognition/tools/argparse.bash b/Downstream/Open-Set-Action-Recognition/tools/argparse.bash new file mode 100644 index 0000000..4e034cd --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/argparse.bash @@ -0,0 +1,103 @@ +#!/usr/bin/env bash + +# Use python's argparse module in shell scripts +# +# The function `argparse` parses its arguments using +# argparse.ArgumentParser; the parser is defined in the function's +# stdin. +# +# Executing ``argparse.bash`` (as opposed to sourcing it) prints a +# script template. +# +# https://github.com/nhoffman/argparse-bash +# MIT License - Copyright (c) 2015 Noah Hoffman +# +# The MIT License (MIT) +# +# Copyright (c) 2015 Noah Hoffman +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. + +argparse(){ + argparser=$(mktemp 2>/dev/null || mktemp -t argparser) + cat > "$argparser" <> "$argparser" + + cat >> "$argparser" < /dev/null; then + eval $(python "$argparser" "$@") + retval=0 + else + python "$argparser" "$@" + retval=1 + fi + + rm "$argparser" + return $retval +} + +# print a script template when this script is executed +if [[ $0 == *argparse.bash ]]; then + cat < num_frames - 1: + end = num_frames - 1 + newline = f'{k} {start} {end - start + 1} {label}' + lines.append(newline) + return lines + + train_clips, val_clips = [], [] + for k in train_videos: + train_clips.extend(clip_list(k, anet_annotations[k], train_videos[k])) + for k in val_videos: + val_clips.extend(clip_list(k, anet_annotations[k], val_videos[k])) + + with open(osp.join(data_file, 'anet_train_clip.txt'), 'w') as fout: + fout.write('\n'.join(train_clips)) + with open(osp.join(data_file, 'anet_val_clip.txt'), 'w') as fout: + fout.write('\n'.join(val_clips)) + + +if __name__ == '__main__': + generate_rawframes_filelist() diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/activitynet/process_annotations.py b/Downstream/Open-Set-Action-Recognition/tools/data/activitynet/process_annotations.py new file mode 100644 index 0000000..6309ed1 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/activitynet/process_annotations.py @@ -0,0 +1,53 @@ +"""This file processes the annotation files and generates proper annotation +files for localizers.""" +import json + +import numpy as np + + +def load_json(file): + with open(file) as json_file: + data = json.load(json_file) + return data + + +data_file = '../../../data/ActivityNet' +info_file = f'{data_file}/video_info_new.csv' +ann_file = f'{data_file}/anet_anno_action.json' + +anno_database = load_json(ann_file) + +video_record = np.loadtxt(info_file, dtype=np.str, delimiter=',', skiprows=1) + +video_dict_train = {} +video_dict_val = {} +video_dict_test = {} +video_dict_full = {} + +for i in range(len(video_record)): + video_name = video_record[i][0] + video_info = anno_database[video_name] + video_subset = video_record[i][5] + video_info['fps'] = video_record[i][3].astype(np.float) + video_info['rfps'] = video_record[i][4].astype(np.float) + video_dict_full[video_name] = video_info + if video_subset == 'training': + video_dict_train[video_name] = video_info + elif video_subset == 'testing': + video_dict_test[video_name] = video_info + elif video_subset == 'validation': + video_dict_val[video_name] = video_info + +print(f'full subset video numbers: {len(video_record)}') + +with open(f'{data_file}/anet_anno_train.json', 'w') as result_file: + json.dump(video_dict_train, result_file) + +with open(f'{data_file}/anet_anno_val.json', 'w') as result_file: + json.dump(video_dict_val, result_file) + +with open(f'{data_file}/anet_anno_test.json', 'w') as result_file: + json.dump(video_dict_test, result_file) + +with open(f'{data_file}/anet_anno_full.json', 'w') as result_file: + json.dump(video_dict_full, result_file) diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/activitynet/tsn_feature_extraction.py b/Downstream/Open-Set-Action-Recognition/tools/data/activitynet/tsn_feature_extraction.py new file mode 100644 index 0000000..d47e7df --- /dev/null +++ 
b/Downstream/Open-Set-Action-Recognition/tools/data/activitynet/tsn_feature_extraction.py @@ -0,0 +1,147 @@ +import argparse +import os +import os.path as osp +import pickle + +import mmcv +import numpy as np +import torch + +from mmaction.datasets.pipelines import Compose +from mmaction.models import build_model + + +def parse_args(): + parser = argparse.ArgumentParser(description='Extract TSN Feature') + parser.add_argument('--data-prefix', default='', help='dataset prefix') + parser.add_argument('--output-prefix', default='', help='output prefix') + parser.add_argument( + '--data-list', + help='video list of the dataset, the format should be ' + '`frame_dir num_frames output_file`') + parser.add_argument( + '--frame-interval', + type=int, + default=16, + help='the sampling frequency of frame in the untrimed video') + parser.add_argument('--modality', default='RGB', choices=['RGB', 'Flow']) + parser.add_argument('--ckpt', help='checkpoint for feature extraction') + parser.add_argument( + '--part', + type=int, + default=0, + help='which part of dataset to forward(alldata[part::total])') + parser.add_argument( + '--total', type=int, default=1, help='how many parts exist') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + args.is_rgb = args.modality == 'RGB' + args.clip_len = 1 if args.is_rgb else 5 + args.input_format = 'NCHW' if args.is_rgb else 'NCHW_Flow' + rgb_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_bgr=False) + flow_norm_cfg = dict(mean=[128, 128], std=[128, 128]) + args.img_norm_cfg = rgb_norm_cfg if args.is_rgb else flow_norm_cfg + args.f_tmpl = 'img_{:05d}.jpg' if args.is_rgb else 'flow_{}_{:05d}.jpg' + args.in_channels = args.clip_len * (3 if args.is_rgb else 2) + # max batch_size for one forward + args.batch_size = 200 + + # define the data pipeline for Untrimmed Videos + data_pipeline = [ + dict( + type='UntrimmedSampleFrames', + clip_len=args.clip_len, + frame_interval=args.frame_interval, + start_index=0), + dict(type='FrameSelector'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=256), + dict(type='Normalize', **args.img_norm_cfg), + dict(type='FormatShape', input_format=args.input_format), + dict(type='Collect', keys=['imgs'], meta_keys=[]), + dict(type='ToTensor', keys=['imgs']) + ] + data_pipeline = Compose(data_pipeline) + + # define TSN R50 model, the model is used as the feature extractor + model = dict( + type='Recognizer2D', + backbone=dict( + type='ResNet', + depth=50, + in_channels=args.in_channels, + norm_eval=False), + cls_head=dict( + type='TSNHead', + num_classes=200, + in_channels=2048, + spatial_type='avg', + consensus=dict(type='AvgConsensus', dim=1))) + model = build_model(model, test_cfg=dict(average_clips=None)) + # load pretrained weight into the feature extractor + state_dict = torch.load(args.ckpt)['state_dict'] + model.load_state_dict(state_dict) + model = model.cuda() + model.eval() + + data = open(args.data_list).readlines() + data = [x.strip() for x in data] + data = data[args.part::args.total] + + # enumerate Untrimmed videos, extract feature from each of them + prog_bar = mmcv.ProgressBar(len(data)) + if not osp.exists(args.output_prefix): + os.system(f'mkdir -p {args.output_prefix}') + + for item in data: + frame_dir, length, label = item.split() + output_file = osp.basename(frame_dir) + '.pkl' + frame_dir = osp.join(args.data_prefix, frame_dir) + output_file = osp.join(args.output_prefix, output_file) + assert 
output_file.endswith('.pkl') + length = int(length) + + # prepare a psuedo sample + tmpl = dict( + frame_dir=frame_dir, + total_frames=length, + filename_tmpl=args.f_tmpl, + start_index=0, + modality=args.modality) + sample = data_pipeline(tmpl) + imgs = sample['imgs'] + shape = imgs.shape + # the original shape should be N_seg * C * H * W, resize it to N_seg * + # 1 * C * H * W so that the network return feature of each frame (No + # score average among segments) + imgs = imgs.reshape((shape[0], 1) + shape[1:]) + imgs = imgs.cuda() + + def forward_data(model, data): + # chop large data into pieces and extract feature from them + results = [] + start_idx = 0 + num_clip = data.shape[0] + while start_idx < num_clip: + with torch.no_grad(): + part = data[start_idx:start_idx + args.batch_size] + feat = model.forward(part, return_loss=False) + results.append(feat) + start_idx += args.batch_size + return np.concatenate(results) + + feat = forward_data(model, imgs) + with open(output_file, 'wb') as fout: + pickle.dump(feat, fout) + prog_bar.update() + + +if __name__ == '__main__': + main() diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/anno_txt2json.py b/Downstream/Open-Set-Action-Recognition/tools/data/anno_txt2json.py new file mode 100644 index 0000000..9f3d3ea --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/anno_txt2json.py @@ -0,0 +1,102 @@ +import argparse + +import mmcv + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert txt annotation list to json') + parser.add_argument( + 'annofile', type=str, help='the txt annotation file to convert') + parser.add_argument( + '--format', + type=str, + default='rawframes', + choices=['rawframes', 'videos'], + help='the format of the txt annotation file') + parser.add_argument( + '--output', + type=str, + default=None, + help=( + 'the output file name, use annofile.replace(\'.txt\', \'.json\') ' + 'if the arg value is None')) + args = parser.parse_args() + + return args + + +def lines2dictlist(lines, format): + """Convert lines in 'txt' format to dictionaries in 'json' format. + Currently support single-label and multi-label. + + Example of a single-label rawframes annotation txt file: + + .. code-block:: txt + + (frame_dir num_frames label) + some/directory-1 163 1 + some/directory-2 122 1 + some/directory-3 258 2 + + Example of a multi-label rawframes annotation txt file: + + .. code-block:: txt + + (frame_dir num_frames label1 label2 ...) + some/directory-1 163 1 3 5 + some/directory-2 122 1 2 + some/directory-3 258 2 + + Example of a single-label videos annotation txt file: + + .. code-block:: txt + + (filename label) + some/path/000.mp4 1 + some/path/001.mp4 1 + some/path/002.mp4 2 + + Example of a multi-label videos annotation txt file: + + .. code-block:: txt + + (filename label1 label2 ...) + some/path/000.mp4 1 3 5 + some/path/001.mp4 1 4 8 + some/path/002.mp4 2 4 9 + + Args: + lines (list): List of lines in 'txt' label format. + format (str): Data format, choices are 'rawframes' and 'videos'. + + Returns: + list[dict]: For rawframes format, each dict has keys: frame_dir, + total_frames, label; for videos format, each diction has keys: + filename, label. 
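+
+    Example:
+        ``lines2dictlist(['some/path/000.mp4 1'], 'videos')`` returns
+        ``[dict(filename='some/path/000.mp4', label=[1])]``.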
+ """ + lines = [x.split() for x in lines] + if format == 'rawframes': + data = [ + dict( + frame_dir=line[0], + total_frames=int(line[1]), + label=[int(x) for x in line[2:]]) for line in lines + ] + elif format == 'videos': + data = [ + dict(filename=line[0], label=[int(x) for x in line[1:]]) + for line in lines + ] + return data + + +if __name__ == '__main__': + # convert txt anno list to json + args = parse_args() + lines = open(args.annofile).readlines() + lines = [x.strip() for x in lines] + result = lines2dictlist(lines, args.format) + if args.output is None: + args.output = args.annofile.replace('.txt', '.json') + mmcv.dump(result, args.output) diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/ava/README.md b/Downstream/Open-Set-Action-Recognition/tools/data/ava/README.md new file mode 100644 index 0000000..2d9e58e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/ava/README.md @@ -0,0 +1,136 @@ +# Preparing AVA + +For basic dataset information, please refer to the official [website](https://research.google.com/ava/index.html). +Before we start, please make sure that the directory is located at `$MMACTION2/tools/data/ava/`. + +## Step 1. Prepare Annotations + +First of all, you can run the following script to prepare annotations. + +```shell +bash download_annotations.sh +``` + +This command will download `ava_v2.1.zip` for AVA `v2.1` annotation. If you need the AVA `v2.2` annotation, you can try the following script. + +```shell +VERSION=2.2 bash download_annotations.sh +``` + +## Step 2. Prepare Videos + +Then, use the following script to prepare videos. The codes are adapted from the [official crawler](https://github.com/cvdfoundation/ava-dataset). +Note that this might take a long time. + +```shell +bash download_videos.sh +``` + +Or you can use the following command to downloading AVA videos in parallel using a python script. +```shell +bash download_videos_parallel.sh +``` + +Note that if you happen to have sudoer or have [GNU parallel](https://www.gnu.org/software/parallel/) on your machine, +you can speed up the procedure by downloading in parallel. + +```shell +# sudo apt-get install parallel +bash download_videos_gnu_parallel.sh +``` + +## Step 3. Cut Videos + +Cut each video from its 15th to 30th minute and make them at 30 fps. + +```shell +bash cut_videos.sh +``` + +## Step 4. Extract RGB and Flow + +This part is **optional** if you only want to use the video loader. + +Before extracting, please refer to [install.md](/docs/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow). + +If you have plenty of SSD space, then we recommend extracting frames there for better I/O performance. And you can run the following script to soft link the extracted frames. + +```shell +# execute these two line (Assume the SSD is mounted at "/mnt/SSD/") +mkdir /mnt/SSD/ava_extracted/ +ln -s /mnt/SSD/ava_extracted/ ../data/ava/rawframes/ +``` + +If you only want to play with RGB frames (since extracting optical flow can be time-consuming), consider running the following script to extract **RGB-only** frames using denseflow. + +```shell +bash extract_rgb_frames.sh +``` + +If you didn't install denseflow, you can still extract RGB frames using OpenCV by the following script, but it will keep the original size of the images. + +```shell +bash extract_rgb_frames_opencv.sh +``` + +Or using ffmpeg to extract RGB frames by the following script. 
+ +```shell +bash extract_rgb_frames_ffmpeg.sh +``` + +If both are required, run the following script to extract frames. + +```shell +bash extract_frames.sh +``` + +## Step 5. Fetching Proposal Files + +The scripts are adapted from FAIR's [Long-Term Feature Banks](https://github.com/facebookresearch/video-long-term-feature-banks). + +Run the follow scripts to fetch pre-computed proposal list. + +```shell +bash fetch_ava_proposals.sh +``` + +## Step 6. Folder Structure + +After the whole data pipeline for AVA preparation. +you can get the rawframes (RGB + Flow), videos and annotation files for AVA. + +In the context of the whole project (for AVA only), the *minimal* folder structure will look like: +(*minimal* means that some data are not necessary: for example, you may want to evaluate AVA using the original video format.) + +``` +mmaction2 +├── mmaction +├── tools +├── configs +├── data +│ ├── ava +│ │ ├── ava_dense_proposals_train.FAIR.recall_93.9.pkl +│ │ ├── ava_dense_proposals_val.FAIR.recall_93.9.pkl +│ │ ├── ava_dense_proposals_test.FAIR.recall_93.9.pkl +│ │ ├── annotations +│ │ ├── videos +│ │ │ ├── 053oq2xB3oU.mkv +│ │ │ ├── 0f39OWEqJ24.mp4 +│ │ │ ├── ... +│ │ ├── videos_15min +│ │ │ ├── 053oq2xB3oU.mkv +│ │ │ ├── 0f39OWEqJ24.mp4 +│ │ │ ├── ... +│ │ ├── rawframes +│ │ │ ├── 053oq2xB3oU +| │ │ │ ├── img_00001.jpg +| │ │ │ ├── img_00002.jpg +| │ │ │ ├── ... +``` + +For training and evaluating on AVA, please refer to [getting_started](/docs/getting_started.md). + +## Reference + +1. O. Tange (2018): GNU Parallel 2018, March 2018, https://doi.org/10.5281/zenodo.1146014 diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/ava/cut_videos.sh b/Downstream/Open-Set-Action-Recognition/tools/data/ava/cut_videos.sh new file mode 100644 index 0000000..ec94693 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/ava/cut_videos.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +# Copyright (c) Facebook, Inc. and its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +############################################################################## + +# Cut each video from its 15th to 30th minute. + +IN_DATA_DIR="../../../data/ava/videos" +OUT_DATA_DIR="../../../data/ava/videos_15min" + +if [[ ! -d "${OUT_DATA_DIR}" ]]; then + echo "${OUT_DATA_DIR} doesn't exist. Creating it."; + mkdir -p ${OUT_DATA_DIR} +fi + +for video in $(ls -A1 -U ${IN_DATA_DIR}/*) +do + out_name="${OUT_DATA_DIR}/${video##*/}" + if [ ! -f "${out_name}" ]; then + ffmpeg -ss 900 -t 901 -r 30 -i "${video}" -strict experimental "${out_name}" + fi +done diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/ava/download_annotations.sh b/Downstream/Open-Set-Action-Recognition/tools/data/ava/download_annotations.sh new file mode 100644 index 0000000..ba4a501 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/ava/download_annotations.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +set -e + +VERSION=${VERSION:-"2.1"} +DATA_DIR="../../../data/ava/annotations" + +if [[ ! 
-d "${DATA_DIR}" ]]; then + echo "${DATA_DIR} does not exist. Creating"; + mkdir -p ${DATA_DIR} +fi + +wget https://research.google.com/ava/download/ava_v${VERSION}.zip +unzip -j ava_v${VERSION}.zip -d ${DATA_DIR}/ +rm ava_v${VERSION}.zip diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/ava/download_videos.sh b/Downstream/Open-Set-Action-Recognition/tools/data/ava/download_videos.sh new file mode 100644 index 0000000..ba8c569 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/ava/download_videos.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +set -e + +DATA_DIR="../../../data/ava/videos" +ANNO_DIR="../../../data/ava/annotations" + +if [[ ! -d "${DATA_DIR}" ]]; then + echo "${DATA_DIR} does not exist. Creating"; + mkdir -p ${DATA_DIR} +fi + +wget https://s3.amazonaws.com/ava-dataset/annotations/ava_file_names_trainval_v2.1.txt -P ${ANNO_DIR} + +cat ${ANNO_DIR}/ava_file_names_trainval_v2.1.txt | +while read vid; + do wget -c "https://s3.amazonaws.com/ava-dataset/trainval/${vid}" -P ${DATA_DIR}; done + +echo "Downloading finished." diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/ava/download_videos_gnu_parallel.sh b/Downstream/Open-Set-Action-Recognition/tools/data/ava/download_videos_gnu_parallel.sh new file mode 100644 index 0000000..6ef5bf1 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/ava/download_videos_gnu_parallel.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +set -e + +DATA_DIR="../../../data/ava/videos" +ANNO_DIR="../../../data/ava/annotations" + +if [[ ! -d "${DATA_DIR}" ]]; then + echo "${DATA_DIR} does not exist. Creating"; + mkdir -p ${DATA_DIR} +fi + +wget https://s3.amazonaws.com/ava-dataset/annotations/ava_file_names_trainval_v2.1.txt -P ${ANNO_DIR} + +# sudo apt-get install parallel +# parallel downloading to speed up +awk '{print "https://s3.amazonaws.com/ava-dataset/trainval/"$0}' ${ANNO_DIR}/ava_file_names_trainval_v2.1.txt | +parallel -j 8 wget -c -q {} -P ${DATA_DIR} + +echo "Downloading finished." 
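Whichever of the download scripts above is used, it can help to verify that every file named in `ava_file_names_trainval_v2.1.txt` actually reached the videos directory before moving on to cutting and frame extraction. A minimal check along these lines (not part of this patch; it assumes the same `DATA_DIR`/`ANNO_DIR` layout as the scripts above):

```python
import os.path as osp

# Same layout as the AVA download scripts above (adjust if yours differs).
ANNO_DIR = '../../../data/ava/annotations'
DATA_DIR = '../../../data/ava/videos'

with open(osp.join(ANNO_DIR, 'ava_file_names_trainval_v2.1.txt')) as f:
    filenames = [line.strip() for line in f if line.strip()]

missing = [name for name in filenames
           if not osp.exists(osp.join(DATA_DIR, name))]
print(f'{len(missing)} of {len(filenames)} AVA videos are still missing')
for name in missing[:10]:
    print('missing:', name)
```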
diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/ava/download_videos_parallel.py b/Downstream/Open-Set-Action-Recognition/tools/data/ava/download_videos_parallel.py new file mode 100644 index 0000000..027e9ce --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/ava/download_videos_parallel.py @@ -0,0 +1,65 @@ +import argparse +import os.path as osp +import subprocess + +import mmcv +from joblib import Parallel, delayed + +URL_PREFIX = 'https://s3.amazonaws.com/ava-dataset/trainval/' + + +def download_video(video_url, output_dir, num_attempts=5): + video_file = osp.basename(video_url) + output_file = osp.join(output_dir, video_file) + + status = False + + if not osp.exists(output_file): + command = ['wget', '-c', video_url, '-P', output_dir] + command = ' '.join(command) + print(command) + attempts = 0 + while True: + try: + subprocess.check_output( + command, shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError: + attempts += 1 + if attempts == num_attempts: + return status, 'Downloading Failed' + else: + break + + status = osp.exists(output_file) + return status, 'Downloaded' + + +def main(source_file, output_dir, num_jobs=24, num_attempts=5): + mmcv.mkdir_or_exist(output_dir) + video_list = open(source_file).read().strip().split('\n') + video_list = [osp.join(URL_PREFIX, video) for video in video_list] + + if num_jobs == 1: + status_list = [] + for video in video_list: + status_list.append(download_video(video, output_dir, num_attempts)) + else: + status_list = Parallel(n_jobs=num_jobs)( + delayed(download_video)(video, output_dir, num_attempts) + for video in video_list) + + mmcv.dump(status_list, 'download_report.json') + + +if __name__ == '__main__': + description = 'Helper script for downloading AVA videos' + parser = argparse.ArgumentParser(description=description) + parser.add_argument( + 'source_file', type=str, help='TXT file containing the video filenames') + parser.add_argument( + 'output_dir', + type=str, + help='Output directory where videos will be saved') + parser.add_argument('-n', '--num-jobs', type=int, default=24) + parser.add_argument('--num-attempts', type=int, default=5) + main(**vars(parser.parse_args())) diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/ava/download_videos_parallel.sh b/Downstream/Open-Set-Action-Recognition/tools/data/ava/download_videos_parallel.sh new file mode 100644 index 0000000..2332922 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/ava/download_videos_parallel.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +set -e + +DATA_DIR="../../../data/ava/videos" +ANNO_DIR="../../../data/ava/annotations" + +if [[ ! -d "${DATA_DIR}" ]]; then + echo "${DATA_DIR} does not exist. 
Creating"; + mkdir -p ${DATA_DIR} +fi + +wget https://s3.amazonaws.com/ava-dataset/annotations/ava_file_names_trainval_v2.1.txt -P ${ANNO_DIR} + +python download_videos_parallel.py ${ANNO_DIR}/ava_file_names_trainval_v2.1.txt ${DATA_DIR} diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/ava/extract_frames.sh b/Downstream/Open-Set-Action-Recognition/tools/data/ava/extract_frames.sh new file mode 100644 index 0000000..68f5cf0 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/ava/extract_frames.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +cd ../ +python build_rawframes.py ../../data/ava/videos_15min/ ../../data/ava/rawframes/ --task both --level 1 --flow-type tvl1 --mixed-ext +echo "Raw frames (RGB and Flow) Generated" +cd ava/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/ava/extract_rgb_frames.sh b/Downstream/Open-Set-Action-Recognition/tools/data/ava/extract_rgb_frames.sh new file mode 100644 index 0000000..84d21d8 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/ava/extract_rgb_frames.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +cd ../ +python build_rawframes.py ../../data/ava/videos_15min/ ../../data/ava/rawframes/ --task rgb --level 1 --mixed-ext +echo "Genearte raw frames (RGB only)" + +cd ava/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/ava/extract_rgb_frames_ffmpeg.sh b/Downstream/Open-Set-Action-Recognition/tools/data/ava/extract_rgb_frames_ffmpeg.sh new file mode 100644 index 0000000..fbb04a0 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/ava/extract_rgb_frames_ffmpeg.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash + +# Copyright (c) Facebook, Inc. and its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +############################################################################## + +# Extract frames from videos. + +IN_DATA_DIR="../../../data/ava/videos_15min" +OUT_DATA_DIR="../../../data/ava/rawframes" + +if [[ ! -d "${OUT_DATA_DIR}" ]]; then + echo "${OUT_DATA_DIR} doesn't exist. 
Creating it."; + mkdir -p ${OUT_DATA_DIR} +fi + +for video in $(ls -A1 -U ${IN_DATA_DIR}/*) +do + video_name=${video##*/} + + if [[ $video_name = *".webm" ]]; then + video_name=${video_name::-5} + else + video_name=${video_name::-4} + fi + + out_video_dir=${OUT_DATA_DIR}/${video_name}/ + mkdir -p "${out_video_dir}" + + out_name="${out_video_dir}/${video_name}_%06d.jpg" + + ffmpeg -i "${video}" -r 30 -q:v 1 "${out_name}" +done diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/ava/extract_rgb_frames_opencv.sh b/Downstream/Open-Set-Action-Recognition/tools/data/ava/extract_rgb_frames_opencv.sh new file mode 100644 index 0000000..6cf1281 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/ava/extract_rgb_frames_opencv.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +cd ../ +python build_rawframes.py ../../data/ava/videos_15min/ ../../data/ava/rawframes/ --task rgb --level 1 --use-opencv --mixed-ext +echo "Genearte raw frames (RGB only)" + +cd ava/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/ava/fetch_ava_proposals.sh b/Downstream/Open-Set-Action-Recognition/tools/data/ava/fetch_ava_proposals.sh new file mode 100644 index 0000000..42f73c7 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/ava/fetch_ava_proposals.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +set -e + +DATA_DIR="../../../data/ava/" + +wget https://download.openmmlab.com/mmaction/dataset/ava/ava_dense_proposals_train.FAIR.recall_93.9.pkl -P ${DATA_DIR} +wget https://download.openmmlab.com/mmaction/dataset/ava/ava_dense_proposals_val.FAIR.recall_93.9.pkl -P ${DATA_DIR} +wget https://download.openmmlab.com/mmaction/dataset/ava/ava_dense_proposals_test.FAIR.recall_93.9.pkl -P ${DATA_DIR} diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/build_audio_features.py b/Downstream/Open-Set-Action-Recognition/tools/data/build_audio_features.py new file mode 100644 index 0000000..c2b7927 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/build_audio_features.py @@ -0,0 +1,307 @@ +import argparse +import glob +import os +import os.path as osp +import sys +from multiprocessing import Pool + +import mmcv +import numpy as np +from scipy.io import wavfile + +try: + import librosa + import lws +except ImportError: + print('Please import librosa, lws first.') + +sys.path.append('..') + +SILENCE_THRESHOLD = 2 +FMIN = 125 +FMAX = 7600 +FRAME_SHIFT_MS = None +MIN_LEVEL_DB = -100 +REF_LEVEL_DB = 20 +RESCALING = True +RESCALING_MAX = 0.999 +ALLOW_CLIPPING_IN_NORMALIZATION = True +LOG_SCALE_MIN = -32.23619130191664 +NORM_AUDIO = True + + +class AudioTools: + """All methods related to audio feature extraction. Code Reference: + + `_, + `_. + + Args: + frame_rate (int): The frame rate per second of the video. Default: 30. + sample_rate (int): The sample rate for audio sampling. Default: 16000. + num_mels (int): Number of channels of the melspectrogram. Default: 80. + fft_size (int): fft_size / sample_rate is window size. Default: 1280. + hop_size (int): hop_size / sample_rate is step size. Default: 320. 
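+
+    Example:
+        ``AudioTools(fft_size=512, hop_size=256)`` corresponds to a 32 ms
+        window and a 16 ms hop at the default 16 kHz sample rate.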
+ """ + + def __init__(self, + frame_rate=30, + sample_rate=16000, + num_mels=80, + fft_size=1280, + hop_size=320): + self.frame_rate = frame_rate + self.sample_rate = sample_rate + self.silence_threshold = SILENCE_THRESHOLD + self.num_mels = num_mels + self.fmin = FMIN + self.fmax = FMAX + self.fft_size = fft_size + self.hop_size = hop_size + self.frame_shift_ms = FRAME_SHIFT_MS + self.min_level_db = MIN_LEVEL_DB + self.ref_level_db = REF_LEVEL_DB + self.rescaling = RESCALING + self.rescaling_max = RESCALING_MAX + self.allow_clipping_in_normalization = ALLOW_CLIPPING_IN_NORMALIZATION + self.log_scale_min = LOG_SCALE_MIN + self.norm_audio = NORM_AUDIO + + def load_wav(self, path): + """Load an audio file into numpy array.""" + return librosa.core.load(path, sr=self.sample_rate)[0] + + def audio_normalize(self, samples, desired_rms=0.1, eps=1e-4): + """RMS normalize the audio data.""" + rms = np.maximum(eps, np.sqrt(np.mean(samples**2))) + samples = samples * (desired_rms / rms) + return samples + + def generate_spectrogram_magphase(self, audio, with_phase=False): + """Separate a complex-valued spectrogram D into its magnitude (S) + + and phase (P) components, so that D = S * P. + + Args: + audio (np.ndarray): The input audio signal. + with_phase (bool): Determines whether to output the + phase components. Default: False. + + Returns: + np.ndarray: magnitude and phase component of the complex-valued + spectrogram. + """ + spectro = librosa.core.stft( + audio, + hop_length=self.get_hop_size(), + n_fft=self.fft_size, + center=True) + spectro_mag, spectro_phase = librosa.core.magphase(spectro) + spectro_mag = np.expand_dims(spectro_mag, axis=0) + if with_phase: + spectro_phase = np.expand_dims(np.angle(spectro_phase), axis=0) + return spectro_mag, spectro_phase + else: + return spectro_mag + + def save_wav(self, wav, path): + """Save the wav to disk.""" + # 32767 = (2 ^ 15 - 1) maximum of int16 + wav *= 32767 / max(0.01, np.max(np.abs(wav))) + wavfile.write(path, self.sample_rate, wav.astype(np.int16)) + + def trim(self, quantized): + """Trim the audio wavfile.""" + start, end = self.start_and_end_indices(quantized, + self.silence_threshold) + return quantized[start:end] + + def adjust_time_resolution(self, quantized, mel): + """Adjust time resolution by repeating features. 
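+
+        The mel frames are repeated ``quantized.size // mel.shape[0]`` times
+        along the time axis, zero-padded to the length of ``quantized``, and
+        both arrays are then trimmed with the silence threshold.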
+ + Args: + quantized (np.ndarray): (T,) + mel (np.ndarray): (N, D) + + Returns: + tuple: Tuple of (T,) and (T, D) + """ + assert quantized.ndim == 1 + assert mel.ndim == 2 + + upsample_factor = quantized.size // mel.shape[0] + mel = np.repeat(mel, upsample_factor, axis=0) + n_pad = quantized.size - mel.shape[0] + if n_pad != 0: + assert n_pad > 0 + mel = np.pad( + mel, [(0, n_pad), (0, 0)], mode='constant', constant_values=0) + + # trim + start, end = self.start_and_end_indices(quantized, + self.silence_threshold) + + return quantized[start:end], mel[start:end, :] + + def start_and_end_indices(self, quantized, silence_threshold=2): + """Trim the audio file when reaches the silence threshold.""" + for start in range(quantized.size): + if abs(quantized[start] - 127) > silence_threshold: + break + for end in range(quantized.size - 1, 1, -1): + if abs(quantized[end] - 127) > silence_threshold: + break + + assert abs(quantized[start] - 127) > silence_threshold + assert abs(quantized[end] - 127) > silence_threshold + + return start, end + + def melspectrogram(self, y): + """Generate the melspectrogram.""" + D = self._lws_processor().stft(y).T + S = self._amp_to_db(self._linear_to_mel(np.abs(D))) - self.ref_level_db + if not self.allow_clipping_in_normalization: + assert S.max() <= 0 and S.min() - self.min_level_db >= 0 + return self._normalize(S) + + def get_hop_size(self): + """Calculate the hop size.""" + hop_size = self.hop_size + if hop_size is None: + assert self.frame_shift_ms is not None + hop_size = int(self.frame_shift_ms / 1000 * self.sample_rate) + return hop_size + + def _lws_processor(self): + """Perform local weighted sum. + + Please refer to `_. + """ + return lws.lws(self.fft_size, self.get_hop_size(), mode='speech') + + def lws_num_frames(self, length, fsize, fshift): + """Compute number of time frames of lws spectrogram. + + Please refer to `_. + """ + pad = (fsize - fshift) + if length % fshift == 0: + M = (length + pad * 2 - fsize) // fshift + 1 + else: + M = (length + pad * 2 - fsize) // fshift + 2 + return M + + def lws_pad_lr(self, x, fsize, fshift): + """Compute left and right padding lws internally uses. + + Please refer to `_. + """ + M = self.lws_num_frames(len(x), fsize, fshift) + pad = (fsize - fshift) + T = len(x) + 2 * pad + r = (M - 1) * fshift + fsize - T + return pad, pad + r + + def _linear_to_mel(self, spectrogram): + """Warp linear scale spectrograms to the mel scale. + + Please refer to `_ + """ + global _mel_basis + _mel_basis = self._build_mel_basis() + return np.dot(_mel_basis, spectrogram) + + def _build_mel_basis(self): + """Build mel filters. 
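+
+        This is a thin wrapper around ``librosa.filters.mel`` using the
+        configured ``fmin``, ``fmax`` and ``num_mels``; it requires
+        ``fmax <= sample_rate // 2``.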
+ + Please refer to `_ + """ + assert self.fmax <= self.sample_rate // 2 + return librosa.filters.mel( + self.sample_rate, + self.fft_size, + fmin=self.fmin, + fmax=self.fmax, + n_mels=self.num_mels) + + def _amp_to_db(self, x): + min_level = np.exp(self.min_level_db / 20 * np.log(10)) + return 20 * np.log10(np.maximum(min_level, x)) + + def _db_to_amp(self, x): + return np.power(10.0, x * 0.05) + + def _normalize(self, S): + return np.clip((S - self.min_level_db) / -self.min_level_db, 0, 1) + + def _denormalize(self, S): + return (np.clip(S, 0, 1) * -self.min_level_db) + self.min_level_db + + def read_audio(self, audio_path): + wav = self.load_wav(audio_path) + if self.norm_audio: + wav = self.audio_normalize(wav) + else: + wav = wav / np.abs(wav).max() + + return wav + + def audio_to_spectrogram(self, wav): + if self.melspectrogram: + spectrogram = self.melspectrogram(wav).astype(np.float32).T + else: + spectrogram = self.generate_spectrogram_magphase(wav) + return spectrogram + + +def extract_audio_feature(wav_path, audio_tools, mel_out_dir): + file_name, _ = osp.splitext(osp.basename(wav_path)) + # Write the spectrograms to disk: + mel_filename = os.path.join(mel_out_dir, file_name + '.npy') + if not os.path.exists(mel_filename): + try: + wav = audio_tools.read_audio(wav_path) + + spectrogram = audio_tools.audio_to_spectrogram(wav) + + np.save( + mel_filename, + spectrogram.astype(np.float32), + allow_pickle=False) + + except BaseException: + print(f'Read audio [{wav_path}] failed.') + + +if __name__ == '__main__': + audio_tools = AudioTools( + fft_size=512, hop_size=256) # window_size:32ms hop_size:16ms + + parser = argparse.ArgumentParser() + parser.add_argument('audio_home_path', type=str) + parser.add_argument('spectrogram_save_path', type=str) + parser.add_argument('--level', type=int, default=1) + parser.add_argument('--ext', default='.m4a') + parser.add_argument('--num-workers', type=int, default=4) + parser.add_argument('--part', type=str, default='1/1') + args = parser.parse_args() + + mmcv.mkdir_or_exist(args.spectrogram_save_path) + + files = glob.glob( + osp.join(args.audio_home_path, '*/' * args.level, '*' + args.ext)) + print(f'found {len(files)} files.') + files = sorted(files) + if args.part is not None: + [this_part, num_parts] = [int(i) for i in args.part.split('/')] + part_len = len(files) // num_parts + + p = Pool(args.num_workers) + for file in files[part_len * (this_part - 1):( + part_len * this_part) if this_part != num_parts else len(files)]: + p.apply_async( + extract_audio_feature, + args=(file, audio_tools, args.spectrogram_save_path)) + p.close() + p.join() diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/build_file_list.py b/Downstream/Open-Set-Action-Recognition/tools/data/build_file_list.py new file mode 100644 index 0000000..3233a80 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/build_file_list.py @@ -0,0 +1,263 @@ +import argparse +import glob +import json +import os.path as osp +import random + +from mmcv.runner import set_random_seed +from tools.data.anno_txt2json import lines2dictlist +from tools.data.parse_file_list import (parse_directory, parse_hmdb51_split, + parse_jester_splits, + parse_kinetics_splits, + parse_mit_splits, parse_mmit_splits, + parse_sthv1_splits, parse_sthv2_splits, + parse_ucf101_splits) + + +def parse_args(): + parser = argparse.ArgumentParser(description='Build file list') + parser.add_argument( + 'dataset', + type=str, + choices=[ + 'ucf101', 'kinetics10', 'kinetics400', 'kinetics600', 
'kinetics700', 'thumos14', + 'sthv1', 'sthv2', 'mit', 'mmit', 'activitynet', 'hmdb51', 'jester' + ], + help='dataset to be built file list') + parser.add_argument( + 'src_folder', type=str, help='root directory for the frames or videos') + parser.add_argument( + '--rgb-prefix', type=str, default='img_', help='prefix of rgb frames') + parser.add_argument( + '--flow-x-prefix', + type=str, + default='flow_x_', + help='prefix of flow x frames') + parser.add_argument( + '--flow-y-prefix', + type=str, + default='flow_y_', + help='prefix of flow y frames') + parser.add_argument( + '--num-split', + type=int, + default=3, + help='number of split to file list') + parser.add_argument( + '--subset', + type=str, + default='train', + choices=['train', 'val', 'test'], + help='subset to generate file list') + parser.add_argument( + '--level', + type=int, + default=2, + choices=[1, 2], + help='directory level of data') + parser.add_argument( + '--format', + type=str, + default='rawframes', + choices=['rawframes', 'videos'], + help='data format') + parser.add_argument( + '--out-root-path', + type=str, + default='data/', + help='root path for output') + parser.add_argument( + '--output-format', + type=str, + default='txt', + choices=['txt', 'json'], + help='built file list format') + parser.add_argument('--seed', type=int, default=None, help='random seed') + parser.add_argument( + '--shuffle', + action='store_true', + default=False, + help='whether to shuffle the file list') + args = parser.parse_args() + + return args + + +def build_file_list(splits, frame_info, shuffle=False): + """Build file list for a certain data split. + + Args: + splits (tuple): Data split to generate file list. + frame_info (dict): Dict mapping from frames to path. e.g., + 'Skiing/v_Skiing_g18_c02': ('data/ucf101/rawframes/Skiing/v_Skiing_g18_c02', 0, 0). # noqa: E501 + shuffle (bool): Whether to shuffle the file list. + + Returns: + tuple: RGB file list for training and testing, together with + Flow file list for training and testing. + """ + + def build_list(split): + """Build RGB and Flow file list with a given split. + + Args: + split (list): Split to be generate file list. + + Returns: + tuple[list, list]: (rgb_list, flow_list), rgb_list is the + generated file list for rgb, flow_list is the generated + file list for flow. 
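+
+            Each rawframes entry is written as ``'frame_dir num_frames label'``
+            and each videos entry as ``'video_path label'``; multi-label
+            datasets emit several space-separated labels per line.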
+ """ + rgb_list, flow_list = list(), list() + for item in split: + if item[0] not in frame_info: + continue + elif frame_info[item[0]][1] > 0: + # rawframes + rgb_cnt = frame_info[item[0]][1] + flow_cnt = frame_info[item[0]][2] + if isinstance(item[1], int): + rgb_list.append(f'{item[0]} {rgb_cnt} {item[1]}\n') + flow_list.append(f'{item[0]} {flow_cnt} {item[1]}\n') + elif isinstance(item[1], list): + # only for multi-label datasets like mmit + rgb_list.append(f'{item[0]} {rgb_cnt} ' + + ' '.join([str(digit) + for digit in item[1]]) + '\n') + rgb_list.append(f'{item[0]} {flow_cnt} ' + + ' '.join([str(digit) + for digit in item[1]]) + '\n') + else: + raise ValueError( + 'frame_info should be ' + + '[`video`(str), `label`(int)|`labels(list[int])`') + else: + # videos + if isinstance(item[1], int): + rgb_list.append(f'{frame_info[item[0]][0]} {item[1]}\n') + flow_list.append(f'{frame_info[item[0]][0]} {item[1]}\n') + elif isinstance(item[1], list): + # only for multi-label datasets like mmit + rgb_list.append(f'{frame_info[item[0]][0]} ' + + ' '.join([str(digit) + for digit in item[1]]) + '\n') + flow_list.append( + f'{frame_info[item[0]][0]} ' + + ' '.join([str(digit) for digit in item[1]]) + '\n') + else: + raise ValueError( + 'frame_info should be ' + + '[`video`(str), `label`(int)|`labels(list[int])`') + if shuffle: + random.shuffle(rgb_list) + random.shuffle(flow_list) + return rgb_list, flow_list + + train_rgb_list, train_flow_list = build_list(splits[0]) + test_rgb_list, test_flow_list = build_list(splits[1]) + return (train_rgb_list, test_rgb_list), (train_flow_list, test_flow_list) + + +def main(): + args = parse_args() + + if args.seed is not None: + print(f'Set random seed to {args.seed}') + set_random_seed(args.seed) + + if args.format == 'rawframes': + frame_info = parse_directory( + args.src_folder, + rgb_prefix=args.rgb_prefix, + flow_x_prefix=args.flow_x_prefix, + flow_y_prefix=args.flow_y_prefix, + level=args.level) + elif args.format == 'videos': + if args.level == 1: + # search for one-level directory + video_list = glob.glob(osp.join(args.src_folder, '*')) + elif args.level == 2: + # search for two-level directory + video_list = glob.glob(osp.join(args.src_folder, '*', '*')) + else: + raise ValueError(f'level must be 1 or 2, but got {args.level}') + frame_info = {} + for video in video_list: + video_path = osp.relpath(video, args.src_folder) + # video_id: (video_relative_path, -1, -1) + frame_info[osp.splitext(video_path)[0]] = (video_path, -1, -1) + else: + raise NotImplementedError('only rawframes and videos are supported') + + if args.dataset == 'ucf101': + splits = parse_ucf101_splits(args.level) + elif args.dataset == 'sthv1': + splits = parse_sthv1_splits(args.level) + elif args.dataset == 'sthv2': + splits = parse_sthv2_splits(args.level) + elif args.dataset == 'mit': + splits = parse_mit_splits() + elif args.dataset == 'mmit': + splits = parse_mmit_splits() + elif args.dataset in ['kinetics10', 'kinetics400', 'kinetics600', 'kinetics700']: + splits = parse_kinetics_splits(args.level, args.dataset) + elif args.dataset == 'hmdb51': + splits = parse_hmdb51_split(args.level) + elif args.dataset == 'jester': + splits = parse_jester_splits(args.level) + else: + raise ValueError( + f"Supported datasets are 'ucf101, sthv1, sthv2', 'jester', " + f"'mmit', 'mit', 'kinetics10', 'kinetics400', 'kinetics600', 'kinetics700', but " + f'got {args.dataset}') + + assert len(splits) == args.num_split + + out_path = args.out_root_path + args.dataset + + if len(splits) > 1: + for i, 
split in enumerate(splits): + file_lists = build_file_list( + split, frame_info, shuffle=args.shuffle) + train_name = f'{args.dataset}_train_split_{i+1}_{args.format}.txt' + val_name = f'{args.dataset}_val_split_{i+1}_{args.format}.txt' + if args.output_format == 'txt': + with open(osp.join(out_path, train_name), 'w') as f: + f.writelines(file_lists[0][0]) + with open(osp.join(out_path, val_name), 'w') as f: + f.writelines(file_lists[0][1]) + elif args.output_format == 'json': + train_list = lines2dictlist(file_lists[0][0], args.format) + val_list = lines2dictlist(file_lists[0][1], args.format) + train_name = train_name.replace('.txt', '.json') + val_name = val_name.replace('.txt', '.json') + with open(osp.join(out_path, train_name), 'w') as f: + json.dump(train_list, f) + with open(osp.join(out_path, val_name), 'w') as f: + json.dump(val_list, f) + else: + lists = build_file_list(splits[0], frame_info, shuffle=args.shuffle) + + if args.subset == 'train': + ind = 0 + elif args.subset == 'val': + ind = 1 + elif args.subset == 'test': + ind = 2 + else: + raise ValueError(f"subset must be in ['train', 'val', 'test'], " + f'but got {args.subset}.') + + filename = f'{args.dataset}_{args.subset}_list_{args.format}.txt' + if args.output_format == 'txt': + with open(osp.join(out_path, filename), 'w') as f: + f.writelines(lists[0][ind]) + elif args.output_format == 'json': + data_list = lines2dictlist(lists[0][ind], args.format) + filename = filename.replace('.txt', '.json') + with open(osp.join(out_path, filename), 'w') as f: + json.dump(data_list, f) + + +if __name__ == '__main__': + main() diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/build_rawframes.py b/Downstream/Open-Set-Action-Recognition/tools/data/build_rawframes.py new file mode 100644 index 0000000..bfb22f8 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/build_rawframes.py @@ -0,0 +1,239 @@ +import argparse +import glob +import os +import os.path as osp +import sys +import warnings +from multiprocessing import Pool + +import mmcv +import numpy as np + + +def extract_frame(vid_item): + """Generate optical flow using dense flow. + + Args: + vid_item (list): Video item containing video full path, + video (short) path, video id. + + Returns: + bool: Whether generate optical flow successfully. + """ + full_path, vid_path, vid_id, method, task = vid_item + if '/' in vid_path: + act_name = osp.basename(osp.dirname(vid_path)) + out_full_path = osp.join(args.out_dir, act_name) + else: + out_full_path = args.out_dir + + if task == 'rgb': + if args.use_opencv: + # Not like using denseflow, + # Use OpenCV will not make a sub directory with the video name + video_name = osp.splitext(osp.basename(vid_path))[0] + out_full_path = osp.join(out_full_path, video_name) + + vr = mmcv.VideoReader(full_path) + for i in range(len(vr)): + if vr[i] is not None: + w, h, c = np.shape(vr[i]) + if args.new_short == 0: + if args.new_width == 0 or args.new_height == 0: + # Keep original shape + out_img = vr[i] + else: + out_img = mmcv.imresize(vr[i], (args.new_width, + args.new_height)) + else: + if min(h, w) == h: + new_h = args.new_short + new_w = int((new_h / h) * w) + else: + new_w = args.new_short + new_h = int((new_w / w) * h) + out_img = mmcv.imresize(vr[i], (new_h, new_w)) + mmcv.imwrite(out_img, + f'{out_full_path}/img_{i + 1:05d}.jpg') + else: + warnings.warn( + 'Length inconsistent!' 
+ f'Early stop with {i + 1} out of {len(vr)} frames.') + break + else: + if args.new_short == 0: + cmd = osp.join( + f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'" + f' -nw={args.new_width} -nh={args.new_height} -v') + else: + cmd = osp.join( + f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'" + f' -ns={args.new_short} -v') + os.system(cmd) + elif task == 'flow': + if args.input_frames: + if args.new_short == 0: + cmd = osp.join( + f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501 + f' -nw={args.new_width} --nh={args.new_height} -v --if') + else: + cmd = osp.join( + f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501 + f' -ns={args.new_short} -v --if') + else: + if args.new_short == 0: + cmd = osp.join( + f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501 + f' -nw={args.new_width} --nh={args.new_height} -v') + else: + cmd = osp.join( + f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501 + f' -ns={args.new_short} -v') + os.system(cmd) + else: + if args.new_short == 0: + cmd_rgb = osp.join( + f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'" + f' -nw={args.new_width} -nh={args.new_height} -v') + cmd_flow = osp.join( + f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501 + f' -nw={args.new_width} -nh={args.new_height} -v') + else: + cmd_rgb = osp.join( + f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'" + f' -ns={args.new_short} -v') + cmd_flow = osp.join( + f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'" # noqa: E501 + f' -ns={args.new_short} -v') + os.system(cmd_rgb) + os.system(cmd_flow) + + print(f'{task} {vid_id} {vid_path} {method} done') + sys.stdout.flush() + return True + + +def parse_args(): + parser = argparse.ArgumentParser(description='extract optical flows') + parser.add_argument('src_dir', type=str, help='source video directory') + parser.add_argument('out_dir', type=str, help='output rawframe directory') + parser.add_argument( + '--task', + type=str, + default='flow', + choices=['rgb', 'flow', 'both'], + help='which type of frames to be extracted') + parser.add_argument( + '--level', + type=int, + choices=[1, 2], + default=2, + help='directory level of data') + parser.add_argument( + '--num-worker', + type=int, + default=8, + help='number of workers to build rawframes') + parser.add_argument( + '--flow-type', + type=str, + default=None, + choices=[None, 'tvl1', 'warp_tvl1', 'farn', 'brox'], + help='flow type to be generated') + parser.add_argument( + '--out-format', + type=str, + default='jpg', + choices=['jpg', 'h5', 'png'], + help='output format') + parser.add_argument( + '--ext', + type=str, + default='avi', + choices=['avi', 'mp4', 'webm'], + help='video file extensions') + parser.add_argument( + '--mixed-ext', + action='store_true', + help='process video files with mixed extensions') + parser.add_argument( + '--new-width', type=int, default=0, help='resize image width') + parser.add_argument( + '--new-height', type=int, default=0, help='resize image height') + parser.add_argument( + '--new-short', + type=int, + default=0, + help='resize image short side length keeping ratio') + parser.add_argument('--num-gpu', type=int, default=8, help='number of GPU') + parser.add_argument( + '--resume', + action='store_true', + default=False, + help='resume optical flow extraction instead of overwriting') + parser.add_argument( + '--use-opencv', + 
action='store_true', + help='Whether to use opencv to extract rgb frames') + parser.add_argument( + '--input-frames', + action='store_true', + help='Whether to extract flow frames based on rgb frames') + args = parser.parse_args() + + return args + + +if __name__ == '__main__': + args = parse_args() + + if not osp.isdir(args.out_dir): + print(f'Creating folder: {args.out_dir}') + os.makedirs(args.out_dir) + + if args.level == 2: + classes = os.listdir(args.src_dir) + for classname in classes: + new_dir = osp.join(args.out_dir, classname) + if not osp.isdir(new_dir): + print(f'Creating folder: {new_dir}') + os.makedirs(new_dir) + + if args.input_frames: + print('Reading rgb frames from folder: ', args.src_dir) + fullpath_list = glob.glob(args.src_dir + '/*' * args.level) + done_fullpath_list = glob.glob(args.out_dir + '/*' * args.level) + print('Total number of rgb frame folders found: ', len(fullpath_list)) + else: + print('Reading videos from folder: ', args.src_dir) + if args.mixed_ext: + print('Extension of videos is mixed') + fullpath_list = glob.glob(args.src_dir + '/*' * args.level) + done_fullpath_list = glob.glob(args.out_dir + '/*' * args.level) + else: + print('Extension of videos: ', args.ext) + fullpath_list = glob.glob(args.src_dir + '/*' * args.level + '.' + + args.ext) + done_fullpath_list = glob.glob(args.out_dir + '/*' * args.level) + print('Total number of videos found: ', len(fullpath_list)) + + if args.resume: + fullpath_list = set(fullpath_list).difference(set(done_fullpath_list)) + fullpath_list = list(fullpath_list) + print('Resuming. number of videos to be done: ', len(fullpath_list)) + + if args.level == 2: + vid_list = list( + map( + lambda p: osp.join( + osp.basename(osp.dirname(p)), osp.basename(p)), + fullpath_list)) + elif args.level == 1: + vid_list = list(map(lambda p: osp.basename(p), fullpath_list)) + + pool = Pool(args.num_worker) + pool.map( + extract_frame, + zip(fullpath_list, vid_list, range(len(vid_list)), + len(vid_list) * [args.flow_type], + len(vid_list) * [args.task])) diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/build_videos.py b/Downstream/Open-Set-Action-Recognition/tools/data/build_videos.py new file mode 100644 index 0000000..e6e1f78 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/build_videos.py @@ -0,0 +1,126 @@ +import argparse +import glob +import os +import os.path as osp +import sys +from multiprocessing import Pool + + +def encode_video(frame_dir_item): + """Encode frames to video using ffmpeg. + + Args: + frame_dir_item (list): Rawframe item containing raw frame directory + full path, rawframe directory (short) path, rawframe directory id. + + Returns: + bool: Whether synthesize video successfully. + """ + full_path, frame_dir_path, frame_dir_id = frame_dir_item + out_full_path = args.out_dir + + img_name_tmpl = args.filename_tmpl + '.' + args.in_format + img_path = osp.join(full_path, img_name_tmpl) + + out_vid_name = frame_dir_path + '.' 
+ args.ext + out_vid_path = osp.join(out_full_path, out_vid_name) + + cmd = osp.join( + f"ffmpeg -start_number {args.start_idx} -r {args.fps} -i '{img_path}' " + f"-vcodec {args.vcodec} '{out_vid_path}'") + os.system(cmd) + + print(f'{frame_dir_id} {frame_dir_path} done') + sys.stdout.flush() + return True + + +def parse_args(): + parser = argparse.ArgumentParser(description='synthesize videos') + parser.add_argument('src_dir', type=str, help='source rawframe directory') + parser.add_argument('out_dir', type=str, help='output video directory') + parser.add_argument( + '--fps', type=int, default=30, help='fps of videos to be synthesized') + parser.add_argument( + '--level', + type=int, + choices=[1, 2], + default=2, + help='directory level of data') + parser.add_argument( + '--num-worker', + type=int, + default=8, + help='number of workers to build videos') + parser.add_argument( + '--in-format', + type=str, + default='jpg', + choices=['jpg', 'png'], + help='input format') + parser.add_argument( + '--start-idx', type=int, default=0, help='starting index of rawframes') + parser.add_argument( + '--filename-tmpl', + type=str, + default='img_%05d', + help='filename template of rawframes') + parser.add_argument( + '--vcodec', type=str, default='mpeg4', help='coding method of videos') + parser.add_argument( + '--ext', + type=str, + default='mp4', + choices=['mp4', 'avi'], + help='video file extensions') + parser.add_argument('--num-gpu', type=int, default=8, help='number of GPU') + parser.add_argument( + '--resume', + action='store_true', + default=False, + help='resume optical flow extraction instead of overwriting') + args = parser.parse_args() + + return args + + +if __name__ == '__main__': + args = parse_args() + + if not osp.isdir(args.out_dir): + print(f'Creating folder: {args.out_dir}') + os.makedirs(args.out_dir) + + if args.level == 2: + classes = os.listdir(args.src_dir) + for classname in classes: + new_dir = osp.join(args.out_dir, classname) + if not osp.isdir(new_dir): + print(f'Creating folder: {new_dir}') + os.makedirs(new_dir) + + print('Reading rgb frames from folder: ', args.src_dir) + print('Input format of rgb frames: ', args.in_format) + fullpath_list = glob.glob(args.src_dir + '/*' * args.level) + done_fullpath_list = glob.glob(args.src_dir + '/*' * args.level + '.' + + args.ext) + print('Total number of rgb frame folders found: ', len(fullpath_list)) + + if args.resume: + fullpath_list = set(fullpath_list).difference(set(done_fullpath_list)) + fullpath_list = list(fullpath_list) + print('Resuming. 
number of videos to be synthesized: ', + len(fullpath_list)) + + if args.level == 2: + frame_dir_list = list( + map( + lambda p: osp.join( + osp.basename(osp.dirname(p)), osp.basename(p)), + fullpath_list)) + elif args.level == 1: + frame_dir_list = list(map(lambda p: osp.basename(p), fullpath_list)) + + pool = Pool(args.num_worker) + pool.map(encode_video, + zip(fullpath_list, frame_dir_list, range(len(frame_dir_list)))) diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/data_check.py b/Downstream/Open-Set-Action-Recognition/tools/data/data_check.py new file mode 100644 index 0000000..6f003a8 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/data_check.py @@ -0,0 +1,50 @@ +import os, argparse +import cv2 +from tqdm import tqdm + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Check the data') + parser.add_argument('video_path', type=str, help='The video path') + parser.add_argument('dataset_list', type=str, help='The list file of dataset.') + parser.add_argument('split', type=str, choices=['train', 'test', 'val'], help='The split of the data.') + args = parser.parse_args() + + # parse the filelist into list + filelist, labels = [], [] + assert os.path.exists(args.dataset_list), 'File list does not exist! %s'%(args.dataset_list) + with open(args.dataset_list, 'r') as f: + for line in f.readlines(): + filename, label = line.strip().split(' ') + filelist.append(filename) + labels.append(label) + # checking + valid_files, invalid_files, valid_labels = [], [], [] + for filename, label in tqdm(zip(filelist, labels), total=len(filelist), desc=args.split): + videofile = os.path.join(args.video_path, filename) + if not os.path.exists(videofile): + # file not exist + invalid_files.append(filename) + else: + # file cannot be read + cap = cv2.VideoCapture(videofile) + ret, frame = cap.read() + if not ret: + invalid_files.append(filename) + else: + valid_files.append(filename) + valid_labels.append(label) + cap.release() + # print + print('Valid file number: %d, Invalid number: %d'%(len(valid_files), len(invalid_files))) + + if len(invalid_files) > 0: + tmp_listfile = os.path.join(os.path.dirname(args.dataset_list), args.dataset_list.split('/')[-1][:-4] + '_temp.txt') + with open(tmp_listfile, 'w') as f: + for filename, label in zip(valid_files, valid_labels): + f.writelines('%s %s\n'%(filename, label)) + + print('\nFollowing files are invalid: ') + for filename in invalid_files: + print(invalid_files) + \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/denormalize_proposal_file.py b/Downstream/Open-Set-Action-Recognition/tools/data/denormalize_proposal_file.py new file mode 100644 index 0000000..ded7e91 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/denormalize_proposal_file.py @@ -0,0 +1,82 @@ +import argparse +import os.path as osp + +from tools.data.parse_file_list import parse_directory + +from mmaction.localization import load_localize_proposal_file + + +def process_norm_proposal_file(norm_proposal_file, frame_dict): + """Process the normalized proposal file and denormalize it. + + Args: + norm_proposal_file (str): Name of normalized proposal file. + frame_dict (dict): Information of frame folders. 
+ """ + proposal_file = norm_proposal_file.replace('normalized_', '') + norm_proposals = load_localize_proposal_file(norm_proposal_file) + + processed_proposal_list = [] + for idx, norm_proposal in enumerate(norm_proposals): + video_id = norm_proposal[0] + frame_info = frame_dict[video_id] + num_frames = frame_info[1] + frame_path = osp.basename(frame_info[0]) + + gt = [[ + int(x[0]), + int(float(x[1]) * num_frames), + int(float(x[2]) * num_frames) + ] for x in norm_proposal[2]] + + proposal = [[ + int(x[0]), + float(x[1]), + float(x[2]), + int(float(x[3]) * num_frames), + int(float(x[4]) * num_frames) + ] for x in norm_proposal[3]] + + gt_dump = '\n'.join(['{} {} {}'.format(*x) for x in gt]) + gt_dump += '\n' if len(gt) else '' + proposal_dump = '\n'.join( + ['{} {:.04f} {:.04f} {} {}'.format(*x) for x in proposal]) + proposal_dump += '\n' if len(proposal) else '' + + processed_proposal_list.append( + f'# {idx}\n{frame_path}\n{num_frames}\n1' + f'\n{len(gt)}\n{gt_dump}{len(proposal)}\n{proposal_dump}') + + with open(proposal_file, 'w') as f: + f.writelines(processed_proposal_list) + + +def parse_args(): + parser = argparse.ArgumentParser(description='Denormalize proposal file') + parser.add_argument( + 'dataset', + type=str, + choices=['thumos14'], + help='dataset to be denormalize proposal file') + parser.add_argument( + '--norm-proposal-file', + type=str, + help='normalized proposal file to be denormalize') + parser.add_argument( + '--data-prefix', + type=str, + help='path to a directory where rawframes are held') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + print(f'Converting from {args.norm_proposal_file}.') + frame_dict = parse_directory(args.data_prefix) + process_norm_proposal_file(args.norm_proposal_file, frame_dict) + + +if __name__ == '__main__': + main() diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/extract_audio.py b/Downstream/Open-Set-Action-Recognition/tools/data/extract_audio.py new file mode 100644 index 0000000..1840bc4 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/extract_audio.py @@ -0,0 +1,61 @@ +import argparse +import glob +import os +import os.path as osp +from multiprocessing import Pool + +import mmcv + + +def extract_audio_wav(line): + """Extract the audio wave from video streams using FFMPEG.""" + video_id, _ = osp.splitext(osp.basename(line)) + video_dir = osp.dirname(line) + video_rel_dir = osp.relpath(video_dir, args.root) + dst_dir = osp.join(args.dst_root, video_rel_dir) + os.popen(f'mkdir -p {dst_dir}') + try: + if osp.exists(f'{dst_dir}/{video_id}.wav'): + return + cmd = f'ffmpeg -i ./{line} -map 0:a -y {dst_dir}/{video_id}.wav' + os.popen(cmd) + except BaseException: + with open('extract_wav_err_file.txt', 'a+') as f: + f.write(f'{line}\n') + + +def parse_args(): + parser = argparse.ArgumentParser(description='Extract audios') + parser.add_argument('root', type=str, help='source video directory') + parser.add_argument('dst_root', type=str, help='output audio directory') + parser.add_argument( + '--level', type=int, default=2, help='directory level of data') + parser.add_argument( + '--ext', + type=str, + default='mp4', + choices=['avi', 'mp4', 'webm'], + help='video file extensions') + parser.add_argument( + '--num-worker', type=int, default=8, help='number of workers') + args = parser.parse_args() + + return args + + +if __name__ == '__main__': + args = parse_args() + + mmcv.mkdir_or_exist(args.out_dir) + + print('Reading videos from folder: ', args.src_dir) + 
print('Extension of videos: ', args.ext) + fullpath_list = glob.glob(args.src_dir + '/*' * args.level + '.' + + args.ext) + done_fullpath_list = glob.glob(args.out_dir + '/*' * args.level + '.wav') + print('Total number of videos found: ', len(fullpath_list)) + print('Total number of videos extracted finished: ', + len(done_fullpath_list)) + + pool = Pool(args.num_worker) + pool.map(extract_audio_wav, fullpath_list) diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/gym/README.md b/Downstream/Open-Set-Action-Recognition/tools/data/gym/README.md new file mode 100644 index 0000000..a86c855 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/gym/README.md @@ -0,0 +1,106 @@ +# Preparing GYM + +## Introduction + +``` +@inproceedings{shao2020finegym, + title={Finegym: A hierarchical video dataset for fine-grained action understanding}, + author={Shao, Dian and Zhao, Yue and Dai, Bo and Lin, Dahua}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={2616--2625}, + year={2020} +} +``` + +For basic dataset information, please refer to the official [project](https://sdolivia.github.io/FineGym/) and the [paper](https://arxiv.org/abs/2004.06704). +We currently provide the data pre-processing pipeline for GYM99. +Before we start, please make sure that the directory is located at `$MMACTION2/tools/data/gym/`. + +## Step 1. Prepare Annotations + +First of all, you can run the following script to prepare annotations. + +```shell +bash download_annotations.sh +``` + +## Step 2. Prepare Videos + +Then, you can run the following script to prepare videos. +The codes are adapted from the [official crawler](https://github.com/activitynet/ActivityNet/tree/master/Crawler/Kinetics). Note that this might take a long time. + +```shell +bash download_videos.sh +``` + +## Step 3. Trim Videos into Events. + +First, you need to trim long videos into events based on the annotation of GYM with the following scripts. + +```shell +python trim_event.py +``` + +## Step 4. Trim Events into Subactions. + +Then, you need to trim events into subactions based on the annotation of GYM with the following scripts. We use the two stage trimming for better efficiency (trimming multiple short clips from a long video can be extremely inefficient, since you need to go over the video many times). + +```shell +python trim_subaction.py +``` + +## Step 5. Extract RGB and Flow + +This part is **optional** if you only want to use the video loader for RGB model training. + +Before extracting, please refer to [install.md](/docs/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow). + +Run the following script to extract both rgb and flow using "tvl1" algorithm. + +```shell +bash extract_frames.sh +``` + +## Step 6. Generate file list for GYM99 based on extracted subactions. + +You can use the following script to generate train / val lists for GYM99. + +```shell +python generate_file_list.py +``` + +## Step 7. Folder Structure + +After the whole data pipeline for GYM preparation. You can get the subaction clips, event clips, raw videos and GYM99 train/val lists. 
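+
+If you want a quick sanity check before training, the short sketch below verifies that every clip referenced by the generated lists actually exists on disk. It is only an illustration, not part of the provided tooling, and assumes the default paths used in the steps above, the `<clip>.mp4 <label>` line format written by `generate_file_list.py`, and that it is run from `tools/data/gym/`:
+
+```python
+import os.path as osp
+
+anno_root = '../../../data/gym/annotations'
+subaction_root = '../../../data/gym/subactions'
+
+for list_name in ['gym99_train.txt', 'gym99_val.txt']:
+    missing = []
+    with open(osp.join(anno_root, list_name)) as f:
+        for line in f:
+            if not line.strip():
+                continue
+            clip, label = line.strip().split()
+            # each entry is "<clip>.mp4 <label>", so clip is the file name
+            if not osp.exists(osp.join(subaction_root, clip)):
+                missing.append(clip)
+    print(f'{list_name}: {len(missing)} listed clips are missing')
+```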
+ +In the context of the whole project (for GYM only), the full folder structure will look like: + +``` +mmaction2 +├── mmaction +├── tools +├── configs +├── data +│ ├── gym +| | ├── annotations +| | | ├── gym99_train_org.txt +| | | ├── gym99_val_org.txt +| | | ├── gym99_train.txt +| | | ├── gym99_val.txt +| | | ├── annotation.json +| | | └── event_annotation.json +│ │ ├── videos +| | | ├── 0LtLS9wROrk.mp4 +| | | ├── ... +| | | └── zfqS-wCJSsw.mp4 +│ │ ├── events +| | | ├── 0LtLS9wROrk_E_002407_002435.mp4 +| | | ├── ... +| | | └── zfqS-wCJSsw_E_006732_006824.mp4 +│ │ └── subactions +| | ├── 0LtLS9wROrk_E_002407_002435_A_0003_0005.mp4 +| | ├── ... +| | └── zfqS-wCJSsw_E_006244_006252_A_0000_0007.mp4 +``` + +For training and evaluating on GYM, please refer to [getting_started](/docs/getting_started.md). diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/gym/download.py b/Downstream/Open-Set-Action-Recognition/tools/data/gym/download.py new file mode 100644 index 0000000..51b54c2 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/gym/download.py @@ -0,0 +1,99 @@ +# This scripts is copied from +# https://github.com/activitynet/ActivityNet/blob/master/Crawler/Kinetics/download.py # noqa: E501 +# The code is licensed under the MIT licence. +import argparse +import os +import ssl +import subprocess + +import mmcv +from joblib import Parallel, delayed + +ssl._create_default_https_context = ssl._create_unverified_context + + +def download(video_identifier, + output_filename, + num_attempts=5, + url_base='https://www.youtube.com/watch?v='): + """Download a video from youtube if exists and is not blocked. + arguments: + --------- + video_identifier: str + Unique YouTube video identifier (11 characters) + output_filename: str + File path where the video will be stored. + """ + # Defensive argument checking. + assert isinstance(video_identifier, str), 'video_identifier must be string' + assert isinstance(output_filename, str), 'output_filename must be string' + assert len(video_identifier) == 11, 'video_identifier must have length 11' + + status = False + + if not os.path.exists(output_filename): + command = [ + 'youtube-dl', '--quiet', '--no-warnings', '--no-check-certificate', + '-f', 'mp4', '-o', + '"%s"' % output_filename, + '"%s"' % (url_base + video_identifier) + ] + command = ' '.join(command) + print(command) + attempts = 0 + while True: + try: + subprocess.check_output( + command, shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError: + attempts += 1 + if attempts == num_attempts: + return status, 'Fail' + else: + break + # Check if the video was successfully saved. + status = os.path.exists(output_filename) + return status, 'Downloaded' + + +def download_wrapper(youtube_id, output_dir): + """Wrapper for parallel processing purposes.""" + # we do this to align with names in annotations + output_filename = os.path.join(output_dir, youtube_id + '.mp4') + if os.path.exists(output_filename): + status = tuple([youtube_id, True, 'Exists']) + return status + + downloaded, log = download(youtube_id, output_filename) + status = tuple([youtube_id, downloaded, log]) + return status + + +def main(input, output_dir, num_jobs=24): + # Reading and parsing ActivityNet. + youtube_ids = mmcv.load(input).keys() + # Creates folders where videos will be saved later. + if not os.path.exists(output_dir): + os.makedirs(output_dir) + # Download all clips. 
+ if num_jobs == 1: + status_list = [] + for index in youtube_ids: + status_list.append(download_wrapper(index, output_dir)) + else: + status_list = Parallel(n_jobs=num_jobs)( + delayed(download_wrapper)(index, output_dir) + for index in youtube_ids) + + # Save download report. + mmcv.dump(status_list, 'download_report.json') + + +if __name__ == '__main__': + description = 'Helper script for downloading GYM videos.' + p = argparse.ArgumentParser(description=description) + p.add_argument('input', type=str, help='The gym annotation file') + p.add_argument( + 'output_dir', type=str, help='Output directory to save videos.') + p.add_argument('-n', '--num-jobs', type=int, default=24) + main(**vars(p.parse_args())) diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/gym/download_annotations.sh b/Downstream/Open-Set-Action-Recognition/tools/data/gym/download_annotations.sh new file mode 100644 index 0000000..4922104 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/gym/download_annotations.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +set -e + +DATA_DIR="../../../data/gym/annotations" + +if [[ ! -d "${DATA_DIR}" ]]; then + echo "${DATA_DIR} does not exist. Creating"; + mkdir -p ${DATA_DIR} +fi + +wget https://sdolivia.github.io/FineGym/resources/dataset/finegym_annotation_info_v1.0.json -O $DATA_DIR/annotation.json +wget https://sdolivia.github.io/FineGym/resources/dataset/gym99_train_element_v1.0.txt -O $DATA_DIR/gym99_train_org.txt +wget https://sdolivia.github.io/FineGym/resources/dataset/gym99_val_element.txt -O $DATA_DIR/gym99_val_org.txt diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/gym/download_videos.sh b/Downstream/Open-Set-Action-Recognition/tools/data/gym/download_videos.sh new file mode 100644 index 0000000..ce8d901 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/gym/download_videos.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +# set up environment +conda env create -f environment.yml +source activate gym +pip install --upgrade youtube-dl + +DATA_DIR="../../../data/gym" +ANNO_DIR="../../../data/gym/annotations" +python download_ytdl.py ${ANNO_DIR}/annotation.json ${DATA_DIR}/videos + +source deactivate gym +conda remove -n gym --all diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/gym/download_ytdl.py b/Downstream/Open-Set-Action-Recognition/tools/data/gym/download_ytdl.py new file mode 100644 index 0000000..e718f9d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/gym/download_ytdl.py @@ -0,0 +1,133 @@ +# This scripts is copied from +# https://github.com/activitynet/ActivityNet/blob/master/Crawler/Kinetics/download.py # noqa: E501 +# The code is licensed under the MIT licence. +import argparse +import os +import ssl +import subprocess + +import mmcv +from joblib import Parallel, delayed +import youtube_dl +import glob + +ssl._create_default_https_context = ssl._create_unverified_context + + +# def download(video_identifier, +# output_filename, +# num_attempts=5, +# url_base='https://www.youtube.com/watch?v='): +# """Download a video from youtube if exists and is not blocked. +# arguments: +# --------- +# video_identifier: str +# Unique YouTube video identifier (11 characters) +# output_filename: str +# File path where the video will be stored. +# """ +# # Defensive argument checking. 
+# assert isinstance(video_identifier, str), 'video_identifier must be string' +# assert isinstance(output_filename, str), 'output_filename must be string' +# assert len(video_identifier) == 11, 'video_identifier must have length 11' + +# status = False + +# if not os.path.exists(output_filename): +# command = [ +# 'youtube-dl', '--quiet', '--no-warnings', '--no-check-certificate', +# '-f', 'mp4', '-o', +# '"%s"' % output_filename, +# '"%s"' % (url_base + video_identifier) +# ] +# command = ' '.join(command) +# print(command) +# attempts = 0 +# while True: +# try: +# subprocess.check_output( +# command, shell=True, stderr=subprocess.STDOUT) +# except subprocess.CalledProcessError: +# attempts += 1 +# if attempts == num_attempts: +# return status, 'Fail' +# else: +# break +# # Check if the video was successfully saved. +# status = os.path.exists(output_filename) +# return status, 'Downloaded' + + +# def download_wrapper(youtube_id, output_dir): +# """Wrapper for parallel processing purposes.""" +# # we do this to align with names in annotations +# output_filename = os.path.join(output_dir, youtube_id + '.mp4') +# if os.path.exists(output_filename): +# status = tuple([youtube_id, True, 'Exists']) +# return status + +# downloaded, log = download(youtube_id, output_filename) +# status = tuple([youtube_id, downloaded, log]) +# return status + + +def main(input, output_dir, num_jobs=24): + # Reading and parsing ActivityNet. + youtube_ids = mmcv.load(input).keys() + # Creates folders where videos will be saved later. + if not os.path.exists(output_dir): + os.makedirs(output_dir) + # Download all clips. + + ydl_opt = {'outtmpl': output_dir + '%(id)s.%(ext)s', + 'format': 'mp4', + 'quiet': True, + 'no-warnings': True, + 'ignoreerros': True, + 'no-check-certificate': True, + 'skipdownload': True} + ydl = youtube_dl.YoutubeDL(ydl_opt) + + url_list = [] + for vid in youtube_ids: + url = "https://www.youtube.com/watch?v=" + vid + "&has_verified=1" + url_list.append(url) + print('Number of videos to be downloaded: %d'%(len(url_list))) + + failed_files = './failed_list.txt' + with open(failed_files, 'w') as f: + for vid, url in zip(youtube_ids, url_list): + filename = os.path.join(output_dir, vid + '.mp4') + if not os.path.exists(filename): + f.writelines(url + '\n') + + + try: + ydl.download(url_list) + except: + print('errors ocurred!') + print("Download finished!") + all_videos = sorted(glob.glob(output_dir + '*.mp4')) + print("Number of videos: ", len(all_videos)) + + # if num_jobs == 1: + # status_list = [] + # for index in youtube_ids: + # status_list.append(download_wrapper(index, output_dir)) + # else: + # status_list = Parallel(n_jobs=num_jobs)( + # delayed(download_wrapper)(index, output_dir) + # for index in youtube_ids) + + # # Save download report. + # mmcv.dump(status_list, 'download_report.json') + + +if __name__ == '__main__': + description = 'Helper script for downloading GYM videos.' 
+ p = argparse.ArgumentParser(description=description) + p.add_argument('input', type=str, help='The gym annotation file') + p.add_argument( + 'output_dir', type=str, help='Output directory to save videos.') + p.add_argument('-n', '--num-jobs', type=int, default=24) + main(**vars(p.parse_args())) diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/gym/environment.yml b/Downstream/Open-Set-Action-Recognition/tools/data/gym/environment.yml new file mode 100644 index 0000000..86e7e1a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/gym/environment.yml @@ -0,0 +1,36 @@ +name: kinetics +channels: + - anaconda + - menpo + - conda-forge + - defaults +dependencies: + - ca-certificates=2020.1.1 + - certifi=2020.4.5.1 + - ffmpeg=2.8.6 + - libcxx=10.0.0 + - libedit=3.1.20181209 + - libffi=3.3 + - ncurses=6.2 + - openssl=1.1.1g + - pip=20.0.2 + - python=3.7.7 + - readline=8.0 + - setuptools=46.4.0 + - sqlite=3.31.1 + - tk=8.6.8 + - wheel=0.34.2 + - xz=5.2.5 + - zlib=1.2.11 + - pip: + - decorator==4.4.2 + - intel-openmp==2019.0 + - joblib==0.15.1 + - mkl==2019.0 + - numpy==1.18.4 + - olefile==0.46 + - pandas==1.0.3 + - python-dateutil==2.8.1 + - pytz==2020.1 + - six==1.14.0 + - youtube-dl==2020.5.8 diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/gym/extract_frames.sh b/Downstream/Open-Set-Action-Recognition/tools/data/gym/extract_frames.sh new file mode 100644 index 0000000..cfcc8c0 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/gym/extract_frames.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +cd ../ +python build_rawframes.py ../../data/gym/subactions/ ../../data/gym/subaction_frames/ --level 1 --flow-type tvl1 --ext mp4 --task both --new-short 256 +echo "Raw frames (RGB and tv-l1) Generated" + +cd gym/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/gym/generate_file_list.py b/Downstream/Open-Set-Action-Recognition/tools/data/gym/generate_file_list.py new file mode 100644 index 0000000..bbbaeae --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/gym/generate_file_list.py @@ -0,0 +1,48 @@ +import os +import os.path as osp + +annotation_root = '../../../data/gym/annotations' +data_root = '../../../data/gym/subactions' +frame_data_root = '../../../data/gym/subaction_frames' + +videos = os.listdir(data_root) +videos = set(videos) + +train_file_org = osp.join(annotation_root, 'gym99_train_org.txt') +val_file_org = osp.join(annotation_root, 'gym99_val_org.txt') +train_file = osp.join(annotation_root, 'gym99_train.txt') +val_file = osp.join(annotation_root, 'gym99_val.txt') +train_frame_file = osp.join(annotation_root, 'gym99_train_frame.txt') +val_frame_file = osp.join(annotation_root, 'gym99_val_frame.txt') + +train_org = open(train_file_org).readlines() +train_org = [x.strip().split() for x in train_org] +train = [x for x in train_org if x[0] + '.mp4' in videos] +if osp.exists(frame_data_root): + train_frames = [] + for line in train: + length = len(os.listdir(osp.join(frame_data_root, line[0]))) + train_frames.append([line[0], str(length // 3), line[1]]) + train_frames = [' '.join(x) for x in train_frames] + with open(train_frame_file, 'w') as fout: + fout.write('\n'.join(train_frames)) + +train = [x[0] + '.mp4 ' + x[1] for x in train] +with open(train_file, 'w') as fout: + fout.write('\n'.join(train)) + +val_org = open(val_file_org).readlines() +val_org = [x.strip().split() for x in val_org] +val = [x for x in val_org if x[0] + '.mp4' in videos] +if osp.exists(frame_data_root): + val_frames = [] + for 
line in val: + length = len(os.listdir(osp.join(frame_data_root, line[0]))) + val_frames.append([line[0], str(length // 3), line[1]]) + val_frames = [' '.join(x) for x in val_frames] + with open(val_frame_file, 'w') as fout: + fout.write('\n'.join(val_frames)) + +val = [x[0] + '.mp4 ' + x[1] for x in val] +with open(val_file, 'w') as fout: + fout.write('\n'.join(val)) diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/gym/trim_event.py b/Downstream/Open-Set-Action-Recognition/tools/data/gym/trim_event.py new file mode 100644 index 0000000..2497323 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/gym/trim_event.py @@ -0,0 +1,60 @@ +import os +import os.path as osp +import subprocess + +import mmcv + +data_root = '../../../data/gym' +video_root = f'{data_root}/videos' +anno_root = f'{data_root}/annotations' +anno_file = f'{anno_root}/annotation.json' + +event_anno_file = f'{anno_root}/event_annotation.json' +event_root = f'{data_root}/events' + +videos = os.listdir(video_root) +videos = set(videos) +annotation = mmcv.load(anno_file) +event_annotation = {} + +mmcv.mkdir_or_exist(event_root) + +for k, v in annotation.items(): + if k + '.mp4' not in videos: + print(f'video {k} has not been downloaded') + continue + + video_path = osp.join(video_root, k + '.mp4') + + for event_id, event_anno in v.items(): + timestamps = event_anno['timestamps'][0] + start_time, end_time = timestamps + event_name = k + '_' + event_id + + output_filename = event_name + '.mp4' + + if not os.path.exists(osp.join(event_root, output_filename)): + command = [ + 'ffmpeg', '-i', + '"%s"' % video_path, '-ss', + str(start_time), '-t', + str(end_time - start_time), '-c:v', 'libx264', '-c:a', 'copy', + '-threads', '8', '-loglevel', 'panic', + '"%s"' % osp.join(event_root, output_filename) + ] + command = ' '.join(command) + try: + subprocess.check_output( + command, shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError: + print( + f'Trimming of the Event {event_name} of Video {k} Failed', + flush=True) + else: + print("Result event file exists, ignored!") + + segments = event_anno['segments'] + if segments is not None: + event_annotation[event_name] = segments + +mmcv.dump(event_annotation, event_anno_file) diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/gym/trim_subaction.py b/Downstream/Open-Set-Action-Recognition/tools/data/gym/trim_subaction.py new file mode 100644 index 0000000..fa705e9 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/gym/trim_subaction.py @@ -0,0 +1,51 @@ +import os +import os.path as osp +import subprocess + +import mmcv + +data_root = '../../../data/gym' +anno_root = f'{data_root}/annotations' + +event_anno_file = f'{anno_root}/event_annotation.json' +event_root = f'{data_root}/events' +subaction_root = f'{data_root}/subactions' + +events = os.listdir(event_root) +events = set(events) +annotation = mmcv.load(event_anno_file) + +mmcv.mkdir_or_exist(subaction_root) + +for k, v in annotation.items(): + if k + '.mp4' not in events: + print(f'video {k[:11]} has not been downloaded ' + f'or the event clip {k} not generated') + continue + + video_path = osp.join(event_root, k + '.mp4') + + for subaction_id, subaction_anno in v.items(): + timestamps = subaction_anno['timestamps'] + start_time, end_time = timestamps[0][0], timestamps[-1][1] + subaction_name = k + '_' + subaction_id + + output_filename = subaction_name + '.mp4' + + command = [ + 'ffmpeg', '-i', + '"%s"' % video_path, '-ss', + str(start_time), '-t', + 
str(end_time - start_time), '-c:v', 'libx264', '-c:a', 'copy', + '-threads', '8', '-loglevel', 'panic', + '"%s"' % osp.join(subaction_root, output_filename) + ] + command = ' '.join(command) + try: + subprocess.check_output( + command, shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError: + print( + f'Trimming of the Subaction {subaction_name} of Event ' + f'{k} Failed', + flush=True) diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/hmdb51/README.md b/Downstream/Open-Set-Action-Recognition/tools/data/hmdb51/README.md new file mode 100644 index 0000000..ff2bf01 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/hmdb51/README.md @@ -0,0 +1,123 @@ +# Preparing HMDB51 + +## Introduction + +``` +@article{Kuehne2011HMDBAL, + title={HMDB: A large video database for human motion recognition}, + author={Hilde Kuehne and Hueihan Jhuang and E. Garrote and T. Poggio and Thomas Serre}, + journal={2011 International Conference on Computer Vision}, + year={2011}, + pages={2556-2563} +} +``` + +For basic dataset information, you can refer to the dataset [website](https://serre-lab.clps.brown.edu/resource/hmdb-a-large-human-motion-database/). +Before we start, please make sure that the directory is located at `$MMACTION2/tools/data/hmdb51/`. + +To run the bash scripts below, you need to install `unrar`. you can install it by `sudo apt-get install unrar`, +or refer to [this repo](https://github.com/innerlee/setup) by following the usage and taking [`zzunrar.sh`](https://github.com/innerlee/setup/blob/master/zzunrar.sh) +script for easy installation without sudo. + +## Step 1. Prepare Annotations + +First of all, you can run the following script to prepare annotations. + +```shell +bash download_annotations.sh +``` + +## Step 2. Prepare Videos + +Then, you can run the following script to prepare videos. + +```shell +bash download_videos.sh +``` + +## Step 3. Extract RGB and Flow + +This part is **optional** if you only want to use the video loader. + +Before extracting, please refer to [install.md](/docs/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow). + +If you have plenty of SSD space, then we recommend extracting frames there for better I/O performance. + +You can run the following script to soft link SSD. + +```shell +# execute these two line (Assume the SSD is mounted at "/mnt/SSD/") +mkdir /mnt/SSD/hmdb51_extracted/ +ln -s /mnt/SSD/hmdb51_extracted/ ../../../data/hmdb51/rawframes +``` + +If you only want to play with RGB frames (since extracting optical flow can be time-consuming), consider running the following script to extract **RGB-only** frames using denseflow. + +```shell +bash extract_rgb_frames.sh +``` + +If you didn't install denseflow, you can still extract RGB frames using OpenCV by the following script, but it will keep the original size of the images. + +```shell +bash extract_rgb_frames_opencv.sh +``` + +If both are required, run the following script to extract frames using "tvl1" algorithm. + +```shell +bash extract_frames.sh +``` + +## Step 4. Generate File List + +you can run the follow script to generate file list in the format of rawframes and videos. + +```shell +bash generate_rawframes_filelist.sh +bash generate_videos_filelist.sh +``` + +## Step 5. Check Directory Structure + +After the whole data process for HMDB51 preparation, +you will get the rawframes (RGB + Flow), videos and annotation files for HMDB51. 
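+
+As an optional sanity check (a minimal sketch, not part of the provided scripts, assuming the default `img_`/`flow_x_`/`flow_y_` prefixes shown in the tree below and that it is run from `tools/data/hmdb51/`), you can confirm that every clip folder contains RGB frames and matching numbers of x/y flow frames, mirroring the consistency that `parse_directory` checks when file lists are built:
+
+```python
+import glob
+import os
+import os.path as osp
+
+rawframe_root = '../../../data/hmdb51/rawframes'
+
+# level-2 layout: rawframes/<class>/<clip>/
+for clip_dir in glob.glob(osp.join(rawframe_root, '*', '*')):
+    files = os.listdir(clip_dir)
+    num_rgb = len([x for x in files if x.startswith('img_')])
+    num_flow_x = len([x for x in files if x.startswith('flow_x_')])
+    num_flow_y = len([x for x in files if x.startswith('flow_y_')])
+    if num_rgb == 0 or num_flow_x != num_flow_y:
+        print(f'check {clip_dir}: rgb={num_rgb}, '
+              f'flow_x={num_flow_x}, flow_y={num_flow_y}')
+```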
+ +In the context of the whole project (for HMDB51 only), the folder structure will look like: + +``` +mmaction2 +├── mmaction +├── tools +├── configs +├── data +│ ├── hmdb51 +│ │ ├── hmdb51_{train,val}_split_{1,2,3}_rawframes.txt +│ │ ├── hmdb51_{train,val}_split_{1,2,3}_videos.txt +│ │ ├── annotations +│ │ ├── videos +│ │ │ ├── brush_hair +│ │ │ │ ├── April_09_brush_hair_u_nm_np1_ba_goo_0.avi + +│ │ │ ├── wave +│ │ │ │ ├── 20060723sfjffbartsinger_wave_f_cm_np1_ba_med_0.avi +│ │ ├── rawframes +│ │ │ ├── brush_hair +│ │ │ │ ├── April_09_brush_hair_u_nm_np1_ba_goo_0 +│ │ │ │ │ ├── img_00001.jpg +│ │ │ │ │ ├── img_00002.jpg +│ │ │ │ │ ├── ... +│ │ │ │ │ ├── flow_x_00001.jpg +│ │ │ │ │ ├── flow_x_00002.jpg +│ │ │ │ │ ├── ... +│ │ │ │ │ ├── flow_y_00001.jpg +│ │ │ │ │ ├── flow_y_00002.jpg +│ │ │ ├── ... +│ │ │ ├── wave +│ │ │ │ ├── 20060723sfjffbartsinger_wave_f_cm_np1_ba_med_0 +│ │ │ │ ├── ... +│ │ │ │ ├── winKen_wave_u_cm_np1_ri_bad_1 + +``` + +For training and evaluating on HMDB51, please refer to [getting_started.md](/docs/getting_started.md). diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/hmdb51/download_annotations.sh b/Downstream/Open-Set-Action-Recognition/tools/data/hmdb51/download_annotations.sh new file mode 100644 index 0000000..f168cb1 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/hmdb51/download_annotations.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +set -e + +DATA_DIR="../../../data/hmdb51/annotations" + +if [[ ! -d "${DATA_DIR}" ]]; then + echo "${DATA_DIR} does not exist. Creating"; + mkdir -p ${DATA_DIR} +fi + +cd ${DATA_DIR} +wget http://serre-lab.clps.brown.edu/wp-content/uploads/2013/10/test_train_splits.rar --no-check-certificate + +# sudo apt-get install unrar +unrar x test_train_splits.rar +rm test_train_splits.rar + +mv testTrainMulti_7030_splits/*.txt ./ +rmdir testTrainMulti_7030_splits + +cd - diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/hmdb51/download_videos.sh b/Downstream/Open-Set-Action-Recognition/tools/data/hmdb51/download_videos.sh new file mode 100644 index 0000000..ea5d907 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/hmdb51/download_videos.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +set -e + +DATA_DIR="../../../data/hmdb51/" + +if [[ ! -d "${DATA_DIR}" ]]; then + echo "${DATA_DIR} does not exist. 
Creating"; + mkdir -p ${DATA_DIR} +fi + +cd ${DATA_DIR} + +mkdir -p ./videos +cd ./videos + +wget http://serre-lab.clps.brown.edu/wp-content/uploads/2013/10/hmdb51_org.rar --no-check-certificate + +# sudo apt-get install unrar +unrar x ./hmdb51_org.rar +rm ./hmdb51_org.rar + +# extract all rar files with full path +for file in *.rar; do unrar x $file; done + +rm ./*.rar +cd "../../../tools/data/hmdb51" diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/hmdb51/extract_frames.sh b/Downstream/Open-Set-Action-Recognition/tools/data/hmdb51/extract_frames.sh new file mode 100644 index 0000000..fb63c16 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/hmdb51/extract_frames.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +cd ../ +python build_rawframes.py ../../data/hmdb51/videos/ ../../data/hmdb51/rawframes/ --task both --level 2 --flow-type tvl1 +echo "Raw frames (RGB and Flow) Generated" +cd hmdb51/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/hmdb51/extract_rgb_frames.sh b/Downstream/Open-Set-Action-Recognition/tools/data/hmdb51/extract_rgb_frames.sh new file mode 100644 index 0000000..9e935b1 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/hmdb51/extract_rgb_frames.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +cd ../ +python build_rawframes.py ../../data/hmdb51/videos/ ../../data/hmdb51/rawframes/ --task rgb --level 2 --ext avi +echo "Genearte raw frames (RGB only)" + +cd hmdb51/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/hmdb51/extract_rgb_frames_opencv.sh b/Downstream/Open-Set-Action-Recognition/tools/data/hmdb51/extract_rgb_frames_opencv.sh new file mode 100644 index 0000000..91ff4f3 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/hmdb51/extract_rgb_frames_opencv.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +cd ../ +python build_rawframes.py ../../data/hmdb51/videos/ ../../data/hmdb51/rawframes/ --task rgb --level 2 --ext avi --use-opencv +echo "Genearte raw frames (RGB only)" + +cd hmdb51/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/hmdb51/generate_rawframes_filelist.sh b/Downstream/Open-Set-Action-Recognition/tools/data/hmdb51/generate_rawframes_filelist.sh new file mode 100644 index 0000000..bc20187 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/hmdb51/generate_rawframes_filelist.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +cd ../../../ + +PYTHONPATH=. python tools/data/build_file_list.py hmdb51 data/hmdb51/rawframes/ --level 2 --format rawframes --shuffle +echo "Filelist for rawframes generated." + +cd tools/data/hmdb51/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/hmdb51/generate_videos_filelist.sh b/Downstream/Open-Set-Action-Recognition/tools/data/hmdb51/generate_videos_filelist.sh new file mode 100644 index 0000000..4acd28f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/hmdb51/generate_videos_filelist.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +cd ../../../ + +PYTHONPATH=. python tools/data/build_file_list.py hmdb51 data/hmdb51/videos/ --level 2 --format videos --shuffle +echo "Filelist for videos generated." 
+ +cd tools/data/hmdb51/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/hvu/README.md b/Downstream/Open-Set-Action-Recognition/tools/data/hvu/README.md new file mode 100644 index 0000000..dce2d9e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/hvu/README.md @@ -0,0 +1,122 @@ +# Preparing HVU + +## Introduction + +``` +@article{Diba2019LargeSH, + title={Large Scale Holistic Video Understanding}, + author={Ali Diba and M. Fayyaz and Vivek Sharma and Manohar Paluri and Jurgen Gall and R. Stiefelhagen and L. Gool}, + journal={arXiv: Computer Vision and Pattern Recognition}, + year={2019} +} +``` + +For basic dataset information, please refer to the official [project](https://github.com/holistic-video-understanding/HVU-Dataset/) and the [paper](https://arxiv.org/abs/1904.11451). +Before we start, please make sure that the directory is located at `$MMACTION2/tools/data/hvu/`. + +## Step 1. Prepare Annotations + +First of all, you can run the following script to prepare annotations. + +```shell +bash download_annotations.sh +``` + +Besides, you need to run the following command to parse the tag list of HVU. + +```shell +python parse_tag_list.py +``` + +## Step 2. Prepare Videos + +Then, you can run the following script to prepare videos. +The codes are adapted from the [official crawler](https://github.com/activitynet/ActivityNet/tree/master/Crawler/Kinetics). Note that this might take a long time. + +```shell +bash download_videos.sh +``` + +## Step 3. Extract RGB and Flow + +This part is **optional** if you only want to use the video loader. + +Before extracting, please refer to [install.md](/docs/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow). + +You can use the following script to extract both RGB and Flow frames. + +```shell +bash extract_frames.sh +``` + +By default, we generate frames with short edge resized to 256. +More details can be found in [data_preparation](/docs/data_preparation.md) + +## Step 4. Generate File List + +You can run the follow scripts to generate file list in the format of videos and rawframes, respectively. + +```shell +bash generate_videos_filelist.sh +# execute the command below when rawframes are ready +bash generate_rawframes_filelist.sh +``` + +## Step 5. Generate File List for Each Individual Tag Categories. + +This part is **optional** if you don't want to train models on HVU for a specific tag category. + +The file list generated in step 4 contains labels of different categories. These file lists can only be +handled with HVUDataset and used for multi-task learning of different tag categories. The component +`LoadHVULabel` is needed to load the multi-category tags, and the `HVULoss` should be used to train +the model. + +If you only want to train video recognition models for a specific tag category, i.e. you want to train +a recognition model on HVU which only handles tags in the category `action`, we recommend you to use +the following command to generate file lists for the specific tag category. The new list, which only +contains tags of a specific category, can be handled with `VideoDataset` or `RawframeDataset`. The +recognition models can be trained with `BCELossWithLogits`. + +The following command generates file list for the tag category ${category}, note that the tag category you +specified should be in the 6 tag categories available in HVU: ['action', 'attribute', 'concept', 'event', +'object', 'scene']. 
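+
+Conceptually, this step only filters the multi-category annotations produced in Step 4 down to a single category. The sketch below illustrates that filtering on the JSON list format written by `generate_file_list.py` (where `label` is a dict from tag category to a list of tag indices); it is an illustration of the idea under those assumptions, not a substitute for the bundled script:
+
+```python
+import os.path as osp
+
+import mmcv
+
+category = 'action'  # one of the 6 HVU tag categories
+src = '../../../data/hvu/hvu_train.json'  # produced in Step 4
+dst = osp.join(
+    osp.dirname(src),
+    osp.basename(src).replace('hvu', f'hvu_{category}', 1))
+
+items = mmcv.load(src)
+# keep only items annotated for the chosen category and flatten the label
+# to that category's tag indices
+sub_items = [
+    dict(item, label=item['label'][category])
+    for item in items if category in item['label']
+]
+mmcv.dump(sub_items, dst)
+```
+
+In practice, use the provided `generate_sub_file_list.py`, invoked as follows: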
+ +```shell +python generate_sub_file_list.py path/to/filelist.json ${category} +``` + +The filename of the generated file list for ${category} is generated by replacing `hvu` in the original +filename with `hvu_${category}`. For example, if the original filename is `hvu_train.json`, the filename +of the file list for action is `hvu_action_train.json`. + + +## Step 6. Folder Structure + +After the whole data pipeline for HVU preparation. +you can get the rawframes (RGB + Flow), videos and annotation files for HVU. + +In the context of the whole project (for HVU only), the full folder structure will look like: + +``` +mmaction2 +├── mmaction +├── tools +├── configs +├── data +│ ├── hvu +│ │ ├── hvu_train_video.json +│ │ ├── hvu_val_video.json +│ │ ├── hvu_train.json +│ │ ├── hvu_val.json +│ │ ├── annotations +│ │ ├── videos_train +│ │ │ ├── OLpWTpTC4P8_000570_000670.mp4 +│ │ │ ├── xsPKW4tZZBc_002330_002430.mp4 +│ │ │ ├── ... +│ │ ├── videos_val +│ │ ├── rawframes_train +│ │ ├── rawframes_val + +``` + +For training and evaluating on HVU, please refer to [getting_started](/docs/getting_started.md). diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/hvu/download.py b/Downstream/Open-Set-Action-Recognition/tools/data/hvu/download.py new file mode 100644 index 0000000..19fd8ad --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/hvu/download.py @@ -0,0 +1,200 @@ +# ------------------------------------------------------------------------------ +# Adapted from https://github.com/activitynet/ActivityNet/ +# Original licence: Copyright (c) Microsoft, under the MIT License. +# ------------------------------------------------------------------------------ + +import argparse +import glob +import os +import shutil +import ssl +import subprocess +import uuid + +import mmcv +from joblib import Parallel, delayed + +ssl._create_default_https_context = ssl._create_unverified_context +args = None + + +def create_video_folders(dataset, output_dir, tmp_dir): + if not os.path.exists(output_dir): + os.makedirs(output_dir) + if not os.path.exists(tmp_dir): + os.makedirs(tmp_dir) + + +def construct_video_filename(item, trim_format, output_dir): + """Given a dataset row, this function constructs the output filename for a + given video.""" + youtube_id, start_time, end_time = item + start_time, end_time = int(start_time * 10), int(end_time * 10) + basename = '%s_%s_%s.mp4' % (youtube_id, trim_format % start_time, + trim_format % end_time) + output_filename = os.path.join(output_dir, basename) + return output_filename + + +def download_clip(video_identifier, + output_filename, + start_time, + end_time, + tmp_dir='/tmp/hvu', + num_attempts=5, + url_base='https://www.youtube.com/watch?v='): + """Download a video from youtube if exists and is not blocked. + arguments: + --------- + video_identifier: str + Unique YouTube video identifier (11 characters) + output_filename: str + File path where the video will be stored. + start_time: float + Indicates the begining time in seconds from where the video + will be trimmed. + end_time: float + Indicates the ending time in seconds of the trimmed video. + """ + # Defensive argument checking. 
+ assert isinstance(video_identifier, str), 'video_identifier must be string' + assert isinstance(output_filename, str), 'output_filename must be string' + assert len(video_identifier) == 11, 'video_identifier must have length 11' + + status = False + tmp_filename = os.path.join(tmp_dir, '%s.%%(ext)s' % uuid.uuid4()) + + if not os.path.exists(output_filename): + if not os.path.exists(tmp_filename): + command = [ + 'youtube-dl', '--quiet', '--no-warnings', + '--no-check-certificate', '-f', 'mp4', '-o', + '"%s"' % tmp_filename, + '"%s"' % (url_base + video_identifier) + ] + command = ' '.join(command) + print(command) + attempts = 0 + while True: + try: + subprocess.check_output( + command, shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError: + attempts += 1 + if attempts == num_attempts: + return status, 'Downloading Failed' + else: + break + + tmp_filename = glob.glob('%s*' % tmp_filename.split('.')[0])[0] + # Construct command to trim the videos (ffmpeg required). + command = [ + 'ffmpeg', '-i', + '"%s"' % tmp_filename, '-ss', + str(start_time), '-t', + str(end_time - start_time), '-c:v', 'libx264', '-c:a', 'copy', + '-threads', '1', '-loglevel', 'panic', + '"%s"' % output_filename + ] + command = ' '.join(command) + try: + subprocess.check_output( + command, shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError: + return status, 'Trimming Failed' + + # Check if the video was successfully saved. + status = os.path.exists(output_filename) + os.remove(tmp_filename) + return status, 'Downloaded' + + +def download_clip_wrapper(item, trim_format, tmp_dir, output_dir): + """Wrapper for parallel processing purposes.""" + output_filename = construct_video_filename(item, trim_format, output_dir) + clip_id = os.path.basename(output_filename).split('.mp4')[0] + if os.path.exists(output_filename): + status = tuple([clip_id, True, 'Exists']) + return status + + youtube_id, start_time, end_time = item + downloaded, log = download_clip( + youtube_id, output_filename, start_time, end_time, tmp_dir=tmp_dir) + + status = tuple([clip_id, downloaded, log]) + return status + + +def parse_hvu_annotations(input_csv): + """Returns a parsed DataFrame. + arguments: + --------- + input_csv: str + Path to CSV file containing the following columns: + 'Tags, youtube_id, time_start, time_end' + returns: + ------- + dataset: List of tuples. Each tuple consists of + (youtube_id, time_start, time_end). The type of time is float. + """ + lines = open(input_csv).readlines() + lines = [x.strip().split(',')[1:] for x in lines[1:]] + + lines = [(x[0], float(x[1]), float(x[2])) for x in lines] + + return lines + + +def main(input_csv, + output_dir, + trim_format='%06d', + num_jobs=24, + tmp_dir='/tmp/hvu'): + # Reading and parsing HVU. + dataset = parse_hvu_annotations(input_csv) + + # Creates folders where videos will be saved later. + create_video_folders(dataset, output_dir, tmp_dir) + + # Download all clips. + if num_jobs == 1: + status_lst = [] + for item in dataset: + status_lst.append( + download_clip_wrapper(item, trim_format, tmp_dir, output_dir)) + else: + status_lst = Parallel(n_jobs=num_jobs)( + delayed(download_clip_wrapper)(item, trim_format, tmp_dir, + output_dir) for item in dataset) + + # Clean tmp dir. + shutil.rmtree(tmp_dir) + # Save download report. + mmcv.dump(status_lst, 'download_report.json') + + +if __name__ == '__main__': + description = 'Helper script for downloading and trimming HVU videos.' 
+ p = argparse.ArgumentParser(description=description) + p.add_argument( + 'input_csv', + type=str, + help=('CSV file containing the following format: ' + 'Tags, youtube_id, time_start, time_end')) + p.add_argument( + 'output_dir', + type=str, + help='Output directory where videos will be saved.') + p.add_argument( + '-f', + '--trim-format', + type=str, + default='%06d', + help=('This will be the format for the ' + 'filename of trimmed videos: ' + 'videoid_%0xd(start_time)_%0xd(end_time).mp4. ' + 'Note that the start_time is multiplied by 10 since ' + 'decimal exists somewhere. ')) + p.add_argument('-n', '--num-jobs', type=int, default=24) + p.add_argument('-t', '--tmp-dir', type=str, default='/tmp/hvu') + main(**vars(p.parse_args())) diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/hvu/download_annotations.sh b/Downstream/Open-Set-Action-Recognition/tools/data/hvu/download_annotations.sh new file mode 100644 index 0000000..d100a47 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/hvu/download_annotations.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +set -e + +DATA_DIR="../../../data/hvu/annotations" + +if [[ ! -d "${DATA_DIR}" ]]; then + echo "${DATA_DIR} does not exist. Creating"; + mkdir -p ${DATA_DIR} +fi + +git clone https://github.com/holistic-video-understanding/HVU-Dataset.git + +cd HVU-Dataset +unzip -o HVU_Train_V1.0.zip +unzip -o HVU_Val_V1.0.zip +cd .. +mv HVU-Dataset/HVU_Train_V1.0.csv ${DATA_DIR}/hvu_train.csv +mv HVU-Dataset/HVU_Val_V1.0.csv ${DATA_DIR}/hvu_val.csv +mv HVU-Dataset/HVU_Tags_Categories_V1.0.csv ${DATA_DIR}/hvu_categories.csv + +rm -rf HVU-Dataset diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/hvu/download_videos.sh b/Downstream/Open-Set-Action-Recognition/tools/data/hvu/download_videos.sh new file mode 100644 index 0000000..eca14cd --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/hvu/download_videos.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +# set up environment +conda env create -f environment.yml +source activate hvu +pip install --upgrade youtube-dl + +DATA_DIR="../../../data/hvu" +ANNO_DIR="../../../data/hvu/annotations" +python download.py ${ANNO_DIR}/hvu_train.csv ${DATA_DIR}/videos_train +python download.py ${ANNO_DIR}/hvu_val.csv ${DATA_DIR}/videos_val + +source deactivate hvu +conda remove -n hvu --all diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/hvu/environment.yml b/Downstream/Open-Set-Action-Recognition/tools/data/hvu/environment.yml new file mode 100644 index 0000000..86e7e1a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/hvu/environment.yml @@ -0,0 +1,36 @@ +name: kinetics +channels: + - anaconda + - menpo + - conda-forge + - defaults +dependencies: + - ca-certificates=2020.1.1 + - certifi=2020.4.5.1 + - ffmpeg=2.8.6 + - libcxx=10.0.0 + - libedit=3.1.20181209 + - libffi=3.3 + - ncurses=6.2 + - openssl=1.1.1g + - pip=20.0.2 + - python=3.7.7 + - readline=8.0 + - setuptools=46.4.0 + - sqlite=3.31.1 + - tk=8.6.8 + - wheel=0.34.2 + - xz=5.2.5 + - zlib=1.2.11 + - pip: + - decorator==4.4.2 + - intel-openmp==2019.0 + - joblib==0.15.1 + - mkl==2019.0 + - numpy==1.18.4 + - olefile==0.46 + - pandas==1.0.3 + - python-dateutil==2.8.1 + - pytz==2020.1 + - six==1.14.0 + - youtube-dl==2020.5.8 diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/hvu/extract_frames.sh b/Downstream/Open-Set-Action-Recognition/tools/data/hvu/extract_frames.sh new file mode 100644 index 0000000..d50f1cf --- /dev/null +++ 
b/Downstream/Open-Set-Action-Recognition/tools/data/hvu/extract_frames.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +cd ../ +python build_rawframes.py ../../data/hvu/videos_train/ ../../data/hvu/rawframes_train/ --level 1 --flow-type tvl1 --ext mp4 --task both --new-short 256 +echo "Raw frames (RGB and tv-l1) Generated for train set" + +python build_rawframes.py ../../data/hvu/videos_val/ ../../data/hvu/rawframes_val/ --level 1 --flow-type tvl1 --ext mp4 --task both --new-short 256 +echo "Raw frames (RGB and tv-l1) Generated for val set" + +cd hvu/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/hvu/generate_file_list.py b/Downstream/Open-Set-Action-Recognition/tools/data/hvu/generate_file_list.py new file mode 100644 index 0000000..e76706a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/hvu/generate_file_list.py @@ -0,0 +1,151 @@ +import argparse +import fnmatch +import glob +import os +import os.path as osp + +import mmcv + +annotation_root = '../../data/hvu/annotations' +tag_file = 'hvu_tags.json' +args = None + + +def parse_directory(path, + rgb_prefix='img_', + flow_x_prefix='flow_x_', + flow_y_prefix='flow_y_', + level=1): + """Parse directories holding extracted frames from standard benchmarks. + + Args: + path (str): Directory path to parse frames. + rgb_prefix (str): Prefix of generated rgb frames name. + default: 'img_'. + flow_x_prefix (str): Prefix of generated flow x name. + default: `flow_x_`. + flow_y_prefix (str): Prefix of generated flow y name. + default: `flow_y_`. + level (int): Directory level for glob searching. Options are 1 and 2. + default: 1. + + Returns: + dict: frame info dict with video id as key and tuple(path(str), + rgb_num(int), flow_x_num(int)) as value. + """ + print(f'parse frames under directory {path}') + if level == 1: + # Only search for one-level directory + def locate_directory(x): + return osp.basename(x) + + frame_dirs = glob.glob(osp.join(path, '*')) + + elif level == 2: + # search for two-level directory + def locate_directory(x): + return osp.join(osp.basename(osp.dirname(x)), osp.basename(x)) + + frame_dirs = glob.glob(osp.join(path, '*', '*')) + + else: + raise ValueError('level can be only 1 or 2') + + def count_files(directory, prefix_list): + """Count file number with a given directory and prefix. + + Args: + directory (str): Data directory to be search. + prefix_list (list): List or prefix. + + Returns: + list (int): Number list of the file with the prefix. 
+ """ + lst = os.listdir(directory) + cnt_list = [len(fnmatch.filter(lst, x + '*')) for x in prefix_list] + return cnt_list + + # check RGB + frame_dict = {} + for i, frame_dir in enumerate(frame_dirs): + total_num = count_files(frame_dir, + (rgb_prefix, flow_x_prefix, flow_y_prefix)) + dir_name = locate_directory(frame_dir) + + num_x = total_num[1] + num_y = total_num[2] + if num_x != num_y: + raise ValueError(f'x and y direction have different number ' + f'of flow images in video directory: {frame_dir}') + if i % 200 == 0: + print(f'{i} videos parsed') + + frame_dict[dir_name] = (frame_dir, total_num[0], num_x) + + print('frame directory analysis done') + return frame_dict + + +def parse_args(): + parser = argparse.ArgumentParser(description='build file list for HVU') + parser.add_argument('--input_csv', type=str, help='path of input csv file') + parser.add_argument( + '--src_dir', type=str, help='source video / frames directory') + parser.add_argument( + '--output', + type=str, + help='output filename, should \ + ends with .json') + parser.add_argument( + '--mode', + type=str, + choices=['frames', 'videos'], + help='generate file list for frames or videos') + + args = parser.parse_args() + return args + + +if __name__ == '__main__': + args = parse_args() + tag_cates = mmcv.load(tag_file) + tag2category = {} + for k in tag_cates: + for tag in tag_cates[k]: + tag2category[tag] = k + + data_list = open(args.input_csv).readlines() + data_list = [x.strip().split(',') for x in data_list[1:]] + + if args.mode == 'videos': + downloaded = os.listdir(args.src_dir) + downloaded = [x.split('.')[0] for x in downloaded] + downloaded_set = set(downloaded) + else: + parse_result = parse_directory(args.src_dir) + downloaded_set = set(parse_result) + + def parse_line(line): + tags, youtube_id, start, end = line + start, end = int(float(start) * 10), int(float(end) * 10) + newname = f'{youtube_id}_{start:06d}_{end:06d}' + tags = tags.split('|') + all_tags = {} + for tag in tags: + category = tag2category[tag] + all_tags.setdefault(category, + []).append(tag_cates[category].index(tag)) + return newname, all_tags + + data_list = [parse_line(line) for line in data_list] + data_list = [line for line in data_list if line[0] in downloaded_set] + + if args.mode == 'frames': + result = [ + dict( + frame_dir=k[0], total_frames=parse_result[k[0]][1], label=k[1]) + for k in data_list + ] + elif args.mode == 'videos': + result = [dict(filename=k[0] + '.mp4', label=k[1]) for k in data_list] + mmcv.dump(result, args.output) diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/hvu/generate_rawframes_filelist.sh b/Downstream/Open-Set-Action-Recognition/tools/data/hvu/generate_rawframes_filelist.sh new file mode 100644 index 0000000..59f3fa1 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/hvu/generate_rawframes_filelist.sh @@ -0,0 +1,5 @@ +# to generate file list of frames +python generate_file_list.py --input_csv ../../../data/hvu/annotations/hvu_train.csv --src_dir ../../../data/hvu/rawframes_train \ + --output ../../../data/hvu/hvu_train.json --mode frames +python generate_file_list.py --input_csv ../../../data/hvu/annotations/hvu_val.csv --src_dir ../../../data/hvu/rawframes_val \ + --output ../../../data/hvu/hvu_val.json --mode frames diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/hvu/generate_sub_file_list.py b/Downstream/Open-Set-Action-Recognition/tools/data/hvu/generate_sub_file_list.py new file mode 100644 index 0000000..41279c4 --- /dev/null +++ 
b/Downstream/Open-Set-Action-Recognition/tools/data/hvu/generate_sub_file_list.py
@@ -0,0 +1,41 @@
+import argparse
+import os.path as osp
+
+import mmcv
+
+
+def main(annotation_file, category):
+    assert category in [
+        'action', 'attribute', 'concept', 'event', 'object', 'scene'
+    ]
+
+    data = mmcv.load(annotation_file)
+    basename = osp.basename(annotation_file)
+    dirname = osp.dirname(annotation_file)
+    basename = basename.replace('hvu', f'hvu_{category}')
+
+    target_file = osp.join(dirname, basename)
+
+    result = []
+    for item in data:
+        label = item['label']
+        if category in label:
+            item['label'] = label[category]
+            result.append(item)
+
+    mmcv.dump(result, target_file)
+
+
+if __name__ == '__main__':
+    description = 'Helper script for generating HVU per-category file list.'
+    p = argparse.ArgumentParser(description=description)
+    p.add_argument(
+        'annotation_file',
+        type=str,
+        help=('The annotation file which contains tags of all categories.'))
+    p.add_argument(
+        'category',
+        type=str,
+        choices=['action', 'attribute', 'concept', 'event', 'object', 'scene'],
+        help='The tag category that you want to generate file list for.')
+    main(**vars(p.parse_args()))
diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/hvu/generate_videos_filelist.sh b/Downstream/Open-Set-Action-Recognition/tools/data/hvu/generate_videos_filelist.sh
new file mode 100644
index 0000000..deba7b7
--- /dev/null
+++ b/Downstream/Open-Set-Action-Recognition/tools/data/hvu/generate_videos_filelist.sh
@@ -0,0 +1,5 @@
+# to generate file lists of videos
+python generate_file_list.py --input_csv ../../../data/hvu/annotations/hvu_train.csv --src_dir ../../../data/hvu/videos_train \
+    --output ../../../data/hvu/hvu_train_video.json --mode videos
+python generate_file_list.py --input_csv ../../../data/hvu/annotations/hvu_val.csv --src_dir ../../../data/hvu/videos_val \
+    --output ../../../data/hvu/hvu_val_video.json --mode videos
diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/hvu/parse_tag_list.py b/Downstream/Open-Set-Action-Recognition/tools/data/hvu/parse_tag_list.py
new file mode 100644
index 0000000..41e8db1
--- /dev/null
+++ b/Downstream/Open-Set-Action-Recognition/tools/data/hvu/parse_tag_list.py
@@ -0,0 +1,15 @@
+import mmcv
+
+tag_list = '../../../data/hvu/annotations/hvu_categories.csv'
+
+lines = open(tag_list).readlines()
+lines = [x.strip().split(',') for x in lines[1:]]
+tag_categories = {}
+for line in lines:
+    tag, category = line
+    tag_categories.setdefault(category, []).append(tag)
+
+for k in tag_categories:
+    tag_categories[k].sort()
+
+mmcv.dump(tag_categories, 'hvu_tags.json')
diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/jester/README.md b/Downstream/Open-Set-Action-Recognition/tools/data/jester/README.md
new file mode 100644
index 0000000..dfd4c64
--- /dev/null
+++ b/Downstream/Open-Set-Action-Recognition/tools/data/jester/README.md
@@ -0,0 +1,140 @@
+# Preparing Jester
+
+## Introduction
+```
+@InProceedings{Materzynska_2019_ICCV,
+  author = {Materzynska, Joanna and Berger, Guillaume and Bax, Ingo and Memisevic, Roland},
+  title = {The Jester Dataset: A Large-Scale Video Dataset of Human Gestures},
+  booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV) Workshops},
+  month = {Oct},
+  year = {2019}
+}
+```
+
+For basic dataset information, you can refer to the dataset [website](https://20bn.com/datasets/jester/v1).
+Before we start, please make sure that the directory is located at `$MMACTION2/tools/data/jester/`.
+ +## Step 1. Prepare Annotations + +First of all, you have to sign in and download annotations to `$MMACTION2/data/jester/annotations` on the official [website](https://20bn.com/datasets/jester/v1). + +## Step 2. Prepare RGB Frames + +Since the [jester website](https://20bn.com/datasets/jester/v1) doesn't provide the original video data and only extracted RGB frames are available, you have to directly download RGB frames from [jester website](https://20bn.com/datasets/jester/v1). + +You can download all RGB frame parts on [jester website](https://20bn.com/datasets/jester/v1) to `$MMACTION2/data/jester/` and use the following command to extract. + +```shell +cd $MMACTION2/data/jester/ +cat 20bn-jester-v1-?? | tar zx +cd $MMACTION2/tools/data/jester/ +``` + +For users who only want to use RGB frames, you can skip to step 5 to generate file lists in the format of rawframes. Since the prefix of official JPGs is "%05d.jpg" (e.g., "00001.jpg"), +we add "filename_tmpl='{:05}.jpg'" to the dict of `data.train`, `data.val` and `data.test` in the config files related with jester like this: + +``` +data = dict( + videos_per_gpu=16, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + filename_tmpl='{:05}.jpg', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + filename_tmpl='{:05}.jpg', + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + filename_tmpl='{:05}.jpg', + pipeline=test_pipeline)) +``` + +## Step 3. Extract Flow + +This part is **optional** if you only want to use RGB frames. + +Before extracting, please refer to [install.md](/docs/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow). + +If you have plenty of SSD space, then we recommend extracting frames there for better I/O performance. + +You can run the following script to soft link SSD. + +```shell +# execute these two line (Assume the SSD is mounted at "/mnt/SSD/") +mkdir /mnt/SSD/jester_extracted/ +ln -s /mnt/SSD/jester_extracted/ ../../../data/jester/rawframes +``` + +Then, you can run the following script to extract optical flow based on RGB frames. + +```shell +cd $MMACTION2/tools/data/jester/ +bash extract_flow.sh +``` + +## Step 4. Encode Videos + +This part is **optional** if you only want to use RGB frames. + +You can run the following script to encode videos. + +```shell +cd $MMACTION2/tools/data/jester/ +bash encode_videos.sh +``` + +## Step 5. Generate File List + +You can run the follow script to generate file list in the format of rawframes and videos. + +```shell +cd $MMACTION2/tools/data/jester/ +bash generate_{rawframes, videos}_filelist.sh +``` + +## Step 5. Check Directory Structure + +After the whole data process for Jester preparation, +you will get the rawframes (RGB + Flow), and annotation files for Jester. + +In the context of the whole project (for Jester only), the folder structure will look like: + +``` +mmaction2 +├── mmaction +├── tools +├── configs +├── data +│ ├── jester +│ │ ├── jester_{train,val}_list_rawframes.txt +│ │ ├── jester_{train,val}_list_videos.txt +│ │ ├── annotations +│ | ├── videos +│ | | ├── 1.mp4 +│ | | ├── 2.mp4 +│ | | ├──... +│ | ├── rawframes +│ | | ├── 1 +│ | | | ├── 00001.jpg +│ | | | ├── 00002.jpg +│ | | | ├── ... +│ | | | ├── flow_x_00001.jpg +│ | | | ├── flow_x_00002.jpg +│ | | | ├── ... +│ | | | ├── flow_y_00001.jpg +│ | | | ├── flow_y_00002.jpg +│ | | | ├── ... 
+│ | | ├── 2 +│ | | ├── ... + +``` + +For training and evaluating on Jester, please refer to [getting_started.md](/docs/getting_started.md). diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/jester/encode_videos.sh b/Downstream/Open-Set-Action-Recognition/tools/data/jester/encode_videos.sh new file mode 100644 index 0000000..c220424 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/jester/encode_videos.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +cd ../ +python build_videos.py ../../data/jester/rawframes/ ../../data/jester/videos/ --fps 12 --level 1 --start-idx 1 --filename-tmpl '%05d' +echo "Encode videos" + +cd jester/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/jester/extract_flow.sh b/Downstream/Open-Set-Action-Recognition/tools/data/jester/extract_flow.sh new file mode 100644 index 0000000..f6b5090 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/jester/extract_flow.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +cd ../ +python build_rawframes.py ../../data/jester/rawframes/ ../../data/jester/rawframes/ --task flow --level 1 --flow-type tvl1 --input-frames +echo "Flow (tv-l1) Generated" +cd jester/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/jester/generate_rawframes_filelist.sh b/Downstream/Open-Set-Action-Recognition/tools/data/jester/generate_rawframes_filelist.sh new file mode 100644 index 0000000..c92674a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/jester/generate_rawframes_filelist.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +cd ../../../ +PYTHONPATH=. python tools/data/build_file_list.py jester data/jester/rawframes/ --rgb-prefix '0' --num-split 1 --level 1 --subset train --format rawframes --shuffle +PYTHONPATH=. python tools/data/build_file_list.py jester data/jester/rawframes/ --rgb-prefix '0' --num-split 1 --level 1 --subset val --format rawframes --shuffle +echo "Filelist for rawframes generated." + +cd tools/data/jester/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/jester/generate_videos_filelist.sh b/Downstream/Open-Set-Action-Recognition/tools/data/jester/generate_videos_filelist.sh new file mode 100644 index 0000000..6938496 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/jester/generate_videos_filelist.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +cd ../../../ +PYTHONPATH=. python tools/data/build_file_list.py jester data/jester/videos/ --num-split 1 --level 1 --subset train --format videos --shuffle +PYTHONPATH=. python tools/data/build_file_list.py jester data/jester/videos/ --num-split 1 --level 1 --subset val --format videos --shuffle +echo "Filelist for videos generated." + +cd tools/data/jester/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/jhmdb/README.md b/Downstream/Open-Set-Action-Recognition/tools/data/jhmdb/README.md new file mode 100644 index 0000000..bbe2fb3 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/jhmdb/README.md @@ -0,0 +1,95 @@ +# Preparing JHMDB + +## Introduction + +``` +@inproceedings{Jhuang:ICCV:2013, + title = {Towards understanding action recognition}, + author = {H. Jhuang and J. Gall and S. Zuffi and C. Schmid and M. J. Black}, + booktitle = {International Conf. on Computer Vision (ICCV)}, + month = Dec, + pages = {3192-3199}, + year = {2013} +} +``` + +For basic dataset information, you can refer to the dataset [website](http://jhmdb.is.tue.mpg.de/). +Before we start, please make sure that the directory is located at `$MMACTION2/tools/data/jhmdb/`. 
+ +## Download and Extract + +You can download the RGB frames, optical flow and ground truth annotations from [google drive](https://drive.google.com/drive/folders/1BvGywlAGrACEqRyfYbz3wzlVV3cDFkct). +The data are provided from [MOC](https://github.com/MCG-NJU/MOC-Detector/blob/master/readme/Dataset.md), which is adapted from [act-detector](https://github.com/vkalogeiton/caffe/tree/act-detector). + +After downloading the `JHMDB.tar.gz` file and put it in `$MMACTION2/tools/data/jhmdb/`, you can run the following command to extract. + +```shell +tar -zxvf JHMDB.tar.gz +``` + +If you have plenty of SSD space, then we recommend extracting frames there for better I/O performance. + +You can run the following script to soft link SSD. + +```shell +# execute these two line (Assume the SSD is mounted at "/mnt/SSD/") +mkdir /mnt/SSD/JHMDB/ +ln -s /mnt/SSD/JHMDB/ ../../../data/jhmdb +``` + +## Check Directory Structure + +After extracting, you will get the `FlowBrox04` directory, `Frames` directory and `JHMDB-GT.pkl` for JHMDB. + +In the context of the whole project (for JHMDB only), the folder structure will look like: + +``` +mmaction2 +├── mmaction +├── tools +├── configs +├── data +│ ├── jhmdb +│ | ├── FlowBrox04 +│ | | ├── brush_hair +│ | | | ├── April_09_brush_hair_u_nm_np1_ba_goo_0 +│ | | | | ├── 00001.jpg +│ | | | | ├── 00002.jpg +│ | | | | ├── ... +│ | | | | ├── 00039.jpg +│ | | | | ├── 00040.jpg +│ | | | ├── ... +│ | | | ├── Trannydude___Brushing_SyntheticHair___OhNOES!__those_fukin_knots!_brush_hair_u_nm_np1_fr_goo_2 +│ | | ├── ... +│ | | ├── wave +│ | | | ├── 21_wave_u_nm_np1_fr_goo_5 +│ | | | ├── ... +│ | | | ├── Wie_man_winkt!!_wave_u_cm_np1_fr_med_0 +│ | ├── Frames +│ | | ├── brush_hair +│ | | | ├── April_09_brush_hair_u_nm_np1_ba_goo_0 +│ | | | | ├── 00001.png +│ | | | | ├── 00002.png +│ | | | | ├── ... +│ | | | | ├── 00039.png +│ | | | | ├── 00040.png +│ | | | ├── ... +│ | | | ├── Trannydude___Brushing_SyntheticHair___OhNOES!__those_fukin_knots!_brush_hair_u_nm_np1_fr_goo_2 +│ | | ├── ... +│ | | ├── wave +│ | | | ├── 21_wave_u_nm_np1_fr_goo_5 +│ | | | ├── ... +│ | | | ├── Wie_man_winkt!!_wave_u_cm_np1_fr_med_0 +│ | ├── JHMDB-GT.pkl + +``` + +**Note**: The `JHMDB-GT.pkl` exists as a cache, it contains 6 items as follows: +1. `labels` (list): List of the 21 labels. +2. `gttubes` (dict): Dictionary that contains the ground truth tubes for each video. + A **gttube** is dictionary that associates with each index of label and a list of tubes. + A **tube** is a numpy array with `nframes` rows and 5 columns, each col is in format like ` `. +3. `nframes` (dict): Dictionary that contains the number of frames for each video, like `'walk/Panic_in_the_Streets_walk_u_cm_np1_ba_med_5': 16`. +4. `train_videos` (list): A list with `nsplits=1` elements, each one containing the list of training videos. +5. `test_videos` (list): A list with `nsplits=1` elements, each one containing the list of testing videos. +6. `resolution` (dict): Dictionary that outputs a tuple (h,w) of the resolution for each video, like `'pour/Bartender_School_Students_Practice_pour_u_cm_np1_fr_med_1': (240, 320)`. diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/README.md b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/README.md new file mode 100644 index 0000000..098ad4b --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/README.md @@ -0,0 +1,129 @@ +# Preparing Kinetics-[400/600/700] + +## Introduction + +``` +@inproceedings{inproceedings, + author = {Carreira, J. 
and Zisserman, Andrew}, + year = {2017}, + month = {07}, + pages = {4724-4733}, + title = {Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset}, + doi = {10.1109/CVPR.2017.502} +} +``` + +For basic dataset information, please refer to the official [website](https://deepmind.com/research/open-source/open-source-datasets/kinetics/). The scripts can be used for preparing kinetics400, kinetics600, kinetics700. To prepare different version of kinetics, you need to replace `${DATASET}` in the following examples with the specific dataset name. The choices of dataset names are `kinetics400`, `kinetics600` and `kinetics700`. +Before we start, please make sure that the directory is located at `$MMACTION2/tools/data/${DATASET}/`. + +## Step 1. Prepare Annotations + +First of all, you can run the following script to prepare annotations. + +```shell +bash download_annotations.sh ${DATASET} +``` + +## Step 2. Prepare Videos + +Then, you can run the following script to prepare videos. +The codes are adapted from the [official crawler](https://github.com/activitynet/ActivityNet/tree/master/Crawler/Kinetics). Note that this might take a long time. + +```shell +bash download_videos.sh ${DATASET} +``` + +**Important**: If you have already downloaded video dataset using the download script above, +you must replace all whitespaces in the class name for ease of processing by running + +```shell +bash rename_classnames.sh ${DATASET} +``` + +For better decoding speed, you can resize the original videos into smaller sized, densely encoded version by: + +```bash +python ../resize_videos.py ../../../data/${DATASET}/videos_train/ ../../../data/${DATASET}/videos_train_256p_dense_cache --dense --level 2 +``` + +## Step 3. Extract RGB and Flow + +This part is **optional** if you only want to use the video loader. + +Before extracting, please refer to [install.md](/docs/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow). + +If you have plenty of SSD space, then we recommend extracting frames there for better I/O performance. And you can run the following script to soft link the extracted frames. + +```shell +# execute these two line (Assume the SSD is mounted at "/mnt/SSD/") +mkdir /mnt/SSD/${DATASET}_extracted_train/ +ln -s /mnt/SSD/${DATASET}_extracted_train/ ../../../data/${DATASET}/rawframes_train/ +mkdir /mnt/SSD/${DATASET}_extracted_val/ +ln -s /mnt/SSD/${DATASET}_extracted_val/ ../../../data/${DATASET}/rawframes_val/ +``` + +If you only want to play with RGB frames (since extracting optical flow can be time-consuming), consider running the following script to extract **RGB-only** frames using denseflow. + +```shell +bash extract_rgb_frames.sh ${DATASET} +``` + +If you didn't install denseflow, you can still extract RGB frames using OpenCV by the following script, but it will keep the original size of the images. + +```shell +bash extract_rgb_frames_opencv.sh ${DATASET} +``` + +If both are required, run the following script to extract frames. + +```shell +bash extract_frames.sh ${DATASET} +``` + +These three commands above can generate images with size 340x256, if you want to generate images with short edge 320 (320p), +you can change the args `--new-width 340 --new-height 256` to `--new-short 320`. +More details can be found in [data_preparation](/docs/data_preparation.md) + +## Step 4. Generate File List + +you can run the follow scripts to generate file list in the format of videos and rawframes, respectively. 
+ +```shell +bash generate_videos_filelist.sh ${DATASET} +# execute the command below when rawframes are ready +bash generate_rawframes_filelist.sh ${DATASET} +``` + +## Step 5. Folder Structure + +After the whole data pipeline for Kinetics preparation. +you can get the rawframes (RGB + Flow), videos and annotation files for Kinetics. + +In the context of the whole project (for Kinetics only), the *minimal* folder structure will look like: +(*minimal* means that some data are not necessary: for example, you may want to evaluate kinetics using the original video format.) + +``` +mmaction2 +├── mmaction +├── tools +├── configs +├── data +│ ├── ${DATASET} +│ │ ├── ${DATASET}_train_list_videos.txt +│ │ ├── ${DATASET}_val_list_videos.txt +│ │ ├── annotations +│ │ ├── videos_train +│ │ ├── videos_val +│ │ │ ├── abseiling +│ │ │ │ ├── 0wR5jVB-WPk_000417_000427.mp4 +│ │ │ │ ├── ... +│ │ │ ├── ... +│ │ │ ├── wrapping_present +│ │ │ ├── ... +│ │ │ ├── zumba +│ │ ├── rawframes_train +│ │ ├── rawframes_val + +``` + +For training and evaluating on Kinetics, please refer to [getting_started](/docs/getting_started.md). diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/check_data.sh b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/check_data.sh new file mode 100644 index 0000000..ee5d693 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/check_data.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +DATASET=$1 +if [ "$DATASET" == "kinetics10" ] || [ "$DATASET" == "kinetics400" ] || [ "$1" == "kinetics600" ] || [ "$1" == "kinetics700" ]; then + echo "We are processing $DATASET" +else + echo "Bad Argument, we only support kinetics400, kinetics600 or kinetics700" + exit 0 +fi + +pwd_dir=$pwd + +cd ../../../ +PYTHONPATH=. python tools/data/data_check.py data/${DATASET}/videos_train data/${DATASET}/${DATASET}_train_list_videos.txt train +echo "Train filelist for video passed checking." + +PYTHONPATH=. python tools/data/data_check.py data/${DATASET}/videos_val data/${DATASET}/${DATASET}_val_list_videos.txt val +echo "Val filelist for video passed checking." +cd ${pwd_dir} diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/download.py b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/download.py new file mode 100644 index 0000000..d1281ee --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/download.py @@ -0,0 +1,228 @@ +# ------------------------------------------------------------------------------ +# Adapted from https://github.com/activitynet/ActivityNet/ +# Original licence: Copyright (c) Microsoft, under the MIT License. +# ------------------------------------------------------------------------------ +import argparse +import glob +import json +import os +import shutil +import ssl +import subprocess +import uuid +from collections import OrderedDict + +import pandas as pd +from joblib import Parallel, delayed + +ssl._create_default_https_context = ssl._create_unverified_context + + +def create_video_folders(dataset, output_dir, tmp_dir): + """Creates a directory for each label name in the dataset.""" + if 'label-name' not in dataset.columns: + this_dir = os.path.join(output_dir, 'test') + if not os.path.exists(this_dir): + os.makedirs(this_dir) + # I should return a dict but ... 
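+        # Test-split annotations carry no labels, so a single directory path
+        # (a plain string) is returned here instead of a label->directory
+        # dict; construct_video_filename() below handles both cases.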
+ return this_dir + if not os.path.exists(output_dir): + os.makedirs(output_dir) + if not os.path.exists(tmp_dir): + os.makedirs(tmp_dir) + + label_to_dir = {} + for label_name in dataset['label-name'].unique(): + this_dir = os.path.join(output_dir, label_name) + if not os.path.exists(this_dir): + os.makedirs(this_dir) + label_to_dir[label_name] = this_dir + return label_to_dir + + +def construct_video_filename(row, label_to_dir, trim_format='%06d'): + """Given a dataset row, this function constructs the output filename for a + given video.""" + basename = '%s_%s_%s.mp4' % (row['video-id'], + trim_format % row['start-time'], + trim_format % row['end-time']) + if not isinstance(label_to_dir, dict): + dirname = label_to_dir + else: + dirname = label_to_dir[row['label-name']] + output_filename = os.path.join(dirname, basename) + return output_filename + + +def download_clip(video_identifier, + output_filename, + start_time, + end_time, + tmp_dir='/tmp/kinetics', + num_attempts=5, + url_base='https://www.youtube.com/watch?v='): + """Download a video from youtube if exists and is not blocked. + arguments: + --------- + video_identifier: str + Unique YouTube video identifier (11 characters) + output_filename: str + File path where the video will be stored. + start_time: float + Indicates the begining time in seconds from where the video + will be trimmed. + end_time: float + Indicates the ending time in seconds of the trimmed video. + """ + # Defensive argument checking. + assert isinstance(video_identifier, str), 'video_identifier must be string' + assert isinstance(output_filename, str), 'output_filename must be string' + assert len(video_identifier) == 11, 'video_identifier must have length 11' + + status = False + # Construct command line for getting the direct video link. + tmp_filename = os.path.join(tmp_dir, '%s.%%(ext)s' % uuid.uuid4()) + + if not os.path.exists(output_filename): + if not os.path.exists(tmp_filename): + command = [ + 'youtube-dl', '--quiet', '--no-warnings', + '--no-check-certificate', '-f', 'mp4', '-o', + '"%s"' % tmp_filename, + '"%s"' % (url_base + video_identifier) + ] + command = ' '.join(command) + print(command) + attempts = 0 + while True: + try: + subprocess.check_output( + command, shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as err: + attempts += 1 + if attempts == num_attempts: + return status, err.output + else: + break + + tmp_filename = glob.glob('%s*' % tmp_filename.split('.')[0])[0] + # Construct command to trim the videos (ffmpeg required). + command = [ + 'ffmpeg', '-i', + '"%s"' % tmp_filename, '-ss', + str(start_time), '-t', + str(end_time - start_time), '-c:v', 'libx264', '-c:a', 'copy', + '-threads', '1', '-loglevel', 'panic', + '"%s"' % output_filename + ] + command = ' '.join(command) + try: + subprocess.check_output( + command, shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as err: + return status, err.output + + # Check if the video was successfully saved. 
+ status = os.path.exists(output_filename) + os.remove(tmp_filename) + return status, 'Downloaded' + + +def download_clip_wrapper(row, label_to_dir, trim_format, tmp_dir): + """Wrapper for parallel processing purposes.""" + output_filename = construct_video_filename(row, label_to_dir, trim_format) + clip_id = os.path.basename(output_filename).split('.mp4')[0] + if os.path.exists(output_filename): + status = tuple([clip_id, True, 'Exists']) + return status + + downloaded, log = download_clip( + row['video-id'], + output_filename, + row['start-time'], + row['end-time'], + tmp_dir=tmp_dir) + status = tuple([clip_id, downloaded, log]) + return status + + +def parse_kinetics_annotations(input_csv, ignore_is_cc=False): + """Returns a parsed DataFrame. + arguments: + --------- + input_csv: str + Path to CSV file containing the following columns: + 'YouTube Identifier,Start time,End time,Class label' + returns: + ------- + dataset: DataFrame + Pandas with the following columns: + 'video-id', 'start-time', 'end-time', 'label-name' + """ + df = pd.read_csv(input_csv) + if 'youtube_id' in df.columns: + columns = OrderedDict([('youtube_id', 'video-id'), + ('time_start', 'start-time'), + ('time_end', 'end-time'), + ('label', 'label-name')]) + df.rename(columns=columns, inplace=True) + if ignore_is_cc: + df = df.loc[:, df.columns.tolist()[:-1]] + return df + + +def main(input_csv, + output_dir, + trim_format='%06d', + num_jobs=24, + tmp_dir='/tmp/kinetics'): + # Reading and parsing Kinetics. + dataset = parse_kinetics_annotations(input_csv) + + # Creates folders where videos will be saved later. + label_to_dir = create_video_folders(dataset, output_dir, tmp_dir) + + # Download all clips. + if num_jobs == 1: + status_list = [] + for i, row in dataset.iterrows(): + status_list.append( + download_clip_wrapper(row, label_to_dir, trim_format, tmp_dir)) + else: + status_list = Parallel( + n_jobs=num_jobs)(delayed(download_clip_wrapper)( + row, label_to_dir, trim_format, tmp_dir) + for i, row in dataset.iterrows()) + + # Clean tmp dir. + shutil.rmtree(tmp_dir) + + # Save download report. + with open('download_report.json', 'w') as fobj: + fobj.write(json.dumps(status_list)) + + +if __name__ == '__main__': + description = 'Helper script for downloading and trimming kinetics videos.' 
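+    # Example invocation (as issued by download_videos.sh with
+    # DATASET=kinetics400; the paths are illustrative):
+    #   python download.py ../../../data/kinetics400/annotations/kinetics_train.csv \
+    #       ../../../data/kinetics400/videos_train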
+ p = argparse.ArgumentParser(description=description) + p.add_argument( + 'input_csv', + type=str, + help=('CSV file containing the following format: ' + 'YouTube Identifier,Start time,End time,Class label')) + p.add_argument( + 'output_dir', + type=str, + help='Output directory where videos will be saved.') + p.add_argument( + '-f', + '--trim-format', + type=str, + default='%06d', + help=('This will be the format for the ' + 'filename of trimmed videos: ' + 'videoid_%0xd(start_time)_%0xd(end_time).mp4')) + p.add_argument('-n', '--num-jobs', type=int, default=24) + p.add_argument('-t', '--tmp-dir', type=str, default='/tmp/kinetics') + # help='CSV file of the previous version of Kinetics.') + main(**vars(p.parse_args())) diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/download_annotations.sh b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/download_annotations.sh new file mode 100644 index 0000000..09e25b1 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/download_annotations.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +DATASET=$1 +if [ "$DATASET" == "kinetics400" ] || [ "$1" == "kinetics600" ] || [ "$1" == "kinetics700" ]; then + echo "We are processing $DATASET" +else + echo "Bad Argument, we only support kinetics400, kinetics600 or kinetics700" + exit 0 +fi + +DATA_DIR="../../../data/${DATASET}/annotations" + +if [[ ! -d "${DATA_DIR}" ]]; then + echo "${DATA_DIR} does not exist. Creating"; + mkdir -p ${DATA_DIR} +fi + +wget https://storage.googleapis.com/deepmind-media/Datasets/${DATASET}.tar.gz + +tar -zxvf ${DATASET}.tar.gz --strip-components 1 -C ${DATA_DIR}/ +mv ${DATA_DIR}/train.csv ${DATA_DIR}/kinetics_train.csv +mv ${DATA_DIR}/validate.csv ${DATA_DIR}/kinetics_val.csv +mv ${DATA_DIR}/test.csv ${DATA_DIR}/kinetics_test.csv + +rm ${DATASET}.tar.gz +rm ${DATA_DIR}/*.json diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/download_subset.py b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/download_subset.py new file mode 100644 index 0000000..d2ee987 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/download_subset.py @@ -0,0 +1,259 @@ +# ------------------------------------------------------------------------------ +# Adapted from https://github.com/activitynet/ActivityNet/ +# Original licence: Copyright (c) Microsoft, under the MIT License. +# ------------------------------------------------------------------------------ +import argparse +import glob +import json +import os +import shutil +import ssl +import subprocess +import uuid +from collections import OrderedDict + +import pandas as pd +from joblib import Parallel, delayed + +ssl._create_default_https_context = ssl._create_unverified_context + + +def create_video_folders(dataset, output_dir, tmp_dir): + """Creates a directory for each label name in the dataset.""" + if 'label-name' not in dataset.columns: + this_dir = os.path.join(output_dir, 'test') + if not os.path.exists(this_dir): + os.makedirs(this_dir) + # I should return a dict but ... 
+ return this_dir + if not os.path.exists(output_dir): + os.makedirs(output_dir) + if not os.path.exists(tmp_dir): + os.makedirs(tmp_dir) + + label_to_dir = {} + for label_name in dataset['label-name'].unique(): + this_dir = os.path.join(output_dir, label_name) + if not os.path.exists(this_dir): + os.makedirs(this_dir) + label_to_dir[label_name] = this_dir + return label_to_dir + + +def construct_video_filename(row, label_to_dir, trim_format='%06d'): + """Given a dataset row, this function constructs the output filename for a + given video.""" + basename = '%s_%s_%s.mp4' % (row['video-id'], + trim_format % row['start-time'], + trim_format % row['end-time']) + if not isinstance(label_to_dir, dict): + dirname = label_to_dir + else: + dirname = label_to_dir[row['label-name']] + output_filename = os.path.join(dirname, basename) + return output_filename + + +def download_clip(video_identifier, + output_filename, + start_time, + end_time, + tmp_dir='/tmp/kinetics', + num_attempts=5, + url_base='https://www.youtube.com/watch?v='): + """Download a video from youtube if exists and is not blocked. + arguments: + --------- + video_identifier: str + Unique YouTube video identifier (11 characters) + output_filename: str + File path where the video will be stored. + start_time: float + Indicates the begining time in seconds from where the video + will be trimmed. + end_time: float + Indicates the ending time in seconds of the trimmed video. + """ + # Defensive argument checking. + assert isinstance(video_identifier, str), 'video_identifier must be string' + assert isinstance(output_filename, str), 'output_filename must be string' + assert len(video_identifier) == 11, 'video_identifier must have length 11' + + status = False + # Construct command line for getting the direct video link. + tmp_filename = os.path.join(tmp_dir, '%s.%%(ext)s' % uuid.uuid4()) + + if not os.path.exists(output_filename): + if not os.path.exists(tmp_filename): + command = [ + 'youtube-dl', '--quiet', '--no-warnings', + '--no-check-certificate', '-f', 'mp4', '-o', + '"%s"' % tmp_filename, + '"%s"' % (url_base + video_identifier) + ] + command = ' '.join(command) + print(command) + attempts = 0 + while True: + try: + subprocess.check_output( + command, shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as err: + attempts += 1 + if attempts == num_attempts: + return status, err.output + else: + break + + tmp_filename = glob.glob('%s*' % tmp_filename.split('.')[0])[0] + # Construct command to trim the videos (ffmpeg required). + command = [ + 'ffmpeg', '-i', + '"%s"' % tmp_filename, '-ss', + str(start_time), '-t', + str(end_time - start_time), '-c:v', 'libx264', '-c:a', 'copy', + '-threads', '1', '-loglevel', 'panic', + '"%s"' % output_filename + ] + command = ' '.join(command) + try: + subprocess.check_output( + command, shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as err: + return status, err.output + + # Check if the video was successfully saved. 
+ status = os.path.exists(output_filename) + os.remove(tmp_filename) + return status, 'Downloaded' + + +def download_clip_wrapper(row, label_to_dir, trim_format, tmp_dir): + """Wrapper for parallel processing purposes.""" + output_filename = construct_video_filename(row, label_to_dir, trim_format) + clip_id = os.path.basename(output_filename).split('.mp4')[0] + if os.path.exists(output_filename): + status = tuple([clip_id, True, 'Exists']) + return status + + downloaded, log = download_clip( + row['video-id'], + output_filename, + row['start-time'], + row['end-time'], + tmp_dir=tmp_dir) + status = tuple([clip_id, downloaded, log]) + return status + + +def parse_kinetics_annotations(input_csv, ignore_is_cc=False): + """Returns a parsed DataFrame. + arguments: + --------- + input_csv: str + Path to CSV file containing the following columns: + 'YouTube Identifier,Start time,End time,Class label' + returns: + ------- + dataset: DataFrame + Pandas with the following columns: + 'video-id', 'start-time', 'end-time', 'label-name' + """ + df = pd.read_csv(input_csv) + if 'youtube_id' in df.columns: + columns = OrderedDict([('youtube_id', 'video-id'), + ('time_start', 'start-time'), + ('time_end', 'end-time'), + ('label', 'label-name')]) + df.rename(columns=columns, inplace=True) + if ignore_is_cc: + df = df.loc[:, df.columns.tolist()[:-1]] + return df + + +def setup_new_annotations(input_csv, output_dir, subset): + if not os.path.exists(output_dir): + os.makedirs(output_dir) + anno_dir = os.path.join(output_dir, '../annotations') + if not os.path.exists(anno_dir): + os.makedirs(anno_dir) + anno_file = os.path.join(anno_dir, input_csv.split('/')[-1]) + fout = open(anno_file, 'w') + # read original annotation file + with open(input_csv, 'r') as f: + # write subset lines + for idx, line in enumerate(f.readlines()): + line_list = line.strip().split(',') + if idx > 0 and line_list[0] not in subset: + continue + fout.writelines("%s,%s,%s,%s,%s\n"%(line_list[0], line_list[1], line_list[2], line_list[3], line_list[4])) + fout.close() + return anno_file + + +def main(input_csv, + output_dir, + subset_file, + trim_format='%06d', + num_jobs=24, + tmp_dir='/tmp/kinetics'): + + # read the subset class list + subset = [] + with open(subset_file, 'r') as f: + for line in f.readlines(): + subset.append(line.strip()) + # setup the annotations of the subset + real_csv = setup_new_annotations(input_csv, output_dir, subset) + + # Reading and parsing Kinetics. + dataset = parse_kinetics_annotations(real_csv) + + # Creates folders where videos will be saved later. + label_to_dir = create_video_folders(dataset, output_dir, tmp_dir) + + # Download all clips. + if num_jobs == 1: + status_list = [] + for i, row in dataset.iterrows(): + status_list.append( + download_clip_wrapper(row, label_to_dir, trim_format, tmp_dir)) + else: + status_list = Parallel( + n_jobs=num_jobs)(delayed(download_clip_wrapper)( + row, label_to_dir, trim_format, tmp_dir) + for i, row in dataset.iterrows()) + + # Clean tmp dir. + shutil.rmtree(tmp_dir) + + # Save download report. + with open('download_report.json', 'w') as fobj: + fobj.write(json.dumps(status_list)) + + +if __name__ == '__main__': + description = 'Helper script for downloading and trimming kinetics videos.' 
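+    # Example invocation (as issued by download_videos_subset.sh; the tmp dir
+    # used there is machine-specific, so treat these paths as illustrative):
+    #   python download_subset.py ../../../data/kinetics400/annotations/kinetics_train.csv \
+    #       ../../../data/kinetics10/videos_train --subset_file ./subset_list.txt -n 1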
+ p = argparse.ArgumentParser(description=description) + p.add_argument( + 'input_csv', + type=str, + help=('CSV file containing the following format: ' + 'YouTube Identifier,Start time,End time,Class label')) + p.add_argument( + 'output_dir', + type=str, + help='Output directory where videos will be saved.') + p.add_argument('-s', '--subset_file', type=str, help='the subset file of kinetics classes.') + p.add_argument( + '-f', + '--trim-format', + type=str, + default='%06d', + help=('This will be the format for the ' + 'filename of trimmed videos: ' + 'videoid_%0xd(start_time)_%0xd(end_time).mp4')) + p.add_argument('-n', '--num-jobs', type=int, default=24) + p.add_argument('-t', '--tmp-dir', type=str, default='/tmp/kinetics') + # help='CSV file of the previous version of Kinetics.') + main(**vars(p.parse_args())) diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/download_videos.sh b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/download_videos.sh new file mode 100644 index 0000000..0f49ed5 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/download_videos.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +# set up environment +conda env create -f environment.yml +source activate kinetics +pip install --upgrade youtube-dl + +DATASET=$1 +if [ "$DATASET" == "kinetics400" ] || [ "$1" == "kinetics600" ] || [ "$1" == "kinetics700" ]; then + echo "We are processing $DATASET" +else + echo "Bad Argument, we only support kinetics400, kinetics600 or kinetics700" + exit 0 +fi + +DATA_DIR="../../../data/${DATASET}" +ANNO_DIR="../../../data/${DATASET}/annotations" +python download.py ${ANNO_DIR}/kinetics_train.csv ${DATA_DIR}/videos_train +python download.py ${ANNO_DIR}/kinetics_val.csv ${DATA_DIR}/videos_val + +source deactivate kinetics +conda remove -n kinetics --all diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/download_videos_subset.sh b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/download_videos_subset.sh new file mode 100644 index 0000000..dd32927 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/download_videos_subset.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +# set up environment +conda env create -f environment.yml +source activate kinetics +pip install --upgrade youtube-dl + +DATASET=$1 +if [ "$DATASET" == "kinetics400" ] ; then + echo "We are processing $DATASET" +else + echo "Bad Argument, we only support kinetics400!" 
+ exit 0 +fi + +DATA_DIR="../../../data/kinetics10" +ANNO_DIR="../../../data/${DATASET}/annotations" +SUBSET="./subset_list.txt" +python download_subset.py ${ANNO_DIR}/kinetics_train.csv ${DATA_DIR}/videos_train --subset_file ${SUBSET} -t /ssd/data/tmp/kinetics10 -n 1 +python download_subset.py ${ANNO_DIR}/kinetics_val.csv ${DATA_DIR}/videos_val --subset_file ${SUBSET} -t /ssd/data/tmp/kinetics10 -n 1 + +source deactivate kinetics +conda remove -n kinetics --all diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/environment.yml b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/environment.yml new file mode 100644 index 0000000..86e7e1a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/environment.yml @@ -0,0 +1,36 @@ +name: kinetics +channels: + - anaconda + - menpo + - conda-forge + - defaults +dependencies: + - ca-certificates=2020.1.1 + - certifi=2020.4.5.1 + - ffmpeg=2.8.6 + - libcxx=10.0.0 + - libedit=3.1.20181209 + - libffi=3.3 + - ncurses=6.2 + - openssl=1.1.1g + - pip=20.0.2 + - python=3.7.7 + - readline=8.0 + - setuptools=46.4.0 + - sqlite=3.31.1 + - tk=8.6.8 + - wheel=0.34.2 + - xz=5.2.5 + - zlib=1.2.11 + - pip: + - decorator==4.4.2 + - intel-openmp==2019.0 + - joblib==0.15.1 + - mkl==2019.0 + - numpy==1.18.4 + - olefile==0.46 + - pandas==1.0.3 + - python-dateutil==2.8.1 + - pytz==2020.1 + - six==1.14.0 + - youtube-dl==2020.5.8 diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/extract_frames.sh b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/extract_frames.sh new file mode 100644 index 0000000..fd2ce11 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/extract_frames.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +DATASET=$1 +if [ "$DATASET" == "kinetics400" ] || [ "$1" == "kinetics600" ] || [ "$1" == "kinetics700" ]; then + echo "We are processing $DATASET" +else + echo "Bad Argument, we only support kinetics400, kinetics600 or kinetics700" + exit 0 +fi + +cd ../ +python build_rawframes.py ../../data/${DATASET}/videos_train/ ../../data/${DATASET}/rawframes_train/ --level 2 --flow-type tvl1 --ext mp4 --task both --new-width 340 --new-height 256 +echo "Raw frames (RGB and tv-l1) Generated for train set" + +python build_rawframes.py ../../data/${DATASET}/videos_val/ ../../data/${DATASET}/rawframes_val/ --level 2 --flow-type tvl1 --ext mp4 --task both --new-width 340 --new-height 256 +echo "Raw frames (RGB and tv-l1) Generated for val set" + +cd ${DATASET}/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/extract_rgb_frames.sh b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/extract_rgb_frames.sh new file mode 100644 index 0000000..2db7fb4 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/extract_rgb_frames.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +DATASET=$1 +if [ "$DATASET" == "kinetics400" ] || [ "$1" == "kinetics600" ] || [ "$1" == "kinetics700" ]; then + echo "We are processing $DATASET" +else + echo "Bad Argument, we only support kinetics400, kinetics600 or kinetics700" + exit 0 +fi + +cd ../ +python build_rawframes.py ../../data/${DATASET}/videos_train/ ../../data/${DATASET}/rawframes_train/ --level 2 --ext mp4 --task rgb --new-width 340 --new-height 256 +echo "Raw frames (RGB only) generated for train set" + +python build_rawframes.py ../../data/${DATASET}/videos_val/ ../../data/${DATASET}/rawframes_val/ --level 2 --ext mp4 --task rgb --new-width 340 --new-height 256 +echo "Raw frames (RGB 
only) generated for val set" + +cd ${DATASET}/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/extract_rgb_frames_opencv.sh b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/extract_rgb_frames_opencv.sh new file mode 100644 index 0000000..c57fa4f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/extract_rgb_frames_opencv.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +DATASET=$1 +if [ "$DATASET" == "kinetics400" ] || [ "$1" == "kinetics600" ] || [ "$1" == "kinetics700" ]; then + echo "We are processing $DATASET" +else + echo "Bad Argument, we only support kinetics400, kinetics600 or kinetics700" + exit 0 +fi + +cd ../ +python build_rawframes.py ../../data/${DATASET}/videos_train/ ../../data/${DATASET}/rawframes_train/ --level 2 --ext mp4 --task rgb --new-width 340 --new-height 256 --use-opencv +echo "Raw frames (RGB only) generated for train set" + +python build_rawframes.py ../../data/${DATASET}/videos_val/ ../../data/${DATASET}/rawframes_val/ --level 2 --ext mp4 --task rgb --new-width 340 --new-height 256 --use-opencv +echo "Raw frames (RGB only) generated for val set" + +cd ${DATASET}/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/generate_rawframes_filelist.sh b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/generate_rawframes_filelist.sh new file mode 100644 index 0000000..22b2366 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/generate_rawframes_filelist.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +DATASET=$1 +if [ "$DATASET" == "kinetics400" ] || [ "$1" == "kinetics600" ] || [ "$1" == "kinetics700" ]; then + echo "We are processing $DATASET" +else + echo "Bad Argument, we only support kinetics400, kinetics600 or kinetics700" + exit 0 +fi + +cd ../../../ +PYTHONPATH=. python tools/data/build_file_list.py ${DATASET} data/${DATASET}/rawframes_train/ --level 2 --format rawframes --num-split 1 --subset train --shuffle +echo "Train filelist for rawframes generated." + +PYTHONPATH=. python tools/data/build_file_list.py ${DATASET} data/${DATASET}/rawframes_val/ --level 2 --format rawframes --num-split 1 --subset val --shuffle +echo "Val filelist for rawframes generated." +cd tools/data/${DATASET}/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/generate_videos_filelist.sh b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/generate_videos_filelist.sh new file mode 100644 index 0000000..096d9b1 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/generate_videos_filelist.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +DATASET=$1 +if [ "$DATASET" == "kinetics10" ] || [ "$DATASET" == "kinetics400" ] || [ "$1" == "kinetics600" ] || [ "$1" == "kinetics700" ]; then + echo "We are processing $DATASET" +else + echo "Bad Argument, we only support kinetics400, kinetics600 or kinetics700" + exit 0 +fi + +cd ../../../ +PYTHONPATH=. python tools/data/build_file_list.py ${DATASET} data/${DATASET}/videos_train/ --level 2 --format videos --num-split 1 --subset train --shuffle +echo "Train filelist for video generated." + +PYTHONPATH=. python tools/data/build_file_list.py ${DATASET} data/${DATASET}/videos_val/ --level 2 --format videos --num-split 1 --subset val --shuffle +echo "Val filelist for video generated." 
+cd tools/data/${DATASET}/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/rename_classnames.sh b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/rename_classnames.sh new file mode 100644 index 0000000..c4cb8b4 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/rename_classnames.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +pwd_dir=$pwd +# Rename classname for convenience +DATASET=$1 +if [ "$DATASET" == "kinetics10" ] || [ "$DATASET" == "kinetics400" ] || [ "$1" == "kinetics600" ] || [ "$1" == "kinetics700" ]; then + echo "We are processing $DATASET" +else + echo "Bad Argument, we only support kinetics400, kinetics600 or kinetics700" + exit 0 +fi + +cd ../../../data/${DATASET}/ +ls ./videos_train | while read class; do \ + newclass=`echo $class | tr " " "_" `; + if [ "${class}" != "${newclass}" ] + then + mv "videos_train/${class}" "videos_train/${newclass}"; + fi +done + +ls ./videos_val | while read class; do \ + newclass=`echo $class | tr " " "_" `; + if [ "${class}" != "${newclass}" ] + then + mv "videos_val/${class}" "videos_val/${newclass}"; + fi +done + +# cd ../../tools/data/${DATASET}/ +cd ${pwd_dir} diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/subset_list.txt b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/subset_list.txt new file mode 100644 index 0000000..a2018d2 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/kinetics/subset_list.txt @@ -0,0 +1,10 @@ +canoeing or kayaking +climbing a rope +driving car +golf driving +opening bottle +playing piano +playing volleyball +shooting goal (soccer) +surfing water +writing \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/mimetics/build_file_list.py b/Downstream/Open-Set-Action-Recognition/tools/data/mimetics/build_file_list.py new file mode 100644 index 0000000..2e8ec7f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/mimetics/build_file_list.py @@ -0,0 +1,34 @@ +import os, argparse +from sklearn.utils import shuffle + + + +def parse_args(): + parser = argparse.ArgumentParser(description='Build file list') + parser.add_argument('dataset', type=str, choices=['mimetics10', 'mimetics'], help='dataset to be built file list') + parser.add_argument('src_folder', type=str, help='root directory for the frames or videos') + parser.add_argument('list_file', type=str, help='file list result') + args = parser.parse_args() + return args + +if __name__ == '__main__': + args = parse_args() + + filelist, labels = [], [] + for cls_id, labelname in enumerate(sorted(os.listdir(args.src_folder))): + video_path = os.path.join(args.src_folder, labelname) + for videoname in os.listdir(video_path): + # get the video file + video_file = os.path.join(labelname, videoname) + filelist.append(video_file) + # get the label + labels.append(str(cls_id)) + + filelist, labels = shuffle(filelist, labels) + + with open(args.list_file, 'w') as f: + for filepath, label in zip(filelist, labels): + print(filepath, label) + f.writelines('%s %s\n'%(filepath, label)) + + diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/mimetics/check_data.sh b/Downstream/Open-Set-Action-Recognition/tools/data/mimetics/check_data.sh new file mode 100644 index 0000000..636c16f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/mimetics/check_data.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +DATASET=$1 +if [ "$DATASET" == "mimetics10" ] || [ "$DATASET" == "mimetics" ]; then + echo "We 
are processing $DATASET" +else + echo "Bad Argument, we only support mimetics10 or mimetics" + exit 0 +fi + +pwd_dir=$pwd + +cd ../../../ +PYTHONPATH=. python tools/data/data_check.py data/${DATASET}/videos data/${DATASET}/${DATASET}_test_list_videos.txt test +echo "Test filelist for video passed checking." + +cd ${pwd_dir} diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/mimetics/download_annotations.sh b/Downstream/Open-Set-Action-Recognition/tools/data/mimetics/download_annotations.sh new file mode 100644 index 0000000..2df9663 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/mimetics/download_annotations.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +DATASET='mimetics' +DATA_DIR="../../../data/${DATASET}/annotations" + +if [[ ! -d "${DATA_DIR}" ]]; then + echo "${DATA_DIR} does not exist. Creating"; + mkdir -p ${DATA_DIR} +fi + +wget https://europe.naverlabs.com/wp-content/uploads/2019/12/Mimetics_release_v1.0.zip + +unzip Mimetics_release_v1.0.zip -d ${DATA_DIR}/ + +rm Mimetics_release_v1.0.zip diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/mimetics/download_videos_subset.sh b/Downstream/Open-Set-Action-Recognition/tools/data/mimetics/download_videos_subset.sh new file mode 100644 index 0000000..cd7abd4 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/mimetics/download_videos_subset.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +# set up environment +conda env create -f ../kinetics/environment.yml +source activate kinetics +pip install --upgrade youtube-dl + +DATA_DIR="../../../data/mimetics10" +ANNO_DIR="../../../data/mimetics/annotations" +SUBSET="../kinetics/subset_list.txt" +python ../kinetics/download_subset.py ${ANNO_DIR}/mimetics_v1.0.csv ${DATA_DIR}/videos --subset_file ${SUBSET} -t /ssd/data/tmp/mimetics10 -n 1 + +source deactivate kinetics +# conda remove -n kinetics --all diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/mimetics/generate_videos_filelist.sh b/Downstream/Open-Set-Action-Recognition/tools/data/mimetics/generate_videos_filelist.sh new file mode 100644 index 0000000..91d7c55 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/mimetics/generate_videos_filelist.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +DATASET=$1 +if [ "$DATASET" == "mimetics10" ] || [ "$DATASET" == "mimetics" ]; then + echo "We are processing $DATASET" +else + echo "Bad Argument, we only support mimetics10, mimetics" + exit 0 +fi + +pwd_dir=$pwd +cd ../../../ + +PYTHONPATH=. python tools/data/mimetics/build_file_list.py ${DATASET} data/${DATASET}/videos/ data/${DATASET}/${DATASET}_test_list_videos.txt +echo "test filelist for video generated." 
+ +cd ${pwd_dir} \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/mimetics/rename_classnames.sh b/Downstream/Open-Set-Action-Recognition/tools/data/mimetics/rename_classnames.sh new file mode 100644 index 0000000..980d373 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/mimetics/rename_classnames.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +pwd_dir=$pwd +# Rename classname for convenience +DATASET=$1 +if [ "$DATASET" == "mimetics10" ] || [ "$DATASET" == "mimetics" ]; then + echo "We are processing $DATASET" +else + echo "Bad Argument, we only support kinetics400, kinetics600 or kinetics700" + exit 0 +fi + +cd ../../../data/${DATASET}/ +ls ./videos | while read class; do \ + newclass=`echo $class | tr " " "_" `; + if [ "${class}" != "${newclass}" ] + then + mv "videos/${class}" "videos/${newclass}"; + fi +done + +cd ${pwd_dir} diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/mit/README.md b/Downstream/Open-Set-Action-Recognition/tools/data/mit/README.md new file mode 100644 index 0000000..fc7f506 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/mit/README.md @@ -0,0 +1,130 @@ +# Preparing Moments in Time + +## Introduction + +``` +@article{monfortmoments, + title={Moments in Time Dataset: one million videos for event understanding}, + author={Monfort, Mathew and Andonian, Alex and Zhou, Bolei and Ramakrishnan, Kandan and Bargal, Sarah Adel and Yan, Tom and Brown, Lisa and Fan, Quanfu and Gutfruend, Dan and Vondrick, Carl and others}, + journal={IEEE Transactions on Pattern Analysis and Machine Intelligence}, + year={2019}, + issn={0162-8828}, + pages={1--8}, + numpages={8}, + doi={10.1109/TPAMI.2019.2901464}, +} +``` + +For basic dataset information, you can refer to the dataset [website](http://moments.csail.mit.edu/). +Before we start, please make sure that the directory is located at `$MMACTION2/tools/data/mit/`. + +## Step 1. Prepare Annotations and Videos + +First of all, you can run the following script to download the videos along with the annotations. + +```shell +bash download_data.sh +``` + +For better decoding speed, you can resize the original videos into smaller sized, densely encoded version by: + +``` +python ../resize_videos.py ../../../data/mit/videos/ ../../../data/mit/videos_256p_dense_cache --dense --level 2 +``` + +## Step 2. Extract RGB and Flow + +This part is **optional** if you only want to use the video loader. + +Before extracting, please refer to [install.md](/docs/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow). + +If you have plenty of SSD space, then we recommend extracting frames there for better I/O performance. And you can run the following script to soft link the extracted frames. + +```shell +# execute these two line (Assume the SSD is mounted at "/mnt/SSD/") +mkdir /mnt/SSD/mit_extracted/ +ln -s /mnt/SSD/mit_extracted/ ../../../data/mit/rawframes +``` + +If you only want to play with RGB frames (since extracting optical flow can be time-consuming), consider running the following script to extract **RGB-only** frames using denseflow. + +```shell +bash extract_rgb_frames.sh +``` + +If you didn't install denseflow, you can still extract RGB frames using OpenCV by the following script, but it will keep the original size of the images. + +```shell +bash extract_rgb_frames_opencv.sh +``` + +If both are required, run the following script to extract frames. + +```shell +bash extract_frames.sh +``` + +## Step 4. 
Generate File List + +you can run the follow script to generate file list in the format of rawframes and videos. + +```shell +bash generate_{rawframes, videos}_filelist.sh +``` + +## Step 5. Check Directory Structure + +After the whole data process for Moments in Time preparation, +you will get the rawframes (RGB + Flow), videos and annotation files for Moments in Time. + +In the context of the whole project (for Moments in Time only), the folder structure will look like: + +``` +mmaction2 +├── data +│   └── mit +│   ├── annotations +│   │   ├── license.txt +│   │   ├── moments_categories.txt +│   │   ├── README.txt +│   │   ├── trainingSet.csv +│   │   └── validationSet.csv +│   ├── mit_train_rawframe_anno.txt +│   ├── mit_train_video_anno.txt +│   ├── mit_val_rawframe_anno.txt +│   ├── mit_val_video_anno.txt +│   ├── rawframes +│   │   ├── training +│   │   │   ├── adult+female+singing +│   │   │   │   ├── 0P3XG_vf91c_35 +│   │   │   │   │   ├── flow_x_00001.jpg +│   │   │   │   │   ├── flow_x_00002.jpg +│   │   │   │   │   ├── ... +│   │   │   │   │   ├── flow_y_00001.jpg +│   │   │   │   │   ├── flow_y_00002.jpg +│   │   │   │   │   ├── ... +│   │   │   │   │   ├── img_00001.jpg +│   │   │   │   │   └── img_00002.jpg +│   │   │   │   └── yt-zxQfALnTdfc_56 +│   │   │   │   │   ├── ... +│   │   │   └── yawning +│   │   │   ├── _8zmP1e-EjU_2 +│   │   │      │   ├── ... +│   │   └── validation +│   │   │   ├── ... +│   └── videos +│   ├── training +│   │   ├── adult+female+singing +│   │   │   ├── 0P3XG_vf91c_35.mp4 +│   │   │   ├── ... +│   │   │   └── yt-zxQfALnTdfc_56.mp4 +│   │   └── yawning +│   │   ├── ... +│   └── validation +│   │   ├── ... +└── mmaction +└── ... + +``` + +For training and evaluating on Moments in Time, please refer to [getting_started.md](/docs/getting_started.md). diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/mit/download_data.sh b/Downstream/Open-Set-Action-Recognition/tools/data/mit/download_data.sh new file mode 100644 index 0000000..fc8a1da --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/mit/download_data.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +DATA_DIR="../../../data/mit/" + +if [[ ! -d "${DATA_DIR}" ]]; then + echo "${DATA_DIR} does not exist. Creating"; + mkdir -p ${DATA_DIR} +fi + +cd ${DATA_DIR} + +wget http://data.csail.mit.edu/soundnet/actions3/split1/Moments_in_Time_Raw.zip +unzip Moments_in_Time_Raw.zip +rm Moments_in_Time_Raw.zip + +if [ ! -d "./videos" ]; then + mkdir ./videos +fi +mv ./training ./videos && mv ./validation ./video + +if [ ! 
-d "./annotations" ]; then + mkdir ./annotations +fi + +mv *.txt annotations && mv *.csv annotations + +cd "../../tools/data/mit" diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/mit/extract_frames.sh b/Downstream/Open-Set-Action-Recognition/tools/data/mit/extract_frames.sh new file mode 100644 index 0000000..477fb8d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/mit/extract_frames.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +cd ../ +python build_rawframes.py ../../data/mit/videos/training ../../data/mit/rawframes/training/ --level 2 --flow-type tvl1 --ext mp4 --task both +echo "Raw frames (RGB and tv-l1) Generated for train set" + +python build_rawframes.py ../../data/mit/vides/validation/ ../../data/mit/rawframes/validation/ --level 2 --flow-type tvl1 --ext mp4 --task both +echo "Raw frames (RGB and tv-l1) Generated for val set" + +cd mit/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/mit/extract_rgb_frames.sh b/Downstream/Open-Set-Action-Recognition/tools/data/mit/extract_rgb_frames.sh new file mode 100644 index 0000000..4b468b5 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/mit/extract_rgb_frames.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +cd ../ +python build_rawframes.py ../../data/mit/videos/training ../../data/mit/rawframes/training/ --level 2 --ext mp4 --task rgb +echo "Raw frames (RGB only) generated for train set" + +python build_rawframes.py ../../data/mit/videos/validation ../../data/mit/rawframes/validation/ --level 2 --ext mp4 --task rgb +echo "Raw frames (RGB only) generated for val set" + +cd mit/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/mit/extract_rgb_frames_opencv.sh b/Downstream/Open-Set-Action-Recognition/tools/data/mit/extract_rgb_frames_opencv.sh new file mode 100644 index 0000000..004f6e4 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/mit/extract_rgb_frames_opencv.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +cd ../ +python build_rawframes.py ../../data/mit/videos/training ../../data/mit/rawframes/training/ --level 2 --ext mp4 --task rgb --use-opencv +echo "Raw frames (RGB only) generated for train set" + +python build_rawframes.py ../../data/mit/videos/validation ../../data/mit/rawframes/validation/ --level 2 --ext mp4 --task rgb --use-opencv +echo "Raw frames (RGB only) generated for val set" + +cd mit/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/mit/fix_video_filelist.py b/Downstream/Open-Set-Action-Recognition/tools/data/mit/fix_video_filelist.py new file mode 100644 index 0000000..a06c2ac --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/mit/fix_video_filelist.py @@ -0,0 +1,27 @@ +import os +from tqdm import tqdm + + +def fix_listfile(file_split, phase): + assert os.path.exists(file_split), 'File does not exist! 
%s'%(file_split) + filename = file_split.split('/')[-1] + file_split_new = os.path.join(dataset_path, 'temp_' + filename) + with open(file_split_new, 'w') as fw: + with open(file_split, 'r') as fr: + for line in tqdm(fr.readlines(), desc=phase): + foldername = line.split('/')[0] + if foldername == phase: + continue + fw.writelines(phase + '/' + line) + os.remove(file_split) + os.rename(file_split_new, file_split) + + +if __name__ == '__main__': + dataset_path = '../../../data/mit' + + file_split = os.path.join(dataset_path, 'mit_train_list_videos.txt') + fix_listfile(file_split, 'training') + + file_split = os.path.join(dataset_path, 'mit_val_list_videos.txt') + fix_listfile(file_split, 'validation') \ No newline at end of file diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/mit/generate_rawframes_filelist.sh b/Downstream/Open-Set-Action-Recognition/tools/data/mit/generate_rawframes_filelist.sh new file mode 100644 index 0000000..f53bcde --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/mit/generate_rawframes_filelist.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +cd ../../../ +PYTHONPATH=. python tools/data/build_file_list.py mit data/mit/rawframes/training/ --level 2 --format rawframes --num-split 1 --subset train --shuffle +echo "Train filelist for rawframes generated." + +PYTHONPATH=. python tools/data/build_file_list.py mit data/mit/rawframes/validation/ --level 2 --format rawframes --num-split 1 --subset val --shuffle +echo "Val filelist for rawframes generated." +cd tools/data/mit/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/mit/generate_videos_filelist.sh b/Downstream/Open-Set-Action-Recognition/tools/data/mit/generate_videos_filelist.sh new file mode 100644 index 0000000..390d4eb --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/mit/generate_videos_filelist.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +cd ../../../ +PYTHONPATH=. python tools/data/build_file_list.py mit data/mit/videos/training/ --level 2 --format videos --num-split 1 --subset train --shuffle +echo "Train filelist for videos generated." + +PYTHONPATH=. python tools/data/build_file_list.py mit data/mit/videos/validation/ --level 2 --format videos --num-split 1 --subset val --shuffle +echo "Val filelist for videos generated." +cd tools/data/mit/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/mmit/README.md b/Downstream/Open-Set-Action-Recognition/tools/data/mmit/README.md new file mode 100644 index 0000000..025bf0c --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/mmit/README.md @@ -0,0 +1,114 @@ +# Preparing Multi-Moments in Time + +## Introduction + +``` +@misc{monfort2019multimoments, + title={Multi-Moments in Time: Learning and Interpreting Models for Multi-Action Video Understanding}, + author={Mathew Monfort and Kandan Ramakrishnan and Alex Andonian and Barry A McNamara and Alex Lascelles, Bowen Pan, Quanfu Fan, Dan Gutfreund, Rogerio Feris, Aude Oliva}, + year={2019}, + eprint={1911.00232}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +For basic dataset information, you can refer to the dataset [website](http://moments.csail.mit.edu). +Before we start, please make sure that the directory is located at `$MMACTION2/tools/data/mmit/`. + +## Step 1. Prepare Annotations and Videos + +First of all, you can run the following script to prepare annotations. 
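+
+The script below is expected to fetch the Multi-Moments in Time archive into `data/mmit/` and
+move the annotation files into `data/mmit/annotations/`. As an optional sanity check once it
+has finished, a minimal Python sketch (the file names are taken from the directory tree in
+Step 4 and may need adjusting) is:
+
+```python
+import os.path as osp
+
+anno_dir = 'data/mmit/annotations'  # assumed to be run from $MMACTION2
+for name in ['moments_categories.txt', 'trainingSet.txt', 'validationSet.txt']:
+    path = osp.join(anno_dir, name)
+    print(path, 'found' if osp.exists(path) else 'MISSING')
+```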
+ +```shell +bash download_data.sh +``` + +For better decoding speed, you can resize the original videos into smaller sized, densely encoded version by: + +``` +python ../resize_videos.py ../../../data/mmit/videos/ ../../../data/mmit/videos_256p_dense_cache --dense --level 2 +``` + +## Step 2. Extract RGB and Flow + +This part is **optional** if you only want to use the video loader. + +Before extracting, please refer to [install.md](/docs/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow). + +First, you can run the following script to soft link SSD. + +```shell +# execute these two line (Assume the SSD is mounted at "/mnt/SSD/") +mkdir /mnt/SSD/mmit_extracted/ +ln -s /mnt/SSD/mmit_extracted/ ../../../data/mmit/rawframes +``` + +If you only want to play with RGB frames (since extracting optical flow can be time-consuming), consider running the following script to extract **RGB-only** frames using denseflow. + +```shell +bash extract_rgb_frames.sh +``` + +If you didn't install denseflow, you can still extract RGB frames using OpenCV by the following script, but it will keep the original size of the images. + +```shell +bash extract_rgb_frames_opencv.sh +``` + +If both are required, run the following script to extract frames using "tvl1" algorithm. + +```shell +bash extract_frames.sh +``` +## Step 3. Generate File List + +you can run the follow script to generate file list in the format of rawframes or videos. + +```shell +bash generate_rawframes_filelist.sh +bash generate_videos_filelist.sh +``` + +## Step 4. Check Directory Structure + +After the whole data process for Multi-Moments in Time preparation, +you will get the rawframes (RGB + Flow), videos and annotation files for Multi-Moments in Time. + +In the context of the whole project (for Multi-Moments in Time only), the folder structure will look like: + +``` +mmaction2/ +└── data + └── mmit + ├── annotations + │   ├── moments_categories.txt + │   ├── trainingSet.txt + │   └── validationSet.txt + ├── mmit_train_rawframes.txt + ├── mmit_train_videos.txt + ├── mmit_val_rawframes.txt + ├── mmit_val_videos.txt + ├── rawframes + │   ├── 0-3-6-2-9-1-2-6-14603629126_5 + │   │   ├── flow_x_00001.jpg + │   │   ├── flow_x_00002.jpg + │   │   ├── ... + │   │   ├── flow_y_00001.jpg + │   │   ├── flow_y_00002.jpg + │   │   ├── ... + │   │   ├── img_00001.jpg + │   │   └── img_00002.jpg + │   │   ├── ... + │   └── yt-zxQfALnTdfc_56 + │   │   ├── ... + │   └── ... + + └── videos + └── adult+female+singing + ├── 0-3-6-2-9-1-2-6-14603629126_5.mp4 + └── yt-zxQfALnTdfc_56.mp4 + └── ... +``` + +For training and evaluating on Multi-Moments in Time, please refer to [getting_started.md](/docs/getting_started.md). diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/mmit/download_data.sh b/Downstream/Open-Set-Action-Recognition/tools/data/mmit/download_data.sh new file mode 100644 index 0000000..8792849 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/mmit/download_data.sh @@ -0,0 +1,21 @@ +DATA_DIR="../../../data/mmit/" + +if [[ ! -d "${DATA_DIR}" ]]; then + echo "${DATA_DIR} does not exist. Creating"; + mkdir -p ${DATA_DIR} +fi + +cd ${DATA_DIR} + +wget -c https://www.dropbox.com/s/sz3yd1o0gf09amh/Multi_Moments_in_Time.zip?dl=0 + +unzip Multi_Moments_in_Time_Raw.zip +rm Multi_Moments_in_Time.zip + +if [ ! 
-d "./annotations" ]; then + mkdir ./annotations +fi + +mv *.txt annotations && mv *.csv annotations + +cd - diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/mmit/extract_frames.sh b/Downstream/Open-Set-Action-Recognition/tools/data/mmit/extract_frames.sh new file mode 100644 index 0000000..548f862 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/mmit/extract_frames.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +cd ../ +python build_rawframes.py ../../data/mmit/videos/ ../../../data/mmit/rawframes/ --task both --level 2 --flow-type tvl1 --ext mp4 +echo "Raw frames (RGB and Flow) Generated" +cd mmit/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/mmit/extract_rgb_frames.sh b/Downstream/Open-Set-Action-Recognition/tools/data/mmit/extract_rgb_frames.sh new file mode 100644 index 0000000..869b709 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/mmit/extract_rgb_frames.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +cd ../ +python build_rawframes.py ../../data/mmit/videos/ ../../data/mmit/rawframes/ --task rgb --level 2 --ext mp4 + +echo "Genearte raw frames (RGB only)" + +cd mmit/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/mmit/extract_rgb_frames_opencv.sh b/Downstream/Open-Set-Action-Recognition/tools/data/mmit/extract_rgb_frames_opencv.sh new file mode 100644 index 0000000..5d09f05 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/mmit/extract_rgb_frames_opencv.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +cd ../ +python build_rawframes.py ../../data/mmit/videos/ ../../data/mmit/rawframes/ --task rgb --level 2 --ext mp4 --use-opencv + +echo "Genearte raw frames (RGB only)" + +cd mmit/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/mmit/generate_rawframes_filelist.sh b/Downstream/Open-Set-Action-Recognition/tools/data/mmit/generate_rawframes_filelist.sh new file mode 100644 index 0000000..2e745c3 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/mmit/generate_rawframes_filelist.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +cd ../../../ +PYTHONPATH=. python tools/data/build_file_list.py mmit data/mmit/rawframes/ --level 2 --format rawframes --num-split 1 --subset train --shuffle +echo "Train filelist for rawframes generated." + +PYTHONPATH=. python tools/data/build_file_list.py mmit data/mmit/rawframes/ --level 2 --format rawframes --num-split 1 --subset val --shuffle +echo "Val filelist for rawframes generated." +cd tools/data/mmit/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/mmit/generate_videos_filelist.sh b/Downstream/Open-Set-Action-Recognition/tools/data/mmit/generate_videos_filelist.sh new file mode 100644 index 0000000..1fa1f3f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/mmit/generate_videos_filelist.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +cd ../../../ +PYTHONPATH=. python tools/data/build_file_list.py mmit data/mmit/videos/ --level 2 --format videos --num-split 1 --subset train --shuffle +echo "Train filelist for videos generated." + +PYTHONPATH=. python tools/data/build_file_list.py mmit data/mmit/videos/ --level 2 --format videos --num-split 1 --subset val --shuffle +echo "Val filelist for videos generated." 
+cd tools/data/mmit/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/omnisource/README.md b/Downstream/Open-Set-Action-Recognition/tools/data/omnisource/README.md new file mode 100644 index 0000000..b8144c2 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/omnisource/README.md @@ -0,0 +1,148 @@ +## Preparing OmniSource + +## Introduction + +``` +@article{duan2020omni, + title={Omni-sourced Webly-supervised Learning for Video Recognition}, + author={Duan, Haodong and Zhao, Yue and Xiong, Yuanjun and Liu, Wentao and Lin, Dahua}, + journal={arXiv preprint arXiv:2003.13042}, + year={2020} +} +``` + +We release a subset of the OmniSource web dataset used in the paper [Omni-sourced Webly-supervised Learning for Video Recognition](https://arxiv.org/abs/2003.13042). Since all web dataset in OmniSource are built based on the Kinetics-400 taxonomy, we select those web data related to the 200 classes in Mini-Kinetics subset (which is proposed in [Rethinking Spatiotemporal Feature Learning: Speed-Accuracy Trade-offs in Video Classification](https://arxiv.org/pdf/1712.04851.pdf)). + +We provide data from all sources that are related to the 200 classes in Mini-Kinetics (including Kinetics trimmed clips, Kinetics untrimmed videos, images from Google and Instagram, video clips from Instagram). To obtain this dataset, please first fill in the [request form](https://docs.google.com/forms/d/e/1FAIpQLSd8_GlmHzG8FcDbW-OEu__G7qLgOSYZpH-i5vYVJcu7wcb_TQ/viewform?usp=sf_link). We will share the download link to you after your request is received. Since we release all data crawled from the web without any filtering, the dataset is large and it may take some time to download them. We describe the size of the datasets in the following table: + +| Dataset Name | #samples | Size | Teacher Model | #samples after filtering | #samples similar to k200_val | +| :-------------: | :------: | :-----: | :--------------: | :----------------------: | :--------------------------: | +| k200_train | 76030 | 45.6G | N/A | N/A | N/A | +| k200_val | 4838 | 2.9G | N/A | N/A | N/A | +| googleimage_200 | 3050880 | 265.5G | TSN-R50-8seg | 1188695 | 967 | +| insimage_200 | 3654650 | 224.4G | TSN-R50-8seg | 879726 | 116 | +| insvideo_200 | 732855 | 1487.6G | SlowOnly-8x8-R50 | 330680 | 956 | +| k200_raw_train | 76027 | 963.5G | SlowOnly-8x8-R50 | N/A | N/A | + +The file structure of our uploaded OmniSource dataset looks like: + +``` +OmniSource/ +├── annotations +│ ├── googleimage_200 +│ │ ├── googleimage_200.txt File list of all valid images crawled from Google. +│ │ ├── tsn_8seg_googleimage_200_duplicate.txt Postive file list of images crawled from Google, which is similar to a validation example. +│ │ ├── tsn_8seg_googleimage_200.txt Postive file list of images crawled from Google, filtered by the teacher model. +│ │ └── tsn_8seg_googleimage_200_wodup.txt Postive file list of images crawled from Google, filtered by the teacher model, after de-duplication. +│ ├── insimage_200 +│ │ ├── insimage_200.txt +│ │ ├── tsn_8seg_insimage_200_duplicate.txt +│ │ ├── tsn_8seg_insimage_200.txt +│ │ └── tsn_8seg_insimage_200_wodup.txt +│ ├── insvideo_200 +│ │ ├── insvideo_200.txt +│ │ ├── slowonly_8x8_insvideo_200_duplicate.txt +│ │ ├── slowonly_8x8_insvideo_200.txt +│ │ └── slowonly_8x8_insvideo_200_wodup.txt +│ ├── k200_actions.txt The list of action names of the 200 classes in MiniKinetics. +│ ├── K400_to_MiniKinetics_classidx_mapping.json The index mapping from Kinetics-400 to MiniKinetics. 
+│ ├── kinetics_200 +│ │ ├── k200_train.txt +│ │ └── k200_val.txt +│ ├── kinetics_raw_200 +│ │ └── slowonly_8x8_kinetics_raw_200.json Kinetics Raw Clips filtered by the teacher model. +│ └── webimage_200 +│ └── tsn_8seg_webimage_200_wodup.txt The union of `tsn_8seg_googleimage_200_wodup.txt` and `tsn_8seg_insimage_200_wodup.txt` +├── googleimage_200 (10 volumes) +│ ├── vol_0.tar +│ ├── ... +│ └── vol_9.tar +├── insimage_200 (10 volumes) +│ ├── vol_0.tar +│ ├── ... +│ └── vol_9.tar +├── insvideo_200 (20 volumes) +│ ├── vol_00.tar +│ ├── ... +│ └── vol_19.tar +├── kinetics_200_train +│ └── kinetics_200_train.tar +├── kinetics_200_val +│ └── kinetics_200_val.tar +└── kinetics_raw_200_train (16 volumes) + ├── vol_0.tar + ├── ... + └── vol_15.tar +``` + +## Data Preparation + +For data preparation, you need to first download those data. For `kinetics_200` and 3 web datasets: `googleimage_200`, `insimage_200` and `insvideo_200`, you just need to extract each volume and merge their contents. + +For Kinetics raw videos, since loading long videos is very heavy, you need to first trim it into clips. Here we provide a script named `trim_raw_video.py`. It trims a long video into 10-second clips and remove the original raw video. You can use it to trim the Kinetics raw video. + +The data should be placed in `data/OmniSource/`. When data preparation finished, the folder structure of `data/OmniSource` looks like (We omit the files not needed in training & testing for simplicity): + +``` +data/OmniSource/ +├── annotations +│ ├── googleimage_200 +│ │ └── tsn_8seg_googleimage_200_wodup.txt Postive file list of images crawled from Google, filtered by the teacher model, after de-duplication. +│ ├── insimage_200 +│ │ └── tsn_8seg_insimage_200_wodup.txt +│ ├── insvideo_200 +│ │ └── slowonly_8x8_insvideo_200_wodup.txt +│ ├── kinetics_200 +│ │ ├── k200_train.txt +│ │ └── k200_val.txt +│ ├── kinetics_raw_200 +│ │ └── slowonly_8x8_kinetics_raw_200.json Kinetics Raw Clips filtered by the teacher model. +│ └── webimage_200 +│ └── tsn_8seg_webimage_200_wodup.txt The union of `tsn_8seg_googleimage_200_wodup.txt` and `tsn_8seg_insimage_200_wodup.txt` +├── googleimage_200 +│ ├── 000 +| │ ├── 00 +| │ │ ├── 000001.jpg +| │ │ ├── ... +| │ │ └── 000901.jpg +| │ ├── ... +│ │ └── 19 +│ ├── ... +│ └── 199 +├── insimage_200 +│ ├── 000 +| │ ├── abseil +| │ │ ├── 1J9tKWCNgV_0.jpg +| │ │ ├── ... +| │ │ └── 1J9tKWCNgV_0.jpg +│ │ └── abseiling +│ ├── ... +│ └── 199 +├── insvideo_200 +│ ├── 000 +| │ ├── abseil +| │ │ ├── B00arxogubl.mp4 +| │ │ ├── ... +| │ │ └── BzYsP0HIvbt.mp4 +│ │ └── abseiling +│ ├── ... +│ └── 199 +├── kinetics_200_train +│ ├── 0074cdXclLU.mp4 +| ├── ... +| ├── zzzlyL61Fyo.mp4 +├── kinetics_200_val +│ ├── 01fAWEHzudA.mp4 +| ├── ... +| ├── zymA_6jZIz4.mp4 +└── kinetics_raw_200_train + ├── pref_ + | ├── ___dTOdxzXY + | | ├── part_0.mp4 + | | ├── ... + | | └── part_6.mp4 + | ├── ... + | └── _zygwGDE2EM + ├── ... 
+ └── prefZ +``` diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/omnisource/trim_raw_video.py b/Downstream/Open-Set-Action-Recognition/tools/data/omnisource/trim_raw_video.py new file mode 100644 index 0000000..32cfca8 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/omnisource/trim_raw_video.py @@ -0,0 +1,44 @@ +import os +import os.path as osp +import sys +from subprocess import check_output + +import mmcv + + +def get_duration(vid_name): + command = f'ffprobe -i {vid_name} 2>&1 | grep "Duration"' + output = str(check_output(command, shell=True)) + output = output.split(',')[0].split('Duration:')[1].strip() + h, m, s = output.split(':') + duration = int(h) * 3600 + int(m) * 60 + float(s) + return duration + + +def trim(vid_name): + try: + lt = get_duration(vid_name) + except Exception: + print(f'get_duration failed for video {vid_name}', flush=True) + return + + i = 0 + name, _ = osp.splitext(vid_name) + + # We output 10-second clips into the folder `name` + dest = name + mmcv.mkdir_or_exist(dest) + + command_tmpl = ('ffmpeg -y loglevel error -i {} -ss {} -t {} -crf 18 ' + '-c:v libx264 {}/part_{}.mp4') + while i * 10 < lt: + os.system(command_tmpl.format(vid_name, i * 10, 10, dest, i)) + i += 1 + + # remove a raw video after decomposing it into 10-second clip to save space + os.remove(vid_name) + + +if __name__ == '__main__': + vid_name = sys.argv[1] + trim(vid_name) diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/parse_file_list.py b/Downstream/Open-Set-Action-Recognition/tools/data/parse_file_list.py new file mode 100644 index 0000000..f860159 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/parse_file_list.py @@ -0,0 +1,499 @@ +import csv +import fnmatch +import glob +import json +import os +import os.path as osp + + +def parse_directory(path, + rgb_prefix='img_', + flow_x_prefix='flow_x_', + flow_y_prefix='flow_y_', + level=1): + """Parse directories holding extracted frames from standard benchmarks. + + Args: + path (str): Directory path to parse frames. + rgb_prefix (str): Prefix of generated rgb frames name. + default: 'img_'. + flow_x_prefix (str): Prefix of generated flow x name. + default: `flow_x_`. + flow_y_prefix (str): Prefix of generated flow y name. + default: `flow_y_`. + level (int): Directory level for glob searching. Options are 1 and 2. + default: 1. + + Returns: + dict: frame info dict with video id as key and tuple(path(str), + rgb_num(int), flow_x_num(int)) as value. + """ + print(f'parse frames under directory {path}') + if level == 1: + # Only search for one-level directory + def locate_directory(x): + return osp.basename(x) + + frame_dirs = glob.glob(osp.join(path, '*')) + + elif level == 2: + # search for two-level directory + def locate_directory(x): + return osp.join(osp.basename(osp.dirname(x)), osp.basename(x)) + + frame_dirs = glob.glob(osp.join(path, '*', '*')) + + else: + raise ValueError('level can be only 1 or 2') + + def count_files(directory, prefix_list): + """Count file number with a given directory and prefix. + + Args: + directory (str): Data directory to be search. + prefix_list (list): List or prefix. + + Returns: + list (int): Number list of the file with the prefix. 
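+
+        Example (illustrative):
+            With prefix_list ('img_', 'flow_x_', 'flow_y_') and a frame folder
+            holding 8 RGB plus 8 + 8 flow images, the returned list is [8, 8, 8].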
+ """ + lst = os.listdir(directory) + cnt_list = [len(fnmatch.filter(lst, x + '*')) for x in prefix_list] + return cnt_list + + # check RGB + frame_dict = {} + for i, frame_dir in enumerate(frame_dirs): + total_num = count_files(frame_dir, + (rgb_prefix, flow_x_prefix, flow_y_prefix)) + dir_name = locate_directory(frame_dir) + + num_x = total_num[1] + num_y = total_num[2] + if num_x != num_y: + raise ValueError(f'x and y direction have different number ' + f'of flow images in video directory: {frame_dir}') + if i % 200 == 0: + print(f'{i} videos parsed') + frame_dict[dir_name] = (frame_dir, total_num[0], num_x) + + print('frame directory analysis done') + return frame_dict + + +def parse_ucf101_splits(level): + """Parse UCF-101 dataset into "train", "val", "test" splits. + + Args: + level (int): Directory level of data. 1 for the single-level directory, + 2 for the two-level directory. + + Returns: + list: "train", "val", "test" splits of UCF-101. + """ + class_index_file = 'data/ucf101/annotations/classInd.txt' + train_file_template = 'data/ucf101/annotations/trainlist{:02d}.txt' + test_file_template = 'data/ucf101/annotations/testlist{:02d}.txt' + + with open(class_index_file, 'r') as fin: + class_index = [x.strip().split() for x in fin] + class_mapping = {x[1]: int(x[0]) - 1 for x in class_index} + + def line_to_map(line): + """A function to map line string to video and label. + + Args: + line (str): A long directory path, which is a text path. + + Returns: + tuple[str, str]: (video, label), video is the video id, + label is the video label. + """ + items = line.strip().split() + video = osp.splitext(items[0])[0] + if level == 1: + video = osp.basename(video) + label = items[0] + elif level == 2: + video = osp.join( + osp.basename(osp.dirname(video)), osp.basename(video)) + label = class_mapping[osp.dirname(items[0])] + return video, label + + splits = [] + for i in range(1, 4): + with open(train_file_template.format(i), 'r') as fin: + train_list = [line_to_map(x) for x in fin] + + with open(test_file_template.format(i), 'r') as fin: + test_list = [line_to_map(x) for x in fin] + splits.append((train_list, test_list)) + + return splits + + +def parse_jester_splits(level): + """Parse Jester into "train", "val" splits. + + Args: + level (int): Directory level of data. 1 for the single-level directory, + 2 for the two-level directory. + + Returns: + list: "train", "val", "test" splits of Jester dataset. 
+ """ + # Read the annotations + class_index_file = 'data/jester/annotations/jester-v1-labels.csv' + train_file = 'data/jester/annotations/jester-v1-train.csv' + val_file = 'data/jester/annotations/jester-v1-validation.csv' + test_file = 'data/jester/annotations/jester-v1-test.csv' + + with open(class_index_file, 'r') as fin: + class_index = [x.strip() for x in fin] + class_mapping = {class_index[idx]: idx for idx in range(len(class_index))} + + def line_to_map(line, test_mode=False): + items = line.strip().split(';') + video = items[0] + if level == 1: + video = osp.basename(video) + elif level == 2: + video = osp.join( + osp.basename(osp.dirname(video)), osp.basename(video)) + if test_mode: + return video + else: + label = class_mapping[items[1]] + return video, label + + with open(train_file, 'r') as fin: + train_list = [line_to_map(x) for x in fin] + + with open(val_file, 'r') as fin: + val_list = [line_to_map(x) for x in fin] + + with open(test_file, 'r') as fin: + test_list = [line_to_map(x, test_mode=True) for x in fin] + + splits = ((train_list, val_list, test_list), ) + return splits + + +def parse_sthv1_splits(level): + """Parse Something-Something dataset V1 into "train", "val" splits. + + Args: + level (int): Directory level of data. 1 for the single-level directory, + 2 for the two-level directory. + + Returns: + list: "train", "val", "test" splits of Something-Something V1 dataset. + """ + # Read the annotations + # yapf: disable + class_index_file = 'data/sthv1/annotations/something-something-v1-labels.csv' # noqa + # yapf: enable + train_file = 'data/sthv1/annotations/something-something-v1-train.csv' + val_file = 'data/sthv1/annotations/something-something-v1-validation.csv' + test_file = 'data/sthv1/annotations/something-something-v1-test.csv' + + with open(class_index_file, 'r') as fin: + class_index = [x.strip() for x in fin] + class_mapping = {class_index[idx]: idx for idx in range(len(class_index))} + + def line_to_map(line, test_mode=False): + items = line.strip().split(';') + video = items[0] + if level == 1: + video = osp.basename(video) + elif level == 2: + video = osp.join( + osp.basename(osp.dirname(video)), osp.basename(video)) + if test_mode: + return video + else: + label = class_mapping[items[1]] + return video, label + + with open(train_file, 'r') as fin: + train_list = [line_to_map(x) for x in fin] + + with open(val_file, 'r') as fin: + val_list = [line_to_map(x) for x in fin] + + with open(test_file, 'r') as fin: + test_list = [line_to_map(x, test_mode=True) for x in fin] + + splits = ((train_list, val_list, test_list), ) + return splits + + +def parse_sthv2_splits(level): + """Parse Something-Something dataset V2 into "train", "val" splits. + + Args: + level (int): Directory level of data. 1 for the single-level directory, + 2 for the two-level directory. + + Returns: + list: "train", "val", "test" splits of Something-Something V2 dataset. 
+ """ + # Read the annotations + # yapf: disable + class_index_file = 'data/sthv2/annotations/something-something-v2-labels.json' # noqa + # yapf: enable + train_file = 'data/sthv2/annotations/something-something-v2-train.json' + val_file = 'data/sthv2/annotations/something-something-v2-validation.json' + test_file = 'data/sthv2/annotations/something-something-v2-test.json' + + with open(class_index_file, 'r') as fin: + class_mapping = json.loads(fin.read()) + + def line_to_map(item, test_mode=False): + video = item['id'] + if level == 1: + video = osp.basename(video) + elif level == 2: + video = osp.join( + osp.basename(osp.dirname(video)), osp.basename(video)) + if test_mode: + return video + else: + template = item['template'].replace('[', '') + template = template.replace(']', '') + label = int(class_mapping[template]) + return video, label + + with open(train_file, 'r') as fin: + items = json.loads(fin.read()) + train_list = [line_to_map(item) for item in items] + + with open(val_file, 'r') as fin: + items = json.loads(fin.read()) + val_list = [line_to_map(item) for item in items] + + with open(test_file, 'r') as fin: + items = json.loads(fin.read()) + test_list = [line_to_map(item, test_mode=True) for item in items] + + splits = ((train_list, val_list, test_list), ) + return splits + + +def parse_mmit_splits(): + """Parse Multi-Moments in Time dataset into "train", "val" splits. + + Returns: + list: "train", "val", "test" splits of Multi-Moments in Time. + """ + + # Read the annotations + def line_to_map(x): + video = osp.splitext(x[0])[0] + labels = [int(digit) for digit in x[1:]] + return video, labels + + csv_reader = csv.reader(open('data/mmit/annotations/trainingSet.csv')) + train_list = [line_to_map(x) for x in csv_reader] + + csv_reader = csv.reader(open('data/mmit/annotations/validationSet.csv')) + val_list = [line_to_map(x) for x in csv_reader] + + test_list = val_list # not test for mit + + splits = ((train_list, val_list, test_list), ) + return splits + + +def parse_kinetics_splits(level, dataset): + """Parse Kinetics dataset into "train", "val", "test" splits. + + Args: + level (int): Directory level of data. 1 for the single-level directory, + 2 for the two-level directory. + dataset (str): Denotes the version of Kinetics that needs to be parsed, + choices are "kinetics400", "kinetics600" and "kinetics700". + + Returns: + list: "train", "val", "test" splits of Kinetics. + """ + + def convert_label(s, keep_whitespaces=False): + """Convert label name to a formal string. + + Remove redundant '"' and convert whitespace to '_'. + + Args: + s (str): String to be converted. + keep_whitespaces(bool): Whether to keep whitespace. Default: False. + + Returns: + str: Converted string. + """ + if not keep_whitespaces: + return s.replace('"', '').replace(' ', '_') + else: + return s.replace('"', '') + + def line_to_map(x, test=False): + """A function to map line string to video and label. + + Args: + x (str): A single line from Kinetics csv file. + test (bool): Indicate whether the line comes from test + annotation file. + + Returns: + tuple[str, str]: (video, label), video is the video id, + label is the video label. 
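+
+        Example (illustrative): a non-test row
+        ('abseiling', 'dummy_video_id', '10', '20') is mapped to the video id
+        'dummy_video_id_000010_000020' (prefixed with the class folder when
+        level == 2) and to the class index of 'abseiling'.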
+ """ + if test: + # video = f'{x[0]}_{int(x[1]):06d}_{int(x[2]):06d}' + video = f'{x[1]}_{int(float(x[2])):06d}_{int(float(x[3])):06d}' + label = -1 # label unknown + return video, label + else: + video = f'{x[1]}_{int(float(x[2])):06d}_{int(float(x[3])):06d}' + if level == 2: + video = f'{convert_label(x[0])}/{video}' + else: + assert level == 1 + label = class_mapping[convert_label(x[0])] + return video, label + + train_file = f'data/{dataset}/annotations/kinetics_train.csv' + val_file = f'data/{dataset}/annotations/kinetics_val.csv' + test_file = f'data/{dataset}/annotations/kinetics_test.csv' + + csv_reader = csv.reader(open(train_file)) + # skip the first line + next(csv_reader) + + labels_sorted = sorted(set([convert_label(row[0]) for row in csv_reader])) + class_mapping = {label: i for i, label in enumerate(labels_sorted)} + + csv_reader = csv.reader(open(train_file)) + next(csv_reader) + train_list = [line_to_map(x) for x in csv_reader] + + csv_reader = csv.reader(open(val_file)) + next(csv_reader) + val_list = [line_to_map(x) for x in csv_reader] + + csv_reader = csv.reader(open(test_file)) + next(csv_reader) + test_list = [line_to_map(x, test=True) for x in csv_reader] + + splits = ((train_list, val_list, test_list), ) + return splits + + +def parse_mit_splits(): + """Parse Moments in Time dataset into "train", "val" splits. + + Returns: + list: "train", "val", "test" splits of Moments in Time. + """ + # Read the annotations + class_mapping = {} + with open('data/mit/annotations/moments_categories.txt') as f_cat: + for line in f_cat.readlines(): + cat, digit = line.rstrip().split(',') + class_mapping[cat] = int(digit) + + def line_to_map(x): + video = osp.splitext(x[0])[0] + label = class_mapping[osp.dirname(x[0])] + return video, label + + csv_reader = csv.reader(open('data/mit/annotations/trainingSet.csv')) + train_list = [line_to_map(x) for x in csv_reader] + + csv_reader = csv.reader(open('data/mit/annotations/validationSet.csv')) + val_list = [line_to_map(x) for x in csv_reader] + + test_list = val_list # no test for mit + + splits = ((train_list, val_list, test_list), ) + return splits + + +def parse_hmdb51_split(level): + train_file_template = 'data/hmdb51/annotations/trainlist{:02d}.txt' + test_file_template = 'data/hmdb51/annotations/testlist{:02d}.txt' + class_index_file = 'data/hmdb51/annotations/classInd.txt' + + def generate_class_index_file(): + """This function will generate a `ClassInd.txt` for HMDB51 in a format + like UCF101, where class id starts with 1.""" + frame_path = 'data/hmdb51/rawframes' + annotation_dir = 'data/hmdb51/annotations' + + class_list = sorted(os.listdir(frame_path)) + class_dict = dict() + with open(class_index_file, 'w') as f: + content = [] + for class_id, class_name in enumerate(class_list): + # like `ClassInd.txt` in UCF-101, the class_id begins with 1 + class_dict[class_name] = class_id + 1 + cur_line = ' '.join([str(class_id + 1), class_name]) + content.append(cur_line) + content = '\n'.join(content) + f.write(content) + + for i in range(1, 4): + train_content = [] + test_content = [] + for class_name in class_dict: + filename = class_name + f'_test_split{i}.txt' + filename_path = osp.join(annotation_dir, filename) + with open(filename_path, 'r') as fin: + for line in fin: + video_info = line.strip().split() + video_name = video_info[0] + if video_info[1] == '1': + target_line = ' '.join([ + osp.join(class_name, video_name), + str(class_dict[class_name]) + ]) + train_content.append(target_line) + elif video_info[1] == '2': + 
target_line = ' '.join([ + osp.join(class_name, video_name), + str(class_dict[class_name]) + ]) + test_content.append(target_line) + train_content = '\n'.join(train_content) + test_content = '\n'.join(test_content) + with open(train_file_template.format(i), 'w') as fout: + fout.write(train_content) + with open(test_file_template.format(i), 'w') as fout: + fout.write(test_content) + + if not osp.exists(class_index_file): + generate_class_index_file() + + with open(class_index_file, 'r') as fin: + class_index = [x.strip().split() for x in fin] + class_mapping = {x[1]: int(x[0]) - 1 for x in class_index} + + def line_to_map(line): + items = line.strip().split() + video = osp.splitext(items[0])[0] + if level == 1: + video = osp.basename(video) + elif level == 2: + video = osp.join( + osp.basename(osp.dirname(video)), osp.basename(video)) + label = class_mapping[osp.dirname(items[0])] + return video, label + + splits = [] + for i in range(1, 4): + with open(train_file_template.format(i), 'r') as fin: + train_list = [line_to_map(x) for x in fin] + + with open(test_file_template.format(i), 'r') as fin: + test_list = [line_to_map(x) for x in fin] + splits.append((train_list, test_list)) + + return splits diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/resize_video.py b/Downstream/Open-Set-Action-Recognition/tools/data/resize_video.py new file mode 100644 index 0000000..2bfe4f5 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/resize_video.py @@ -0,0 +1,110 @@ +import argparse +import glob +import os +import os.path as osp +import sys +from multiprocessing import Pool + + +def resize_videos(vid_item): + """Generate resized video cache. + + Args: + vid_item (list): Video item containing video full path, + video relative path. + + Returns: + bool: Whether generate video cache successfully. 
+ """ + full_path, vid_path = vid_item + out_full_path = osp.join(args.out_dir, vid_path) + dir_name = osp.dirname(vid_path) + out_dir = osp.join(args.out_dir, dir_name) + if not osp.exists(out_dir): + os.makedirs(out_dir) + result = os.popen( + f'ffprobe -hide_banner -loglevel error -select_streams v:0 -show_entries stream=width,height -of csv=p=0 {full_path}' # noqa:E501 + ) + w, h = [int(d) for d in result.readline().rstrip().split(',')] + if w > h: + cmd = (f'ffmpeg -hide_banner -loglevel error -i {full_path} ' + f'-vf {"mpdecimate," if args.remove_dup else ""}' + f'scale=-2:{args.scale} ' + f'{"-vsync vfr" if args.remove_dup else ""} ' + f'-c:v libx264 {"-g 16" if args.dense else ""} ' + f'-an {out_full_path} -y') + else: + cmd = (f'ffmpeg -hide_banner -loglevel error -i {full_path} ' + f'-vf {"mpdecimate," if args.remove_dup else ""}' + f'scale={args.scale}:-2 ' + f'{"-vsync vfr" if args.remove_dup else ""} ' + f'-c:v libx264 {"-g 16" if args.dense else ""} ' + f'-an {out_full_path} -y') + os.popen(cmd) + print(f'{vid_path} done') + sys.stdout.flush() + return True + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Generate the resized cache of original videos') + parser.add_argument('src_dir', type=str, help='source video directory') + parser.add_argument('out_dir', type=str, help='output video directory') + parser.add_argument( + '--dense', + action='store_true', + help='whether to generate a faster cache') + parser.add_argument( + '--level', + type=int, + choices=[1, 2], + default=2, + help='directory level of data') + parser.add_argument( + '--remove-dup', + action='store_true', + help='whether to remove duplicated frames') + parser.add_argument( + '--ext', + type=str, + default='mp4', + choices=['avi', 'mp4', 'webm'], + help='video file extensions') + parser.add_argument( + '--scale', + type=int, + default=256, + help='resize image short side length keeping ratio') + parser.add_argument( + '--num-worker', type=int, default=8, help='number of workers') + args = parser.parse_args() + + return args + + +if __name__ == '__main__': + args = parse_args() + + if not osp.isdir(args.out_dir): + print(f'Creating folder: {args.out_dir}') + os.makedirs(args.out_dir) + + print('Reading videos from folder: ', args.src_dir) + print('Extension of videos: ', args.ext) + fullpath_list = glob.glob(args.src_dir + '/*' * args.level + '.' 
+ + args.ext) + done_fullpath_list = glob.glob(args.out_dir + '/*' * args.level + args.ext) + print('Total number of videos found: ', len(fullpath_list)) + print('Total number of videos transfer finished: ', + len(done_fullpath_list)) + if args.level == 2: + vid_list = list( + map( + lambda p: osp.join( + osp.basename(osp.dirname(p)), osp.basename(p)), + fullpath_list)) + elif args.level == 1: + vid_list = list(map(lambda p: osp.basename(p), fullpath_list)) + pool = Pool(args.num_worker) + pool.map(resize_videos, zip(fullpath_list, vid_list)) diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/sthv1/README.md b/Downstream/Open-Set-Action-Recognition/tools/data/sthv1/README.md new file mode 100644 index 0000000..d85a6be --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/sthv1/README.md @@ -0,0 +1,141 @@ +# Preparing Something-Something V1 + +## Introduction + +``` +@misc{goyal2017something, + title={The "something something" video database for learning and evaluating visual common sense}, + author={Raghav Goyal and Samira Ebrahimi Kahou and Vincent Michalski and Joanna Materzyńska and Susanne Westphal and Heuna Kim and Valentin Haenel and Ingo Fruend and Peter Yianilos and Moritz Mueller-Freitag and Florian Hoppe and Christian Thurau and Ingo Bax and Roland Memisevic}, + year={2017}, + eprint={1706.04261}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +For basic dataset information, you can refer to the dataset [website](https://20bn.com/datasets/something-something/v1). +Before we start, please make sure that the directory is located at `$MMACTION2/tools/data/sthv1/`. + +## Step 1. Prepare Annotations + +First of all, you have to sign in and download annotations to `$MMACTION2/data/sthv1/annotations` on the official [website](https://20bn.com/datasets/something-something/v1). + +## Step 2. Prepare RGB Frames + +Since the [sthv1 website](https://20bn.com/datasets/something-something/v1) doesn't provide the original video data and only extracted RGB frames are available, you have to directly download RGB frames from [sthv1 website](https://20bn.com/datasets/something-something/v1). + +You can download all RGB frame parts on [sthv1 website](https://20bn.com/datasets/something-something/v1) to `$MMACTION2/data/sthv1/` and use the following command to extract. + +```shell +cd $MMACTION2/data/sthv1/ +cat 20bn-something-something-v1-?? | tar zx +cd $MMACTION2/tools/data/sthv1/ +``` + +For users who only want to use RGB frames, you can skip to step 5 to generate file lists in the format of rawframes. Since the prefix of official JPGs is "%05d.jpg" (e.g., "00001.jpg"), we add "filename_tmpl='{:05}.jpg'" to the dict of `data.train`, `data.val` and `data.test` in the config files related with sthv1 like this: + +``` +data = dict( + videos_per_gpu=16, + workers_per_gpu=4, + train=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=data_root, + filename_tmpl='{:05}.jpg', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=data_root_val, + filename_tmpl='{:05}.jpg', + pipeline=val_pipeline), + test=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=data_root_val, + filename_tmpl='{:05}.jpg', + pipeline=test_pipeline)) +``` + +## Step 3. Extract Flow + +This part is **optional** if you only want to use RGB frames. + +Before extracting, please refer to [install.md](/docs/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow). 
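+
+Once the flow extraction described below has finished, every clip folder under
+`data/sthv1/rawframes/` should contain the RGB frames (`00001.jpg`, ...) together with matching
+`flow_x_*.jpg` / `flow_y_*.jpg` images, as in the directory tree at the end of this guide. A
+small optional Python sketch for spotting clips with mismatched flow counts (the rawframes
+path is an assumption about where you extracted them):
+
+```python
+import fnmatch
+import os
+import os.path as osp
+
+root = '../../../data/sthv1/rawframes'  # relative to tools/data/sthv1/ (assumed)
+for clip in sorted(os.listdir(root)):
+    files = os.listdir(osp.join(root, clip))
+    n_x = len(fnmatch.filter(files, 'flow_x_*.jpg'))
+    n_y = len(fnmatch.filter(files, 'flow_y_*.jpg'))
+    if n_x != n_y:
+        print(f'{clip}: flow_x ({n_x}) != flow_y ({n_y})')
+```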
+ +If you have plenty of SSD space, then we recommend extracting frames there for better I/O performance. + +You can run the following script to soft link SSD. + +```shell +# execute these two line (Assume the SSD is mounted at "/mnt/SSD/") +mkdir /mnt/SSD/sthv1_extracted/ +ln -s /mnt/SSD/sthv1_extracted/ ../../../data/sthv1/rawframes +``` + +Then, you can run the following script to extract optical flow based on RGB frames. + +```shell +cd $MMACTION2/tools/data/sthv1/ +bash extract_flow.sh +``` + +## Step 4. Encode Videos + +This part is **optional** if you only want to use RGB frames. + +You can run the following script to encode videos. + +```shell +cd $MMACTION2/tools/data/sthv1/ +bash encode_videos.sh +``` + +## Step 5. Generate File List + +You can run the follow script to generate file list in the format of rawframes and videos. + +```shell +cd $MMACTION2/tools/data/sthv1/ +bash generate_{rawframes, videos}_filelist.sh +``` + +## Step 5. Check Directory Structure + +After the whole data process for Something-Something V1 preparation, +you will get the rawframes (RGB + Flow), and annotation files for Something-Something V1. + +In the context of the whole project (for Something-Something V1 only), the folder structure will look like: + +``` +mmaction2 +├── mmaction +├── tools +├── configs +├── data +│ ├── sthv1 +│ │ ├── sthv1_{train,val}_list_rawframes.txt +│ │ ├── sthv1_{train,val}_list_videos.txt +│ │ ├── annotations +│ | ├── videos +│ | | ├── 1.mp4 +│ | | ├── 2.mp4 +│ | | ├──... +│ | ├── rawframes +│ | | ├── 1 +│ | | | ├── 00001.jpg +│ | | | ├── 00002.jpg +│ | | | ├── ... +│ | | | ├── flow_x_00001.jpg +│ | | | ├── flow_x_00002.jpg +│ | | | ├── ... +│ | | | ├── flow_y_00001.jpg +│ | | | ├── flow_y_00002.jpg +│ | | | ├── ... +│ | | ├── 2 +│ | | ├── ... + +``` + +For training and evaluating on Something-Something V1, please refer to [getting_started.md](/docs/getting_started.md). diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/sthv1/encode_videos.sh b/Downstream/Open-Set-Action-Recognition/tools/data/sthv1/encode_videos.sh new file mode 100644 index 0000000..79a49ab --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/sthv1/encode_videos.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +cd ../ +python build_videos.py ../../data/sthv1/rawframes/ ../../data/sthv1/videos/ --fps 12 --level 1 --start-idx 1 --filename-tmpl '%05d' +echo "Encode videos" + +cd sthv1/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/sthv1/extract_flow.sh b/Downstream/Open-Set-Action-Recognition/tools/data/sthv1/extract_flow.sh new file mode 100644 index 0000000..25a6688 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/sthv1/extract_flow.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +cd ../ +python build_rawframes.py ../../data/sthv1/rawframes/ ../../data/sthv1/rawframes/ --task flow --level 1 --flow-type tvl1 --input-frames +echo "Flow (tv-l1) Generated" +cd sthv1/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/sthv1/generate_rawframes_filelist.sh b/Downstream/Open-Set-Action-Recognition/tools/data/sthv1/generate_rawframes_filelist.sh new file mode 100644 index 0000000..b6a3935 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/sthv1/generate_rawframes_filelist.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +cd ../../../ +PYTHONPATH=. python tools/data/build_file_list.py sthv1 data/sthv1/rawframes/ --rgb-prefix '0' --num-split 1 --level 1 --subset train --format rawframes --shuffle +PYTHONPATH=. 
python tools/data/build_file_list.py sthv1 data/sthv1/rawframes/ --rgb-prefix '0' --num-split 1 --level 1 --subset val --format rawframes --shuffle +echo "Filelist for rawframes generated." + +cd tools/data/sthv1/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/sthv1/generate_videos_filelist.sh b/Downstream/Open-Set-Action-Recognition/tools/data/sthv1/generate_videos_filelist.sh new file mode 100644 index 0000000..4da50b9 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/sthv1/generate_videos_filelist.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +cd ../../../ +PYTHONPATH=. python tools/data/build_file_list.py sthv1 data/sthv1/videos/ --num-split 1 --level 1 --subset train --format videos --shuffle +PYTHONPATH=. python tools/data/build_file_list.py sthv1 data/sthv1/videos/ --num-split 1 --level 1 --subset val --format videos --shuffle +echo "Filelist for videos generated." + +cd tools/data/sthv1/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/sthv2/README.md b/Downstream/Open-Set-Action-Recognition/tools/data/sthv2/README.md new file mode 100644 index 0000000..80e140c --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/sthv2/README.md @@ -0,0 +1,116 @@ +# Preparing Something-Something V2 + +## Introduction + +``` +@misc{goyal2017something, + title={The "something something" video database for learning and evaluating visual common sense}, + author={Raghav Goyal and Samira Ebrahimi Kahou and Vincent Michalski and Joanna Materzyńska and Susanne Westphal and Heuna Kim and Valentin Haenel and Ingo Fruend and Peter Yianilos and Moritz Mueller-Freitag and Florian Hoppe and Christian Thurau and Ingo Bax and Roland Memisevic}, + year={2017}, + eprint={1706.04261}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +For basic dataset information, you can refer to the dataset [website](https://20bn.com/datasets/something-something/v2). +Before we start, please make sure that the directory is located at `$MMACTION2/tools/data/sthv2/`. + +## Step 1. Prepare Annotations + +First of all, you have to sign in and download annotations to `$MMACTION2/data/sthv2/annotations` on the official [website](https://20bn.com/datasets/something-something/v2). + +## Step 2. Prepare Videos + +Then, you can download all data parts to `$MMACTION2/data/sthv2/` and use the following command to extract. + +```shell +cd $MMACTION2/data/sthv2/ +cat 20bn-something-something-v2-?? | tar zx +cd $MMACTION2/tools/data/sthv2/ +``` + +## Step 3. Extract RGB and Flow + +This part is **optional** if you only want to use the video loader. + +Before extracting, please refer to [install.md](/docs/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow). + +If you have plenty of SSD space, then we recommend extracting frames there for better I/O performance. + +You can run the following script to soft link SSD. + +```shell +# execute these two line (Assume the SSD is mounted at "/mnt/SSD/") +mkdir /mnt/SSD/sthv2_extracted/ +ln -s /mnt/SSD/sthv2_extracted/ ../../../data/sthv2/rawframes +``` + +If you only want to play with RGB frames (since extracting optical flow can be time-consuming), consider running the following script to extract **RGB-only** frames using denseflow. + +```shell +cd $MMACTION2/tools/data/sthv2/ +bash extract_rgb_frames.sh +``` + +If you didn't install denseflow, you can still extract RGB frames using OpenCV by the following script, but it will keep the original size of the images. 
+ +```shell +cd $MMACTION2/tools/data/sthv2/ +bash extract_rgb_frames_opencv.sh +``` + +If both are required, run the following script to extract frames. + +```shell +cd $MMACTION2/tools/data/sthv2/ +bash extract_frames.sh +``` + +## Step 4. Generate File List + +you can run the follow script to generate file list in the format of rawframes and videos. + +```shell +cd $MMACTION2/tools/data/sthv2/ +bash generate_{rawframes, videos}_filelist.sh +``` + +## Step 5. Check Directory Structure + +After the whole data process for Something-Something V2 preparation, +you will get the rawframes (RGB + Flow), videos and annotation files for Something-Something V2. + +In the context of the whole project (for Something-Something V2 only), the folder structure will look like: + +``` +mmaction2 +├── mmaction +├── tools +├── configs +├── data +│ ├── sthv2 +│ │ ├── sthv2_{train,val}_list_rawframes.txt +│ │ ├── sthv2_{train,val}_list_videos.txt +│ │ ├── annotations +│ | ├── videos +│ | | ├── 1.mp4 +│ | | ├── 2.mp4 +│ | | ├──... +│ | ├── rawframes +│ | | ├── 1 +│ | | | ├── img_00001.jpg +│ | | | ├── img_00002.jpg +│ | | | ├── ... +│ | | | ├── flow_x_00001.jpg +│ | | | ├── flow_x_00002.jpg +│ | | | ├── ... +│ | | | ├── flow_y_00001.jpg +│ | | | ├── flow_y_00002.jpg +│ | | | ├── ... +│ | | ├── 2 +│ | | ├── ... + +``` + +For training and evaluating on Something-Something V2, please refer to [getting_started.md](/docs/getting_started.md). diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/sthv2/extract_frames.sh b/Downstream/Open-Set-Action-Recognition/tools/data/sthv2/extract_frames.sh new file mode 100644 index 0000000..22bc436 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/sthv2/extract_frames.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +cd ../ +python build_rawframes.py ../../data/sthv2/videos/ ../../data/sthv2/rawframes/ --task both --level 1 --flow-type tvl1 --ext webm +echo "Raw frames (RGB and tv-l1) Generated" +cd sthv2/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/sthv2/extract_rgb_frames.sh b/Downstream/Open-Set-Action-Recognition/tools/data/sthv2/extract_rgb_frames.sh new file mode 100644 index 0000000..5b6d58d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/sthv2/extract_rgb_frames.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +cd ../ +python build_rawframes.py ../../data/sthv2/videos/ ../../data/sthv2/rawframes/ --task rgb --level 1 --ext webm +echo "Genearte raw frames (RGB only)" + +cd sthv2/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/sthv2/extract_rgb_frames_opencv.sh b/Downstream/Open-Set-Action-Recognition/tools/data/sthv2/extract_rgb_frames_opencv.sh new file mode 100644 index 0000000..5805d61 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/sthv2/extract_rgb_frames_opencv.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +cd ../ +python build_rawframes.py ../../data/sthv2/videos/ ../../data/sthv2/rawframes/ --task rgb --level 1 --ext webm --use-opencv +echo "Genearte raw frames (RGB only)" + +cd sthv2/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/sthv2/generate_rawframes_filelist.sh b/Downstream/Open-Set-Action-Recognition/tools/data/sthv2/generate_rawframes_filelist.sh new file mode 100644 index 0000000..da6f938 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/sthv2/generate_rawframes_filelist.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +cd ../../../ +PYTHONPATH=. 
python tools/data/build_file_list.py sthv2 data/sthv2/rawframes/ --num-split 1 --level 1 --subset train --format rawframes --shuffle +PYTHONPATH=. python tools/data/build_file_list.py sthv2 data/sthv2/rawframes/ --num-split 1 --level 1 --subset val --format rawframes --shuffle +echo "Filelist for rawframes generated." + +cd tools/data/sthv2/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/sthv2/generate_videos_filelist.sh b/Downstream/Open-Set-Action-Recognition/tools/data/sthv2/generate_videos_filelist.sh new file mode 100644 index 0000000..bef2554 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/sthv2/generate_videos_filelist.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +cd ../../../ +PYTHONPATH=. python tools/data/build_file_list.py sthv2 data/sthv2/videos/ --num-split 1 --level 1 --subset train --format videos --shuffle +PYTHONPATH=. python tools/data/build_file_list.py sthv2 data/sthv2/videos/ --num-split 1 --level 1 --subset val --format videos --shuffle +echo "Filelist for videos generated." + +cd tools/data/sthv2/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/thumos14/README.md b/Downstream/Open-Set-Action-Recognition/tools/data/thumos14/README.md new file mode 100644 index 0000000..3fbbc6d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/thumos14/README.md @@ -0,0 +1,141 @@ +# Preparing THUMOS'14 + +## Introduction + +``` +@misc{THUMOS14, + author = "Jiang, Y.-G. and Liu, J. and Roshan Zamir, A. and Toderici, G. and Laptev, + I. and Shah, M. and Sukthankar, R.", + title = "{THUMOS} Challenge: Action Recognition with a Large + Number of Classes", + howpublished = "\url{http://crcv.ucf.edu/THUMOS14/}", + Year = {2014} +} +``` + +For basic dataset information, you can refer to the dataset [website](https://www.crcv.ucf.edu/THUMOS14/download.html). +Before we start, please make sure that the directory is located at `$MMACTION2/tools/data/thumos14/`. + +## Step 1. Prepare Annotations + +First of all, run the following script to prepare annotations. + +```shell +cd $MMACTION2/tools/data/thumos14/ +bash download_annotations.sh +``` + +## Step 2. Prepare Videos + +Then, you can run the following script to prepare videos. + +```shell +cd $MMACTION2/tools/data/thumos14/ +bash download_videos.sh +``` + +## Step 3. Extract RGB and Flow + +This part is **optional** if you only want to use the video loader. + +Before extracting, please refer to [install.md](/docs/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow). + +If you have plenty of SSD space, then we recommend extracting frames there for better I/O performance. + +You can run the following script to soft link SSD. + +```shell +# execute these two line (Assume the SSD is mounted at "/mnt/SSD/") +mkdir /mnt/SSD/thumos14_extracted/ +ln -s /mnt/SSD/thumos14_extracted/ ../data/thumos14/rawframes/ +``` + +If you only want to play with RGB frames (since extracting optical flow can be time-consuming), consider running the following script to extract **RGB-only** frames using denseflow. + +```shell +cd $MMACTION2/tools/data/thumos14/ +bash extract_rgb_frames.sh +``` + +If you didn't install denseflow, you can still extract RGB frames using OpenCV by the following script, but it will keep the original size of the images. + +```shell +cd $MMACTION2/tools/data/thumos14/ +bash extract_rgb_frames_opencv.sh +``` + +If both are required, run the following script to extract frames. 
+ +```shell +cd $MMACTION2/tools/data/thumos14/ +bash extract_frames.sh tvl1 +``` + +. +## Step 4. Fetch File List + +This part is **optional** if you do not use SSN model. + +You can run the follow script to fetch pre-computed tag proposals. + +```shell +cd $MMACTION2/tools/data/thumos14/ +bash fetch_tag_proposals.sh +``` + +## Step 5. Denormalize Proposal File + +This part is **optional** if you do not use SSN model. + +You can run the follow script to denormalize pre-computed tag proposals according to +actual number of local rawframes. + +```shell +cd $MMACTION2/tools/data/thumos14/ +bash denormalize_proposal_file.sh +``` + +## Step 6. Check Directory Structure + +After the whole data process for THUMOS'14 preparation, +you will get the rawframes (RGB + Flow), videos and annotation files for THUMOS'14. + +In the context of the whole project (for THUMOS'14 only), the folder structure will look like: + +``` +mmaction2 +├── mmaction +├── tools +├── configs +├── data +│ ├── thumos14 +│ │ ├── proposals +│ │ | ├── thumos14_tag_val_normalized_proposal_list.txt +│ │ | ├── thumos14_tag_test_normalized_proposal_list.txt +│ │ ├── annotations_val +│ │ ├── annotations_test +│ │ ├── videos +│ │ │ ├── val +│ │ │ | ├── video_validation_0000001.mp4 +│ │ │ | ├── ... +│ │ | ├── test +│ │ │ | ├── video_test_0000001.mp4 +│ │ │ | ├── ... +│ │ ├── rawframes +│ │ │ ├── val +│ │ │ | ├── video_validation_0000001 +| │ │ | │ ├── img_00001.jpg +| │ │ | │ ├── img_00002.jpg +| │ │ | │ ├── ... +| │ │ | │ ├── flow_x_00001.jpg +| │ │ | │ ├── flow_x_00002.jpg +| │ │ | │ ├── ... +| │ │ | │ ├── flow_y_00001.jpg +| │ │ | │ ├── flow_y_00002.jpg +| │ │ | │ ├── ... +│ │ │ | ├── ... +│ │ | ├── test +│ │ │ | ├── video_test_0000001 +``` + +For training and evaluating on THUMOS'14, please refer to [getting_started.md](/docs/getting_started.md). diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/thumos14/denormalize_proposal_file.sh b/Downstream/Open-Set-Action-Recognition/tools/data/thumos14/denormalize_proposal_file.sh new file mode 100644 index 0000000..92f561a --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/thumos14/denormalize_proposal_file.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +cd ../../../ +PYTHONPATH=. python tools/data/denormalize_proposal_file.py thumos14 --norm-proposal-file data/thumos14/proposals/thumos14_tag_val_normalized_proposal_list.txt --data-prefix data/thumos14/rawframes/val/ +echo "Proposal file denormalized for val set" + +PYTHONPATH=. python tools/data/denormalize_proposal_file.py thumos14 --norm-proposal-file data/thumos14/proposals/thumos14_tag_test_normalized_proposal_list.txt --data-prefix data/thumos14/rawframes/test/ +echo "Proposal file denormalized for test set" + +cd tools/data/thumos14/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/thumos14/download_annotations.sh b/Downstream/Open-Set-Action-Recognition/tools/data/thumos14/download_annotations.sh new file mode 100644 index 0000000..fc8473f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/thumos14/download_annotations.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +DATA_DIR="../../../data/thumos14/" + +if [[ ! -d "${DATA_DIR}" ]]; then + echo "${DATA_DIR} does not exist. Creating"; + mkdir -p ${DATA_DIR} +fi +cd ${DATA_DIR} + +wget http://crcv.ucf.edu/THUMOS14/Validation_set/TH14_Temporal_annotations_validation.zip --no-check-certificate +wget http://crcv.ucf.edu/THUMOS14/test_set/TH14_Temporal_annotations_test.zip --no-check-certificate + +if [ ! 
-d "./annotations_val" ]; then + mkdir ./annotations_val +fi +unzip -j TH14_Temporal_annotations_validation.zip -d annotations_val + +if [ ! -d "./annotations_test" ]; then + mkdir ./annotations_test +fi +unzip -j TH14_Temporal_annotations_test.zip -d annotations_test + +rm TH14_Temporal_annotations_validation.zip +rm TH14_Temporal_annotations_test.zip + +cd "../../tools/data/thumos14/" diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/thumos14/download_videos.sh b/Downstream/Open-Set-Action-Recognition/tools/data/thumos14/download_videos.sh new file mode 100644 index 0000000..e987bb4 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/thumos14/download_videos.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +DATA_DIR="../../../data/thumos14/" + +if [[ ! -d "${DATA_DIR}" ]]; then + echo "${DATA_DIR} does not exist. Creating"; + mkdir -p ${DATA_DIR} +fi + +cd ${DATA_DIR} + +wget https://storage.googleapis.com/thumos14_files/TH14_validation_set_mp4.zip +wget https://storage.googleapis.com/thumos14_files/TH14_Test_set_mp4.zip + +if [ ! -d "./videos/val" ]; then + mkdir -p ./videos/val +fi +unzip -j TH14_validation_set_mp4.zip -d videos/val + +if [ ! -d "./videos/test" ]; then + mkdir -p ./videos/test +fi +unzip -P "THUMOS14_REGISTERED" -j TH14_Test_set_mp4.zip -d videos/test + +cd "../../tools/data/thumos14/" diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/thumos14/extract_frames.sh b/Downstream/Open-Set-Action-Recognition/tools/data/thumos14/extract_frames.sh new file mode 100644 index 0000000..edf6be2 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/thumos14/extract_frames.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +cd ../ +python build_rawframes.py ../../data/thumos14/videos/val/ ../../data/thumos14/rawframes/val/ --level 1 --flow-type tvl1 --ext mp4 --task both +echo "Raw frames (RGB and tv-l1) Generated for val set" + +python build_rawframes.py ../../data/thumos14/videos/test/ ../../data/thumos14/rawframes/test/ --level 1 --flow-type tvl1 --ext mp4 --task both +echo "Raw frames (RGB and tv-l1) Generated for test set" + +cd thumos14/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/thumos14/extract_rgb_frames.sh b/Downstream/Open-Set-Action-Recognition/tools/data/thumos14/extract_rgb_frames.sh new file mode 100644 index 0000000..776575f --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/thumos14/extract_rgb_frames.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +cd ../ +python build_rawframes.py ../../data/thumos14/videos/val/ ../../data/thumos14/rawframes/val/ --level 1 --ext mp4 --task rgb +echo "Raw frames (RGB only) generated for val set" + +python build_rawframes.py ../../data/thumos14/videos/test/ ../../data/thumos14/rawframes/test/ --level 1 --ext mp4 --task rgb +echo "Raw frames (RGB only) generated for test set" + +cd thumos14/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/thumos14/extract_rgb_frames_opencv.sh b/Downstream/Open-Set-Action-Recognition/tools/data/thumos14/extract_rgb_frames_opencv.sh new file mode 100644 index 0000000..d4fad08 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/thumos14/extract_rgb_frames_opencv.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +cd ../ +python build_rawframes.py ../../data/thumos14/videos/val/ ../../data/thumos14/rawframes/val/ --level 1 --ext mp4 --task rgb --use-opencv +echo "Raw frames (RGB only) generated for val set" + +python build_rawframes.py ../../data/thumos14/videos/test/ 
../../data/thumos14/rawframes/test/ --level 1 --ext mp4 --task rgb --use-opencv +echo "Raw frames (RGB only) generated for test set" + +cd thumos14/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/thumos14/fetch_tag_proposals.sh b/Downstream/Open-Set-Action-Recognition/tools/data/thumos14/fetch_tag_proposals.sh new file mode 100644 index 0000000..5d692b7 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/thumos14/fetch_tag_proposals.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +PROP_DIR="../../../data/thumos14/proposals" + +if [[ ! -d "${PROP_DIR}" ]]; then + echo "${PROP_DIR} does not exist. Creating"; + mkdir -p ${PROP_DIR} +fi + +wget https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmaction/filelist/thumos14_tag_val_normalized_proposal_list.txt -P ${PROP_DIR} +wget https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmaction/filelist/thumos14_tag_test_normalized_proposal_list.txt -P ${PROP_DIR} diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/ucf101/README.md b/Downstream/Open-Set-Action-Recognition/tools/data/ucf101/README.md new file mode 100644 index 0000000..91fdc19 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/ucf101/README.md @@ -0,0 +1,125 @@ +# Preparing UCF-101 + +## Introduction + +``` +@article{Soomro2012UCF101AD, + title={UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild}, + author={K. Soomro and A. Zamir and M. Shah}, + journal={ArXiv}, + year={2012}, + volume={abs/1212.0402} +} +``` + +For basic dataset information, you can refer to the dataset [website](https://www.crcv.ucf.edu/research/data-sets/ucf101/). +Before we start, please make sure that the directory is located at `$MMACTION2/tools/data/ucf101/`. + +## Step 1. Prepare Annotations + +First of all, you can run the following script to prepare annotations. + +```shell +bash download_annotations.sh +``` + +## Step 2. Prepare Videos + +Then, you can run the following script to prepare videos. + +```shell +bash download_videos.sh +``` + +For better decoding speed, you can resize the original videos into smaller sized, densely encoded version by: + +``` +python ../resize_videos.py ../../../data/ucf101/videos/ ../../../data/ucf101/videos_256p_dense_cache --dense --level 2 --ext avi +``` + +## Step 3. Extract RGB and Flow + +This part is **optional** if you only want to use the video loader. + +Before extracting, please refer to [install.md](/docs/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow). + +If you have plenty of SSD space, then we recommend extracting frames there for better I/O performance. The extracted frames (RGB + Flow) will take up about 100GB. + +You can run the following script to soft link SSD. + +```shell +# execute these two line (Assume the SSD is mounted at "/mnt/SSD/") +mkdir /mnt/SSD/ucf101_extracted/ +ln -s /mnt/SSD/ucf101_extracted/ ../../../data/ucf101/rawframes +``` + +If you only want to play with RGB frames (since extracting optical flow can be time-consuming), consider running the following script to extract **RGB-only** frames using denseflow. + +```shell +bash extract_rgb_frames.sh +``` + +If you didn't install denseflow, you can still extract RGB frames using OpenCV by the following script, but it will keep the original size of the images. + +```shell +bash extract_rgb_frames_opencv.sh +``` + +If both are required, run the following script to extract frames using "tvl1" algorithm. + +```shell +bash extract_frames.sh +``` + +## Step 4. 
Generate File List + +you can run the follow script to generate file list in the format of rawframes and videos. + +```shell +bash generate_videos_filelist.sh +bash generate_rawframes_filelist.sh +``` + +## Step 5. Check Directory Structure + +After the whole data process for UCF-101 preparation, +you will get the rawframes (RGB + Flow), videos and annotation files for UCF-101. + +In the context of the whole project (for UCF-101 only), the folder structure will look like: + +``` +mmaction2 +├── mmaction +├── tools +├── configs +├── data +│ ├── ucf101 +│ │ ├── ucf101_{train,val}_split_{1,2,3}_rawframes.txt +│ │ ├── ucf101_{train,val}_split_{1,2,3}_videos.txt +│ │ ├── annotations +│ │ ├── videos +│ │ │ ├── ApplyEyeMakeup +│ │ │ │ ├── v_ApplyEyeMakeup_g01_c01.avi + +│ │ │ ├── YoYo +│ │ │ │ ├── v_YoYo_g25_c05.avi +│ │ ├── rawframes +│ │ │ ├── ApplyEyeMakeup +│ │ │ │ ├── v_ApplyEyeMakeup_g01_c01 +│ │ │ │ │ ├── img_00001.jpg +│ │ │ │ │ ├── img_00002.jpg +│ │ │ │ │ ├── ... +│ │ │ │ │ ├── flow_x_00001.jpg +│ │ │ │ │ ├── flow_x_00002.jpg +│ │ │ │ │ ├── ... +│ │ │ │ │ ├── flow_y_00001.jpg +│ │ │ │ │ ├── flow_y_00002.jpg +│ │ │ ├── ... +│ │ │ ├── YoYo +│ │ │ │ ├── v_YoYo_g01_c01 +│ │ │ │ ├── ... +│ │ │ │ ├── v_YoYo_g25_c05 + +``` + +For training and evaluating on UCF-101, please refer to [getting_started.md](/docs/getting_started.md). diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/ucf101/download_annotations.sh b/Downstream/Open-Set-Action-Recognition/tools/data/ucf101/download_annotations.sh new file mode 100644 index 0000000..7a8822b --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/ucf101/download_annotations.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +DATA_DIR="../../../data/ucf101/annotations" + +if [[ ! -d "${DATA_DIR}" ]]; then + echo "${DATA_DIR} does not exist. Creating"; + mkdir -p ${DATA_DIR} +fi + +wget https://www.crcv.ucf.edu/wp-content/uploads/2019/03/UCF101TrainTestSplits-RecognitionTask.zip --no-check-certificate + +unzip -j UCF101TrainTestSplits-RecognitionTask.zip -d ${DATA_DIR}/ +rm UCF101TrainTestSplits-RecognitionTask.zip diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/ucf101/download_videos.sh b/Downstream/Open-Set-Action-Recognition/tools/data/ucf101/download_videos.sh new file mode 100644 index 0000000..6efd98e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/ucf101/download_videos.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +DATA_DIR="../../../data/ucf101/" + +if [[ ! -d "${DATA_DIR}" ]]; then + echo "${DATA_DIR} does not exist. 
Creating"; + mkdir -p ${DATA_DIR} +fi + +cd ${DATA_DIR} + +wget https://www.crcv.ucf.edu/datasets/human-actions/ucf101/UCF101.rar --no-check-certificate +unrar x UCF101.rar +mv ./UCF-101 ./videos + +cd "../../tools/data/ucf101" diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/ucf101/extract_frames.sh b/Downstream/Open-Set-Action-Recognition/tools/data/ucf101/extract_frames.sh new file mode 100644 index 0000000..b549b6e --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/ucf101/extract_frames.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +cd ../ +python build_rawframes.py ../../data/ucf101/videos/ ../../data/ucf101/rawframes/ --task both --level 2 --flow-type tvl1 +echo "Raw frames (RGB and Flow) Generated" +cd ucf101/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/ucf101/extract_rgb_frames.sh b/Downstream/Open-Set-Action-Recognition/tools/data/ucf101/extract_rgb_frames.sh new file mode 100644 index 0000000..b39df7c --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/ucf101/extract_rgb_frames.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +cd ../ +python build_rawframes.py ../../data/ucf101/videos/ ../../data/ucf101/rawframes/ --task rgb --level 2 --ext avi +echo "Genearte raw frames (RGB only)" + +cd ucf101/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/ucf101/extract_rgb_frames_opencv.sh b/Downstream/Open-Set-Action-Recognition/tools/data/ucf101/extract_rgb_frames_opencv.sh new file mode 100644 index 0000000..50d1ac3 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/ucf101/extract_rgb_frames_opencv.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +cd ../ +python build_rawframes.py ../../data/ucf101/videos/ ../../data/ucf101/rawframes/ --task rgb --level 2 --ext avi --use-opencv +echo "Genearte raw frames (RGB only)" + +cd ucf101/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/ucf101/generate_rawframes_filelist.sh b/Downstream/Open-Set-Action-Recognition/tools/data/ucf101/generate_rawframes_filelist.sh new file mode 100644 index 0000000..9b9ed99 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/ucf101/generate_rawframes_filelist.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +cd ../../../ + +PYTHONPATH=. python tools/data/build_file_list.py ucf101 data/ucf101/rawframes/ --level 2 --format rawframes --shuffle +echo "Filelist for rawframes generated." + +cd tools/data/ucf101/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/ucf101/generate_videos_filelist.sh b/Downstream/Open-Set-Action-Recognition/tools/data/ucf101/generate_videos_filelist.sh new file mode 100644 index 0000000..5f39143 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/ucf101/generate_videos_filelist.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +cd ../../../ + +PYTHONPATH=. python tools/data/build_file_list.py ucf101 data/ucf101/videos/ --level 2 --format videos --shuffle +echo "Filelist for videos generated." + +cd tools/data/ucf101/ diff --git a/Downstream/Open-Set-Action-Recognition/tools/data/ucf101_24/README.md b/Downstream/Open-Set-Action-Recognition/tools/data/ucf101_24/README.md new file mode 100644 index 0000000..621f898 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/data/ucf101_24/README.md @@ -0,0 +1,82 @@ +# Preparing UCF101-24 + +## Introduction + +``` +@article{Soomro2012UCF101AD, + title={UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild}, + author={K. Soomro and A. Zamir and M. 
Shah}, + journal={ArXiv}, + year={2012}, + volume={abs/1212.0402} +} +``` + +For basic dataset information, you can refer to the dataset [website](http://www.thumos.info/download.html). +Before we start, please make sure that the directory is located at `$MMACTION2/tools/data/ucf101_24/`. + +## Download and Extract + +You can download the RGB frames, optical flow and ground truth annotations from [google drive](https://drive.google.com/drive/folders/1BvGywlAGrACEqRyfYbz3wzlVV3cDFkct). +The data are provided from [MOC](https://github.com/MCG-NJU/MOC-Detector/blob/master/readme/Dataset.md), which is adapted from [act-detector](https://github.com/vkalogeiton/caffe/tree/act-detector) and [corrected-UCF101-Annots](https://github.com/gurkirt/corrected-UCF101-Annots). + +**Note**: The annotation of this UCF101-24 is from [here](https://github.com/gurkirt/corrected-UCF101-Annots), which is more correct. + +After downloading the `UCF101_v2.tar.gz` file and put it in `$MMACTION2/tools/data/ucf101_24/`, you can run the following command to extract. + +```shell +tar -zxvf UCF101_v2.tar.gz +``` + +## Check Directory Structure + +After extracting, you will get the `rgb-images` directory, `brox-images` directory and `UCF101v2-GT.pkl` for UCF101-24. + +In the context of the whole project (for UCF101-24 only), the folder structure will look like: + +``` +mmaction2 +├── mmaction +├── tools +├── configs +├── data +│ ├── ucf101_24 +│ | ├── brox-images +│ | | ├── Basketball +│ | | | ├── v_Basketball_g01_c01 +│ | | | | ├── 00001.jpg +│ | | | | ├── 00002.jpg +│ | | | | ├── ... +│ | | | | ├── 00140.jpg +│ | | | | ├── 00141.jpg +│ | | ├── ... +│ | | ├── WalkingWithDog +│ | | | ├── v_WalkingWithDog_g01_c01 +│ | | | ├── ... +│ | | | ├── v_WalkingWithDog_g25_c04 +│ | ├── rgb-images +│ | | ├── Basketball +│ | | | ├── v_Basketball_g01_c01 +│ | | | | ├── 00001.jpg +│ | | | | ├── 00002.jpg +│ | | | | ├── ... +│ | | | | ├── 00140.jpg +│ | | | | ├── 00141.jpg +│ | | ├── ... +│ | | ├── WalkingWithDog +│ | | | ├── v_WalkingWithDog_g01_c01 +│ | | | ├── ... +│ | | | ├── v_WalkingWithDog_g25_c04 +│ | ├── UCF101v2-GT.pkl + +``` + +**Note**: The `UCF101v2-GT.pkl` exists as a cache, it contains 6 items as follows: +1. `labels` (list): List of the 24 labels. +2. `gttubes` (dict): Dictionary that contains the ground truth tubes for each video. + A **gttube** is dictionary that associates with each index of label and a list of tubes. + A **tube** is a numpy array with `nframes` rows and 5 columns, each col is in format like ` `. +3. `nframes` (dict): Dictionary that contains the number of frames for each video, like `'HorseRiding/v_HorseRiding_g05_c02': 151`. +4. `train_videos` (list): A list with `nsplits=1` elements, each one containing the list of training videos. +5. `test_videos` (list): A list with `nsplits=1` elements, each one containing the list of testing videos. +6. `resolution` (dict): Dictionary that outputs a tuple (h,w) of the resolution for each video, like `'FloorGymnastics/v_FloorGymnastics_g09_c03': (240, 320)`. 
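+As a quick sanity check, the cache can be inspected with a few lines of Python. The snippet below is a minimal sketch: the path follows the directory tree above, and the `encoding='latin1'` argument is an assumption in case the pickle was written under Python 2.
+
+```python
+import pickle
+
+with open('data/ucf101_24/UCF101v2-GT.pkl', 'rb') as f:
+    gt = pickle.load(f, encoding='latin1')
+
+print(gt['labels'])                                           # the 24 labels
+print(len(gt['train_videos'][0]), len(gt['test_videos'][0]))  # nsplits = 1, so index 0
+
+video = gt['test_videos'][0][0]
+print(gt['nframes'][video], gt['resolution'][video])          # frame count and (h, w)
+
+# gttubes: {video: {label index: [tube, ...]}}, each tube a numpy array with 5 columns
+for label_idx, tubes in gt['gttubes'][video].items():
+    print(label_idx, tubes[0].shape)
+```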
diff --git a/Downstream/Open-Set-Action-Recognition/tools/dist_test.sh b/Downstream/Open-Set-Action-Recognition/tools/dist_test.sh new file mode 100644 index 0000000..3c74ec6 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/dist_test.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +CONFIG=$1 +CHECKPOINT=$2 +GPUS=$3 +PORT=${PORT:-29500} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ + $(dirname "$0")/test.py $CONFIG $CHECKPOINT --launcher pytorch ${@:4} diff --git a/Downstream/Open-Set-Action-Recognition/tools/dist_train.sh b/Downstream/Open-Set-Action-Recognition/tools/dist_train.sh new file mode 100644 index 0000000..5b43fff --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/dist_train.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +CONFIG=$1 +GPUS=$2 +PORT=${PORT:-29500} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ + $(dirname "$0")/train.py $CONFIG --launcher pytorch ${@:3} diff --git a/Downstream/Open-Set-Action-Recognition/tools/flow_extraction.py b/Downstream/Open-Set-Action-Recognition/tools/flow_extraction.py new file mode 100644 index 0000000..2a2cad8 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/flow_extraction.py @@ -0,0 +1,186 @@ +import argparse +import os +import os.path as osp + +import cv2 +import numpy as np + + +def flow_to_img(raw_flow, bound=20.): + """Convert flow to gray image. + + Args: + raw_flow (np.ndarray[float]): Estimated flow with the shape (w, h). + bound (float): Bound for the flow-to-image normalization. Default: 20. + + Returns: + np.ndarray[uint8]: The result list of np.ndarray[uint8], with shape + (w, h). + """ + flow = np.clip(raw_flow, -bound, bound) + flow += bound + flow *= (255 / float(2 * bound)) + flow = flow.astype(np.uint8) + return flow + + +def generate_flow(frames, method='tvl1'): + """Estimate flow with given frames. + + Args: + frames (list[np.ndarray[uint8]]): List of rgb frames, with shape + (w, h, 3). + method (str): Use which method to generate flow. Options are 'tvl1' + and 'farneback'. Default: 'tvl1'. + + Returns: + list[np.ndarray[float]]: The result list of np.ndarray[float], with + shape (w, h, 2). + """ + assert method in ['tvl1', 'farneback'] + gray_frames = [cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) for frame in frames] + + if method == 'tvl1': + tvl1 = cv2.optflow.DualTVL1OpticalFlow_create() + + def op(x, y): + return tvl1.calc(x, y, None) + elif method == 'farneback': + + def op(x, y): + return cv2.calcOpticalFlowFarneback(x, y, None, 0.5, 3, 15, 3, 5, + 1.2, 0) + + gray_st = gray_frames[:-1] + gray_ed = gray_frames[1:] + + flow = [op(x, y) for x, y in zip(gray_st, gray_ed)] + return flow + + +def extract_dense_flow(path, + dest, + bound=20., + save_rgb=False, + start_idx=0, + rgb_tmpl='img_{:05d}.jpg', + flow_tmpl='{}_{:05d}.jpg', + method='tvl1'): + """Extract dense flow given video or frames, save them as gray-scale + images. + + Args: + path (str): Location of the input video. + dest (str): The directory to store the extracted flow images. + bound (float): Bound for the flow-to-image normalization. Default: 20. + save_rgb (bool): Save extracted RGB frames. Default: False. + start_idx (int): The starting frame index if use frames as input, the + first image is path.format(start_idx). Default: 0. + rgb_tmpl (str): The template of RGB frame names, Default: + 'img_{:05d}.jpg'. 
+ flow_tmpl (str): The template of Flow frame names, Default: + '{}_{:05d}.jpg'. + method (str): Use which method to generate flow. Options are 'tvl1' + and 'farneback'. Default: 'tvl1'. + """ + + frames = [] + assert osp.exists(path) + video = cv2.VideoCapture(path) + flag, f = video.read() + while flag: + frames.append(f) + flag, f = video.read() + + flow = generate_flow(frames, method=method) + + flow_x = [flow_to_img(x[:, :, 0], bound) for x in flow] + flow_y = [flow_to_img(x[:, :, 1], bound) for x in flow] + + if not osp.exists(dest): + os.system('mkdir -p ' + dest) + flow_x_names = [ + osp.join(dest, flow_tmpl.format('x', ind + start_idx)) + for ind in range(len(flow_x)) + ] + flow_y_names = [ + osp.join(dest, flow_tmpl.format('y', ind + start_idx)) + for ind in range(len(flow_y)) + ] + + num_frames = len(flow) + for i in range(num_frames): + cv2.imwrite(flow_x_names[i], flow_x[i]) + cv2.imwrite(flow_y_names[i], flow_y[i]) + + if save_rgb: + img_names = [ + osp.join(dest, rgb_tmpl.format(ind + start_idx)) + for ind in range(len(frames)) + ] + for frame, name in zip(frames, img_names): + cv2.imwrite(name, frame) + + +def parse_args(): + parser = argparse.ArgumentParser(description='Extract flow and RGB images') + parser.add_argument( + '--input', + help='videos for frame extraction, can be' + 'single video or a video list, the video list should be a txt file ' + 'and just consists of filenames without directories') + parser.add_argument( + '--prefix', + default='', + help='the prefix of input ' + 'videos, used when input is a video list') + parser.add_argument( + '--dest', + default='', + help='the destination to save ' + 'extracted frames') + parser.add_argument( + '--save-rgb', action='store_true', help='also save ' + 'rgb frames') + parser.add_argument( + '--rgb-tmpl', + default='img_{:05d}.jpg', + help='template filename of rgb frames') + parser.add_argument( + '--flow-tmpl', + default='{}_{:05d}.jpg', + help='template filename of flow frames') + parser.add_argument( + '--start-idx', + type=int, + default=1, + help='the start ' + 'index of extracted frames') + parser.add_argument( + '--method', + default='tvl1', + help='use which method to ' + 'generate flow') + parser.add_argument( + '--bound', type=float, default=20, help='maximum of ' + 'optical flow') + + args = parser.parse_args() + return args + + +if __name__ == '__main__': + args = parse_args() + if args.input.endswith('.txt'): + lines = open(args.input).readlines() + lines = [x.strip() for x in lines] + videos = [osp.join(args.prefix, x) for x in lines] + dests = [osp.join(args.dest, x.split('.')[0]) for x in lines] + for video, dest in zip(videos, dests): + extract_dense_flow(video, dest, args.bound, args.save_rgb, + args.start_idx, args.rgb_tmpl, args.flow_tmpl, + args.method) + else: + extract_dense_flow(args.input, args.dest, args.bound, args.save_rgb, + args.start_idx, args.rgb_tmpl, args.flow_tmpl, + args.method) diff --git a/Downstream/Open-Set-Action-Recognition/tools/hypertune.py b/Downstream/Open-Set-Action-Recognition/tools/hypertune.py new file mode 100644 index 0000000..e652b3d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/hypertune.py @@ -0,0 +1,188 @@ +import argparse +import copy +import os +import os.path as osp +import time +import warnings + +import mmcv +import torch +from mmcv import Config, DictAction +from mmcv.runner import init_dist, set_random_seed +from mmcv.utils import get_git_hash +import numpy as np + +from mmaction import __version__ +from mmaction.apis import train_model 
+from mmaction.datasets import build_dataset +from mmaction.models import build_model +from mmaction.utils import collect_env, get_root_logger + + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a recognizer') + parser.add_argument('config', help='train config file path') + parser.add_argument('--work-dir', help='the dir to save logs and models') + parser.add_argument( + '--resume-from', help='the checkpoint file to resume from') + parser.add_argument( + '--validate', + action='store_true', + help='whether to evaluate the checkpoint during training') + group_gpus = parser.add_mutually_exclusive_group() + group_gpus.add_argument( + '--gpus', + type=int, + help='number of gpus to use ' + '(only applicable to non-distributed training)') + group_gpus.add_argument( + '--gpu-ids', + type=int, + nargs='+', + help='ids of gpus to use ' + '(only applicable to non-distributed training)') + parser.add_argument('--seed', type=int, default=None, help='random seed') + parser.add_argument( + '--deterministic', + action='store_true', + help='whether to set deterministic options for CUDNN backend.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + default={}, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. For example, ' + "'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'") + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + parser.add_argument('--weight_factor', nargs='+', type=float, default=[0.1, 0.2, 0.3, 0.4]) + parser.add_argument('--batch_size', type=int, default=8) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + + cfg.merge_from_dict(args.cfg_options) + + # for hyperparameter searching + cfg.log_config.hooks = [dict(type='TextLoggerHook')] + cfg.checkpoint_config.interval = 10 + cfg.evaluation.interval = 10 + cfg.data.videos_per_gpu = args.batch_size + + # set cudnn_benchmark + if cfg.get('cudnn_benchmark', False): + torch.backends.cudnn.benchmark = True + + # work_dir is determined in this priority: + # CLI > config file > default (base filename) + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + if args.resume_from is not None: + cfg.resume_from = args.resume_from + if args.gpu_ids is not None: + cfg.gpu_ids = args.gpu_ids + else: + cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus) + + # init distributed env first, since logger depends on the dist info. 
+ if args.launcher == 'none': + distributed = False + else: + distributed = True + init_dist(args.launcher, **cfg.dist_params) + + # The flag is used to determine whether it is omnisource training + cfg.setdefault('omnisource', False) + + # create work_dir + mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) + # dump config + cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config))) + # init logger before other steps + timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) + log_file = osp.join(cfg.work_dir, f'{timestamp}.log') + logger = get_root_logger(log_file=log_file, log_level=cfg.log_level) + + # init the meta dict to record some important information such as + # environment info and seed, which will be logged + meta = dict() + # log env info + env_info_dict = collect_env() + env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()]) + dash_line = '-' * 60 + '\n' + logger.info('Environment info:\n' + dash_line + env_info + '\n' + + dash_line) + meta['env_info'] = env_info + + # log some basic info + logger.info(f'Distributed training: {distributed}') + logger.info(f'Config: {cfg.text}') + + # set random seeds + if args.seed is not None: + logger.info(f'Set random seed to {args.seed}, ' + f'deterministic: {args.deterministic}') + set_random_seed(args.seed, deterministic=args.deterministic) + cfg.seed = args.seed + meta['seed'] = args.seed + meta['config_name'] = osp.basename(args.config) + meta['work_dir'] = osp.basename(cfg.work_dir.rstrip('/\\')) + + if cfg.omnisource: + # If omnisource flag is set, cfg.data.train should be a list + assert type(cfg.data.train) is list + datasets = [build_dataset(dataset) for dataset in cfg.data.train] + else: + datasets = [build_dataset(cfg.data.train)] + + if len(cfg.workflow) == 2: + # For simplicity, omnisource is not compatiable with val workflow, + # we recommend you to use `--validate` + assert not cfg.omnisource + if args.validate: + warnings.warn('val workflow is duplicated with `--validate`, ' + 'it is recommended to use `--validate`. 
see ' + 'https://github.com/open-mmlab/mmaction2/pull/123') + val_dataset = copy.deepcopy(cfg.data.val) + datasets.append(build_dataset(val_dataset)) + if cfg.checkpoint_config is not None: + # save mmaction version, config file content and class names in + # checkpoints as meta data + cfg.checkpoint_config.meta = dict( + mmaction_version=__version__ + get_git_hash(digits=7), + config=cfg.text) + + # search for hyperprameters + save_dir = cfg.work_dir + for v in args.weight_factor: + print("searching for v=%.1f"%(v)) + # change the hyperparameter + cfg.model.debias_head.loss_factor = v + cfg.work_dir = osp.join(save_dir, 'search_%.1f'%(v)) + # build the model + model = build_model( + cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg) + # training and validation + train_model(model, datasets, cfg, distributed=distributed, validate=args.validate, timestamp=timestamp, meta=meta) + + +if __name__ == '__main__': + main() diff --git a/Downstream/Open-Set-Action-Recognition/tools/publish_model.py b/Downstream/Open-Set-Action-Recognition/tools/publish_model.py new file mode 100644 index 0000000..c20e7e3 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/publish_model.py @@ -0,0 +1,39 @@ +import argparse +import subprocess + +import torch + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Process a checkpoint to be published') + parser.add_argument('in_file', help='input checkpoint filename') + parser.add_argument('out_file', help='output checkpoint filename') + args = parser.parse_args() + return args + + +def process_checkpoint(in_file, out_file): + checkpoint = torch.load(in_file, map_location='cpu') + # remove optimizer for smaller file size + if 'optimizer' in checkpoint: + del checkpoint['optimizer'] + # if it is necessary to remove some sensitive data in checkpoint['meta'], + # add the code here. + torch.save(checkpoint, out_file) + sha = subprocess.check_output(['sha256sum', out_file]).decode() + if out_file.endswith('.pth'): + out_file_name = out_file[:-4] + else: + out_file_name = out_file + final_file = out_file_name + f'-{sha[:8]}.pth' + subprocess.Popen(['mv', out_file, final_file]) + + +def main(): + args = parse_args() + process_checkpoint(args.in_file, args.out_file) + + +if __name__ == '__main__': + main() diff --git a/Downstream/Open-Set-Action-Recognition/tools/pytorch2onnx.py b/Downstream/Open-Set-Action-Recognition/tools/pytorch2onnx.py new file mode 100644 index 0000000..e7ef65d --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/pytorch2onnx.py @@ -0,0 +1,163 @@ +import argparse + +import mmcv +import numpy as np +import torch +from mmcv.runner import load_checkpoint + +from mmaction.models import build_model + +try: + import onnx + import onnxruntime as rt +except ImportError as e: + raise ImportError(f'Please install onnx and onnxruntime first. 
{e}') + +try: + from mmcv.onnx.symbolic import register_extra_symbolics +except ModuleNotFoundError: + raise NotImplementedError('please update mmcv to version>=1.0.4') + + +def _convert_batchnorm(module): + """Convert the syncBNs into normal BN3ds.""" + module_output = module + if isinstance(module, torch.nn.SyncBatchNorm): + module_output = torch.nn.BatchNorm3d(module.num_features, module.eps, + module.momentum, module.affine, + module.track_running_stats) + if module.affine: + module_output.weight.data = module.weight.data.clone().detach() + module_output.bias.data = module.bias.data.clone().detach() + # keep requires_grad unchanged + module_output.weight.requires_grad = module.weight.requires_grad + module_output.bias.requires_grad = module.bias.requires_grad + module_output.running_mean = module.running_mean + module_output.running_var = module.running_var + module_output.num_batches_tracked = module.num_batches_tracked + for name, child in module.named_children(): + module_output.add_module(name, _convert_batchnorm(child)) + del module + return module_output + + +def pytorch2onnx(model, + input_shape, + opset_version=11, + show=False, + output_file='tmp.onnx', + verify=False): + """Convert pytorch model to onnx model. + + Args: + model (:obj:`nn.Module`): The pytorch model to be exported. + input_shape (tuple[int]): The input tensor shape of the model. + opset_version (int): Opset version of onnx used. Default: 11. + show (bool): Determines whether to print the onnx model architecture. + Default: False. + output_file (str): Output onnx model name. Default: 'tmp.onnx'. + verify (bool): Determines whether to verify the onnx model. + Default: False. + """ + model.cpu().eval() + + one_img = torch.randn(input_shape) + + register_extra_symbolics(opset_version) + torch.onnx.export( + model, + one_img, + output_file, + export_params=True, + keep_initializers_as_inputs=True, + verbose=show, + opset_version=opset_version) + + print(f'Successfully exported ONNX model: {output_file}') + if verify: + # check by onnx + onnx_model = onnx.load(output_file) + onnx.checker.check_model(onnx_model) + + # check the numerical value + # get pytorch output + pytorch_result = model(one_img)[0].detach().numpy() + + # get onnx output + input_all = [node.name for node in onnx_model.graph.input] + input_initializer = [ + node.name for node in onnx_model.graph.initializer + ] + net_feed_input = list(set(input_all) - set(input_initializer)) + assert len(net_feed_input) == 1 + sess = rt.InferenceSession(output_file) + onnx_result = sess.run(None, + {net_feed_input[0]: one_img.detach().numpy() + })[0] + # only compare part of results + assert np.allclose( + pytorch_result[:, 4], onnx_result[:, 4] + ), 'The outputs are different between Pytorch and ONNX' + print('The numerical values are same between Pytorch and ONNX') + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert MMAction2 models to ONNX') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file') + parser.add_argument('--show', action='store_true', help='show onnx graph') + parser.add_argument('--output-file', type=str, default='tmp.onnx') + parser.add_argument('--opset-version', type=int, default=11) + parser.add_argument( + '--verify', + action='store_true', + help='verify the onnx model output against pytorch output') + parser.add_argument( + '--is-localizer', + action='store_true', + help='whether it is a localizer') + parser.add_argument( + '--shape', + type=int, 
+ nargs='+', + default=[1, 3, 8, 224, 224], + help='input video size') + args = parser.parse_args() + return args + + +if __name__ == '__main__': + args = parse_args() + + assert args.opset_version == 11, 'MMAction2 only supports opset 11 now' + + cfg = mmcv.Config.fromfile(args.config) + # import modules from string list. + + if not args.is_localizer: + cfg.model.backbone.pretrained = None + + # build the model + model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg) + model = _convert_batchnorm(model) + + # onnx.export does not support kwargs + if hasattr(model, 'forward_dummy'): + model.forward = model.forward_dummy + elif hasattr(model, '_forward') and args.is_localizer: + model.forward = model._forward + else: + raise NotImplementedError( + 'Please implement the forward method for exporting.') + + checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu') + + # conver model to onnx file + pytorch2onnx( + model, + args.shape, + opset_version=args.opset_version, + show=args.show, + output_file=args.output_file, + verify=args.verify) diff --git a/Downstream/Open-Set-Action-Recognition/tools/slurm_test.sh b/Downstream/Open-Set-Action-Recognition/tools/slurm_test.sh new file mode 100644 index 0000000..6dd67e5 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/slurm_test.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +JOB_NAME=$2 +CONFIG=$3 +CHECKPOINT=$4 +GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +PY_ARGS=${@:5} +SRUN_ARGS=${SRUN_ARGS:-""} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS} diff --git a/Downstream/Open-Set-Action-Recognition/tools/slurm_train b/Downstream/Open-Set-Action-Recognition/tools/slurm_train new file mode 100644 index 0000000..15e77f0 --- /dev/null +++ b/Downstream/Open-Set-Action-Recognition/tools/slurm_train @@ -0,0 +1,56 @@ +#!/usr/bin/env bash + +ARGPARSE_DESCRIPTION="Train recognizer on slurm cluster" +source $(dirname $0)/argparse.bash || exit 1 +argparse "$@" < config file > default (base filename) + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + if args.resume_from is not None: + cfg.resume_from = args.resume_from + if args.gpu_ids is not None: + cfg.gpu_ids = args.gpu_ids + else: + cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus) + + # init distributed env first, since logger depends on the dist info. 
+ if args.launcher == 'none': + distributed = False + else: + distributed = True + init_dist(args.launcher, **cfg.dist_params) + + # The flag is used to determine whether it is omnisource training + cfg.setdefault('omnisource', False) + + # create work_dir + mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) + # dump config + cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config))) + # init logger before other steps + timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) + log_file = osp.join(cfg.work_dir, f'{timestamp}.log') + logger = get_root_logger(log_file=log_file, log_level=cfg.log_level) + + # init the meta dict to record some important information such as + # environment info and seed, which will be logged + meta = dict() + # log env info + env_info_dict = collect_env() + env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()]) + dash_line = '-' * 60 + '\n' + logger.info('Environment info:\n' + dash_line + env_info + '\n' + + dash_line) + meta['env_info'] = env_info + + # log some basic info + logger.info(f'Distributed training: {distributed}') + logger.info(f'Config: {cfg.text}') + + # set random seeds + if args.seed is not None: + logger.info(f'Set random seed to {args.seed}, ' + f'deterministic: {args.deterministic}') + set_random_seed(args.seed, deterministic=args.deterministic) + cfg.seed = args.seed + meta['seed'] = args.seed + meta['config_name'] = osp.basename(args.config) + meta['work_dir'] = osp.basename(cfg.work_dir.rstrip('/\\')) + + model = build_model( + cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg) + + if cfg.omnisource: + # If omnisource flag is set, cfg.data.train should be a list + assert type(cfg.data.train) is list + datasets = [build_dataset(dataset) for dataset in cfg.data.train] + else: + datasets = [build_dataset(cfg.data.train)] + + if len(cfg.workflow) == 2: + # For simplicity, omnisource is not compatiable with val workflow, + # we recommend you to use `--validate` + assert not cfg.omnisource + if args.validate: + warnings.warn('val workflow is duplicated with `--validate`, ' + 'it is recommended to use `--validate`. see ' + 'https://github.com/open-mmlab/mmaction2/pull/123') + val_dataset = copy.deepcopy(cfg.data.val) + datasets.append(build_dataset(val_dataset)) + if cfg.checkpoint_config is not None: + # save mmaction version, config file content and class names in + # checkpoints as meta data + cfg.checkpoint_config.meta = dict( + mmaction_version=__version__ + get_git_hash(digits=7), + config=cfg.text) + + train_model( + model, + datasets, + cfg, + distributed=distributed, + validate=args.validate, + timestamp=timestamp, + meta=meta) + + +if __name__ == '__main__': + main() diff --git a/Downstream/Spatial-Temporal-Action-Localization/.DS_Store b/Downstream/Spatial-Temporal-Action-Localization/.DS_Store new file mode 100644 index 0000000..8862eb2 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/.DS_Store differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/FINETUNE.md b/Downstream/Spatial-Temporal-Action-Localization/FINETUNE.md new file mode 100644 index 0000000..29a4ab6 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/FINETUNE.md @@ -0,0 +1,87 @@ +# Fine-tuning VideoMAE + +## Original Implementation + +The implementation of our VideoMAE supports **multi-node distributed training**. We provide the **off-the-shelf** scripts in the [scripts folder](scripts). 
+ +- For example, to fine-tune VideoMAE ViT-Base on **Something-Something V2** with 64 GPUs (8 nodes x 8 GPUs), you can run + + ```bash + OUTPUT_DIR='YOUR_PATH/ssv2_videomae_pretrain_base_patch16_224_frame_16x2_tube_mask_ratio_0.9_e2400/eval_lr_1e-3_epoch_40' + DATA_PATH='YOUR_PATH/list_ssv2' + MODEL_PATH='YOUR_PATH/ssv2_videomae_pretrain_base_patch16_224_frame_16x2_tube_mask_ratio_0.9_e2400/checkpoint-2399.pth' + + OMP_NUM_THREADS=1 python -m torch.distributed.launch --nproc_per_node=8 \ + --master_port 12320 --nnodes=8 \ + --node_rank=0 --master_addr=$ip_node_0 \ + run_class_finetuning.py \ + --model vit_base_patch16_224 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 8 \ + --num_sample 1 \ + --input_size 224 \ + --short_side_size 224 \ + --save_ckpt_freq 10 \ + --num_frames 16 \ + --sampling_rate 2 \ + --opt adamw \ + --lr 1e-3 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --epochs 40 \ + --dist_eval \ + --test_num_segment 2 \ + --test_num_crop 3 \ + --enable_deepspeed + ``` + + on the first node. On other nodes, run the same command with `--node_rank 1`, ..., `--node_rank 7` respectively. `--master_addr` is set as the ip of the node 0. + +- For example, to fine-tune VideoMAE ViT-Base on **Kinetics400** with 64 GPUs (8 nodes x 8 GPUs), you can run + + ```bash + OUTPUT_DIR='YOUR_PATH/k400_videomae_pretrain_base_patch16_224_frame_16x4_tube_mask_ratio_0.9_e800/eval_lr_1e-3_epoch_100' + DATA_PATH='YOUR_PATH/list_kinetics-400' + MODEL_PATH='YOUR_PATH/k400_videomae_pretrain_base_patch16_224_frame_16x4_tube_mask_ratio_0.9_e800/checkpoint-799.pth' + + OMP_NUM_THREADS=1 python -m torch.distributed.launch --nproc_per_node=8 \ + --master_port 12320 --nnodes=8 \ + --node_rank=0 --master_addr=$ip_node_0 \ + run_class_finetuning.py \ + --model vit_base_patch16_224 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 8 \ + --num_sample 1 \ + --input_size 224 \ + --short_side_size 224 \ + --save_ckpt_freq 10 \ + --num_frames 16 \ + --sampling_rate 4 \ + --opt adamw \ + --lr 1e-3 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --epochs 100 \ + --dist_eval \ + --test_num_segment 5 \ + --test_num_crop 3 \ + --enable_deepspeed + ``` + + on the first node. On other nodes, run the same command with `--node_rank 1`, ..., `--node_rank 7` respectively. `--master_addr` is set as the ip of the node 0. + +### Note: + +- We didn't use `cls token` in our implementation, and directly average the feature of last layer for video classification. +- Here total batch size = (`batch_size` per gpu) x `nodes` x (gpus per node). +- `lr` here is the base learning rate. The ` actual lr` is computed by the [linear scaling rule](https://arxiv.org/abs/1706.02677): `` actual lr`` = `lr` * total batch size / 256. + +## Slurm + +TODO \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/README.md b/Downstream/Spatial-Temporal-Action-Localization/README.md new file mode 100644 index 0000000..d9893fc --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/README.md @@ -0,0 +1,165 @@ +# VideoMAE Installation + +## Environment Configuration + +The codebase is mainly built with following libraries: + +- Python 3.6 or higher +- [PyTorch](https://pytorch.org/) and [torchvision](https://github.com/pytorch/vision).
+ We can successfully reproduce the main results in two settings:
+ Tesla **A100** (40G): CUDA 11.1 + PyTorch 1.8.0 + torchvision 0.9.0
+ Tesla **V100** (32G): CUDA 10.1 + PyTorch 1.6.0 + torchvision 0.7.0
+
The torch version has a great impact on the results, so we recommend configuring the environment with one of the settings above or a newer version.
+- [timm==0.4.8/0.4.12](https://github.com/rwightman/pytorch-image-models) +- [deepspeed==0.5.8](https://github.com/microsoft/DeepSpeed) +- [TensorboardX](https://github.com/lanpa/tensorboardX) +- [decord](https://github.com/dmlc/decord) +- [einops](https://github.com/arogozhnikov/einops) +- [av](https://github.com/PyAV-Org/PyAV) +- [tqdm](https://github.com/tqdm/tqdm) + +
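+To confirm that an existing environment roughly matches the versions above, a minimal check such as the following can be used (a sketch; it only prints the installed versions, and the import names are assumed to match the package names):
+
+```python
+# Print the versions of the key dependencies listed above.
+import torch, torchvision, timm, deepspeed, decord, einops, av
+
+print('torch', torch.__version__, 'cuda', torch.version.cuda)
+print('torchvision', torchvision.__version__)
+print('timm', timm.__version__)            # tested with 0.4.8 / 0.4.12
+print('deepspeed', deepspeed.__version__)  # tested with 0.5.8
+print('decord', decord.__version__)
+print('einops', einops.__version__)
+print('av', av.__version__)
+```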
We recommend setting up the environment with Anaconda; the step-by-step installation script is shown below.
+```shell +conda create -n VideoMAE_ava python=3.7 +conda activate VideoMAE_ava + +#install pytorch with the same cuda version as in your environment +pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 torchaudio==0.11.0 --extra-index-url https://download.pytorch.org/whl/cu113 + +conda install av -c conda-forge +conda install cython +``` + + +## Data Preparation + +### AVA2.2 +The code combines [VideoMAE](https://github.com/MCG-NJU/VideoMAE) and [Alphaction](https://github.com/MVIG-SJTU/AlphAction), and the preparation of AVA data refers to the [data preparation](https://github.com/MVIG-SJTU/AlphAction/blob/master/DATA.md) of Alphaction. If you only need to train and test on the AVA dataset, you do not need to prepare the Kinetics dataset. + +### Kinetics + +### Other_files +`video_map.npy` : Mapping of video id and corresponding video path + +`ak_val_gt.csv` : The ground truth of the val-set of ava-kinetics + + +### AVA-Kinetics file +In order to facilitate everyone to download together, we have organized the annotation files we used, which are available for download in [OneDrive](https://1drv.ms/u/s!AjZXNAXrK-eti2zl_JNeQbmizFbC?e=VjdMJ3). It should be noted that the files we use may be slightly different from the officially provided files, especially for kinetics, the annotations we use The version may be older, and some videos may be different from what you are downloading now. + +## Train +Here is a script that uses the ava-kinetics dataset for training and eval on the ava dataset +```bash +MODEL_PATH='YOUR_PATH/PRETRAIN_MODEL.pth' +OUTPUT_DIR='YOUR_PATH/OUTPUT_DIR' +python -m torch.distributed.launch --nproc_per_node=8 \ + --master_port 12320 --nnodes=8 \ + --node_rank=0 --master_addr=$ip_node_0 \ + run_class_finetuning.py \ + --model vit_large_patch16_224 \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 8 \ + --update_freq 1 \ + --num_sample 1 \ + --input_size 224 \ + --save_ckpt_freq 1 \ + --num_frames 16 \ + --sampling_rate 4 \ + --opt adamw \ + --lr 0.00025 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --epochs 30 \ + --data_set "ava-kinetics" \ + --enable_deepspeed \ + --val_freq 30 \ + --drop_path 0.2\ +``` + + +* SLURM ENV +```bash +MODEL_PATH='YOUR_PATH/PRETRAIN_MODEL.pth' +OUTPUT_DIR='YOUR_PATH/OUTPUT_DIR' +PARTITION=${PARTITION:-"video"} +GPUS=${GPUS:-32} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-12} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} +srun -p video \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + ${SRUN_ARGS} \ + python -u run_class_finetuning.py \ + --model vit_large_patch16_224 \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 8 \ + --update_freq 1 \ + --num_sample 1 \ + --input_size 224 \ + --save_ckpt_freq 1 \ + --num_frames 16 \ + --sampling_rate 4 \ + --opt adamw \ + --lr 0.00025 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --epochs 30 \ + --data_set "ava" \ + --enable_deepspeed \ + --val_freq 30 \ + --drop_path 0.2\ + ${PY_ARGS} +``` + +## Eval +* SLURM ENV + +```shell +DATA_PATH='YOUR_PATH/list_kinetics-400' #it can be any string in our task +MODEL_PATH='YOUR_PATH/PRETRAIN_MODEL.pth' +OUTPUT_DIR='YOUR_PATH/OUTPUT_DIR' +PARTITION=${PARTITION:-"video"} +GPUS=${GPUS:-32} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-12} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} +srun -p video \ + 
--gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + ${SRUN_ARGS} \ + python -u run_class_finetuning.py \ + --model vit_large_patch16_224 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 4 \ + --update_freq 1 \ + --num_sample 1 \ + --input_size 224 \ + --save_ckpt_freq 1 \ + --num_frames 16 \ + --sampling_rate 4 \ + --opt adamw \ + --lr 0.00025 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --epochs 30 \ + --data_set "ava" \ + --enable_deepspeed \ + --val_freq 30 \ + --drop_path 0.2\ + --eval \ + ${PY_ARGS} +``` \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/akeval.sh b/Downstream/Spatial-Temporal-Action-Localization/akeval.sh new file mode 100644 index 0000000..eeae2fe --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/akeval.sh @@ -0,0 +1,55 @@ +#!/bin/sh +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +#module load anaconda/2021.05 +#conda activate mae_stad +export NCCL_IB_HCA=mlx5_1:1,mlx5_2:1,mlx5_3:1,mlx5_4:1 +export NCCL_IB_DISABLE=0 +export NCCL_SOCKET_IFNAME=eth0 +export NCCL_IB_GID_INDEX=3 +export NCCL_IB_TIMEOUT=23 +export NCCL_IB_RETRY_CNT=7 + +module load nccl/2.11.4-1_cuda11.1.1 +# Set the path to save checkpoints +OUTPUT_DIR='/data/home/scw6003/xingsen/VideoMAE_ava/workdir/vit_h_kinetics_eval_30e_ava_only/ckp29' +# path to Kinetics set (train.csv/val.csv/test.csv) +DATA_PATH='YOUR_PATH/list_kinetics-400' # no use +# path to pretrain model +MODEL_PATH='/data/home/scw6003/xingsen/VideoMAE_ava/workdir/vit_h_ak_good/checkpoint-29/mp_rank_00_model_states.pt' + +# We add repeated_aug (--num_sample = 2) on Kinetics-400 here, +# which could better performance while need more time for fine-tuning +NUM_NODES=${NUM_NODES:-4} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-12} +# batch_size can be adjusted according to number of GPUs +# this script is for 64 GPUs (8 nodes x 8 GPUs) +srun -N 1 \ + --gres=gpu:1 \ + --ntasks-per-node=1 \ + --cpus-per-task=14 \ + --qos=gpugpu \ + python -u run_class_finetuning.py \ + --model vit_huge_patch16_224 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 2 \ + --update_freq 1 \ + --num_sample 1 \ + --input_size 224 \ + --save_ckpt_freq 1 \ + --num_frames 16 \ + --sampling_rate 4 \ + --opt adamw \ + --lr 0.00025 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --epochs 30 \ + --data_set "ava" \ + --val_freq 30 \ + --drop_path 0.2 \ + --enable_deepspeed \ + --eval \ + diff --git a/Downstream/Spatial-Temporal-Action-Localization/akfinetune.sh b/Downstream/Spatial-Temporal-Action-Localization/akfinetune.sh new file mode 100644 index 0000000..1dac34a --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/akfinetune.sh @@ -0,0 +1,54 @@ +#!/bin/sh +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +#module load anaconda/2021.05 +#conda activate mae_stad +export NCCL_IB_HCA=mlx5_1:1,mlx5_2:1,mlx5_3:1,mlx5_4:1 +export NCCL_IB_DISABLE=0 +export NCCL_SOCKET_IFNAME=eth0 +export NCCL_IB_GID_INDEX=3 +export NCCL_IB_TIMEOUT=23 +export NCCL_IB_RETRY_CNT=7 + +module load nccl/2.11.4-1_cuda11.1.1 +# Set the path to save checkpoints +OUTPUT_DIR='./workdir/vit_h_ak_good' +# path to Kinetics set (train.csv/val.csv/test.csv) +DATA_PATH='YOUR_PATH/list_kinetics-400' # no use +# path to pretrain model 
+MODEL_PATH='/data/home/scw6003/chenguo/vit_ckpt/vit_h_hybridv2_pt_1200e_k700_ft.pth' + +# We add repeated_aug (--num_sample = 2) on Kinetics-400 here, +# which could better performance while need more time for fine-tuning +NUM_NODES=${NUM_NODES:-4} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-12} +# batch_size can be adjusted according to number of GPUs +# this script is for 64 GPUs (8 nodes x 8 GPUs) +srun -N 4 \ + --gres=gpu:8 \ + --ntasks-per-node=8 \ + --cpus-per-task=14 \ + --qos=gpugpu \ + python -u run_class_finetuning.py \ + --model vit_huge_patch16_224 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 4 \ + --update_freq 1 \ + --num_sample 1 \ + --input_size 224 \ + --save_ckpt_freq 1 \ + --num_frames 16 \ + --sampling_rate 4 \ + --opt adamw \ + --lr 0.00025 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --epochs 30 \ + --data_set "ava" \ + --val_freq 30 \ + --drop_path 0.2 \ + --enable_deepspeed \ + diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/__init__.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/__pycache__/__init__.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..58597e2 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/__pycache__/__init__.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/__init__.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/__init__.py new file mode 100644 index 0000000..1d7cfcb --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/__init__.py @@ -0,0 +1 @@ +from .defaults import _C as cfg diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/__pycache__/__init__.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..4bf86a4 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/__pycache__/__init__.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/__pycache__/__init__.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000..d8e0df7 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/__pycache__/__init__.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/__pycache__/defaults.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/__pycache__/defaults.cpython-37.pyc new file mode 100644 index 0000000..8d26f48 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/__pycache__/defaults.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/__pycache__/defaults.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/__pycache__/defaults.cpython-39.pyc new file mode 100644 index 0000000..3186f5b Binary files /dev/null and 
b/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/__pycache__/defaults.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/__pycache__/paths_catalog.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/__pycache__/paths_catalog.cpython-37.pyc new file mode 100644 index 0000000..5dd2b46 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/__pycache__/paths_catalog.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/__pycache__/paths_catalog.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/__pycache__/paths_catalog.cpython-39.pyc new file mode 100644 index 0000000..7a1ce21 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/__pycache__/paths_catalog.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/defaults.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/defaults.py new file mode 100644 index 0000000..af395d8 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/defaults.py @@ -0,0 +1,242 @@ +from yacs.config import CfgNode as CN + +# ----------------------------------------------------------------------------- +# Config definition +# ----------------------------------------------------------------------------- + +_C = CN() + +_C.MODEL = CN() + +_C.MODEL.WEIGHT = "" + +# ----------------------------------------------------------------------------- +# ROI action head config. +# ----------------------------------------------------------------------------- +_C.MODEL.ROI_ACTION_HEAD = CN() + +# Feature extractor config. +_C.MODEL.ROI_ACTION_HEAD.FEATURE_EXTRACTOR = "MaxpoolFeatureExtractor" + +# Config for pooler in feature extractor. +# Pooler type, can be 'pooling3d' or 'align3d' +_C.MODEL.ROI_ACTION_HEAD.POOLER_TYPE = 'align3d' +_C.MODEL.ROI_ACTION_HEAD.POOLER_RESOLUTION = 7 +_C.MODEL.ROI_ACTION_HEAD.POOLER_SCALE = 1./16 +# Only used for align3d +_C.MODEL.ROI_ACTION_HEAD.POOLER_SAMPLING_RATIO = 0 +_C.MODEL.ROI_ACTION_HEAD.MEAN_BEFORE_POOLER = False + +_C.MODEL.ROI_ACTION_HEAD.MLP_HEAD_DIM = 1024 + +# Action predictor config. +_C.MODEL.ROI_ACTION_HEAD.PREDICTOR = "FCPredictor" +_C.MODEL.ROI_ACTION_HEAD.DROPOUT_RATE = 0.0 +_C.MODEL.ROI_ACTION_HEAD.NUM_CLASSES = 80 + +# Action loss evaluator config. +_C.MODEL.ROI_ACTION_HEAD.PROPOSAL_PER_CLIP = 10 +_C.MODEL.ROI_ACTION_HEAD.NUM_PERSON_MOVEMENT_CLASSES = 14 +_C.MODEL.ROI_ACTION_HEAD.NUM_OBJECT_MANIPULATION_CLASSES = 49 +_C.MODEL.ROI_ACTION_HEAD.NUM_PERSON_INTERACTION_CLASSES = 17 + +_C.MODEL.ROI_ACTION_HEAD.POSE_LOSS_WEIGHT = 1.2 +_C.MODEL.ROI_ACTION_HEAD.OBJECT_LOSS_WEIGHT = float(_C.MODEL.ROI_ACTION_HEAD.NUM_OBJECT_MANIPULATION_CLASSES) +_C.MODEL.ROI_ACTION_HEAD.PERSON_LOSS_WEIGHT = float(_C.MODEL.ROI_ACTION_HEAD.NUM_PERSON_INTERACTION_CLASSES) + +# Focal loss config. +_C.MODEL.ROI_ACTION_HEAD.FOCAL_LOSS = CN() +_C.MODEL.ROI_ACTION_HEAD.FOCAL_LOSS.GAMMA = 2.0 +_C.MODEL.ROI_ACTION_HEAD.FOCAL_LOSS.ALPHA = -1. 
+ +# ----------------------------------------------------------------------------- +# INPUT +# ----------------------------------------------------------------------------- +_C.INPUT = CN() +# Size of the smallest side of the image during training +_C.INPUT.MIN_SIZE_TRAIN = 256 +# Maximum size of the side of the image during training +_C.INPUT.MAX_SIZE_TRAIN = 464 +# Size of the smallest side of the image during testing +_C.INPUT.MIN_SIZE_TEST = 256 +# Maximum size of the side of the image during testing +_C.INPUT.MAX_SIZE_TEST = 464 +# Values to be used for image normalization, in rgb order +_C.INPUT.PIXEL_MEAN = [122.7717, 115.9465, 102.9801] +# Values to be used for image normalization +_C.INPUT.PIXEL_STD = [57.375, 57.375, 57.375] +# Convert image to BGR format (for Caffe2 models) +_C.INPUT.TO_BGR = False + +_C.INPUT.FRAME_NUM = 32 +_C.INPUT.FRAME_SAMPLE_RATE = 2 +_C.INPUT.TAU = 8 +_C.INPUT.ALPHA = 8 +_C.INPUT.SLOW_JITTER = False + +_C.INPUT.COLOR_JITTER = False +_C.INPUT.HUE_JITTER = 20.0 #in degree, hue is in 0~360 +_C.INPUT.SAT_JITTER = 0.1 +_C.INPUT.VAL_JITTER = 0.1 + +# ----------------------------------------------------------------------------- +# Dataset +# ----------------------------------------------------------------------------- +_C.DATASETS = CN() +# List of the dataset names for training, as present in paths_catalog.py +_C.DATASETS.TRAIN = () +# List of the dataset names for testing, as present in paths_catalog.py +_C.DATASETS.TEST = () + +# ----------------------------------------------------------------------------- +# DataLoader +# ----------------------------------------------------------------------------- +_C.DATALOADER = CN() +# Number of dataset loading threads +_C.DATALOADER.NUM_WORKERS = 4 +# If > 0, this enforces that each collated batch should have a size divisible +# by SIZE_DIVISIBILITY +_C.DATALOADER.SIZE_DIVISIBILITY = 16 +# If True, each batch should contain only images for which the aspect ratio +# is compatible. This groups portrait images together, and landscape images +# are not batched with portrait images. +_C.DATALOADER.ASPECT_RATIO_GROUPING = True + + +# ---------------------------------------------------------------------------- # +# Backbone options +# ---------------------------------------------------------------------------- # +_C.MODEL.BACKBONE = CN() + +# The backbone conv body to use +# Available backbone conv-body should be registered in modeling.backbone.backbone.py +_C.MODEL.BACKBONE.CONV_BODY = "Slowfast-Resnet50" + +_C.MODEL.BACKBONE.FROZEN_BN = False + +_C.MODEL.BACKBONE.BN_MOMENTUM = 0.1 +_C.MODEL.BACKBONE.BN_EPSILON = 1e-05 + +# Kaiming: +# We may use 0 to initialize the residual branch of a residual block, +# so the inital state of the block is exactly identiy. This helps optimizaiton. 
+_C.MODEL.BACKBONE.BN_INIT_GAMMA = 0.0 + +_C.MODEL.BACKBONE.I3D = CN() +_C.MODEL.BACKBONE.I3D.CONV3_NONLOCAL = True +_C.MODEL.BACKBONE.I3D.CONV4_NONLOCAL = True +_C.MODEL.BACKBONE.I3D.CONV3_GROUP_NL = False + +# ---------------------------------------------------------------------------- # +# Slowfast options +# ---------------------------------------------------------------------------- # +_C.MODEL.BACKBONE.SLOWFAST = CN() +_C.MODEL.BACKBONE.SLOWFAST.BETA = 1./8 +_C.MODEL.BACKBONE.SLOWFAST.LATERAL = 'tconv' +_C.MODEL.BACKBONE.SLOWFAST.SLOW = CN() +_C.MODEL.BACKBONE.SLOWFAST.SLOW.ACTIVE = True +_C.MODEL.BACKBONE.SLOWFAST.SLOW.CONV3_NONLOCAL = True +_C.MODEL.BACKBONE.SLOWFAST.SLOW.CONV4_NONLOCAL = True +_C.MODEL.BACKBONE.SLOWFAST.SLOW.CONV3_GROUP_NL = False +_C.MODEL.BACKBONE.SLOWFAST.FAST = CN() +_C.MODEL.BACKBONE.SLOWFAST.FAST.ACTIVE = True +_C.MODEL.BACKBONE.SLOWFAST.FAST.CONV3_NONLOCAL = False +_C.MODEL.BACKBONE.SLOWFAST.FAST.CONV4_NONLOCAL = False +_C.MODEL.BACKBONE.SLOWFAST.FAST.CONV3_GROUP_NL = False + +# ---------------------------------------------------------------------------- # +# Nonlocal options +# ---------------------------------------------------------------------------- # +_C.MODEL.NONLOCAL = CN() +_C.MODEL.NONLOCAL.CONV_INIT_STD = 0.01 +_C.MODEL.NONLOCAL.USE_ZERO_INIT_CONV = False +_C.MODEL.NONLOCAL.NO_BIAS = False +_C.MODEL.NONLOCAL.USE_MAXPOOL = True +_C.MODEL.NONLOCAL.USE_SOFTMAX = True +_C.MODEL.NONLOCAL.USE_SCALE = True + +_C.MODEL.NONLOCAL.USE_BN = True +_C.MODEL.NONLOCAL.FROZEN_BN = False + +_C.MODEL.NONLOCAL.BN_MOMENTUM = 0.1 +_C.MODEL.NONLOCAL.BN_EPSILON = 1e-05 +_C.MODEL.NONLOCAL.BN_INIT_GAMMA = 0.0 + + +_C.MODEL.IA_STRUCTURE = CN() +_C.MODEL.IA_STRUCTURE.ACTIVE = False +_C.MODEL.IA_STRUCTURE.STRUCTURE = "serial" +_C.MODEL.IA_STRUCTURE.MAX_PER_SEC = 5 +_C.MODEL.IA_STRUCTURE.MAX_PERSON = 25 +_C.MODEL.IA_STRUCTURE.DIM_IN = 2304 +_C.MODEL.IA_STRUCTURE.DIM_INNER = 512 +_C.MODEL.IA_STRUCTURE.DIM_OUT = 512 +_C.MODEL.IA_STRUCTURE.PENALTY = True +_C.MODEL.IA_STRUCTURE.LENGTH = (30, 30) +_C.MODEL.IA_STRUCTURE.MEMORY_RATE = 1 +_C.MODEL.IA_STRUCTURE.FUSION = "concat" +_C.MODEL.IA_STRUCTURE.CONV_INIT_STD = 0.01 +_C.MODEL.IA_STRUCTURE.DROPOUT = 0. 
+_C.MODEL.IA_STRUCTURE.NO_BIAS = False +_C.MODEL.IA_STRUCTURE.I_BLOCK_LIST = ['P', 'O', 'M', 'P', 'O', 'M'] +_C.MODEL.IA_STRUCTURE.LAYER_NORM = False +_C.MODEL.IA_STRUCTURE.TEMPORAL_POSITION = True +_C.MODEL.IA_STRUCTURE.ROI_DIM_REDUCE = True +_C.MODEL.IA_STRUCTURE.USE_ZERO_INIT_CONV = True +_C.MODEL.IA_STRUCTURE.MAX_OBJECT = 0 + +# ---------------------------------------------------------------------------- # +# Solver +# ---------------------------------------------------------------------------- # +_C.SOLVER = CN() +_C.SOLVER.MAX_ITER = 75000 + +_C.SOLVER.BASE_LR = 0.02 +_C.SOLVER.BIAS_LR_FACTOR = 2 +_C.SOLVER.IA_LR_FACTOR = 1.0 + +_C.SOLVER.MOMENTUM = 0.9 + +_C.SOLVER.WEIGHT_DECAY = 0.0001 +_C.SOLVER.WEIGHT_DECAY_BIAS = 0.0 + +# Use for bn +_C.SOLVER.WEIGHT_DECAY_BN = 0.0 + +_C.SOLVER.SCHEDULER = "warmup_multi_step" + +_C.SOLVER.GAMMA = 0.1 +_C.SOLVER.STEPS = (33750, 67500) + +_C.SOLVER.WARMUP_ON = True +_C.SOLVER.WARMUP_FACTOR = 1.0 / 3 +_C.SOLVER.WARMUP_ITERS = 500 +_C.SOLVER.WARMUP_METHOD = "linear" + +_C.SOLVER.CHECKPOINT_PERIOD = 5000 +_C.SOLVER.EVAL_PERIOD = 5000 + +# Number of video clips per batch +# This is global, so if we have 8 GPUs and VIDEOS_PER_BATCH = 16, each GPU will +# see 2 clips per batch +_C.SOLVER.VIDEOS_PER_BATCH = 32 + + +# ---------------------------------------------------------------------------- # +# Specific test options +# ---------------------------------------------------------------------------- # +_C.TEST = CN() +# Number of video clips per batch +# This is global, so if we have 8 GPUs and VIDEOS_PER_BATCH = 16, each GPU will +# see 2 clips per batch +_C.TEST.VIDEOS_PER_BATCH = 16 + +# Config used in inference. +_C.TEST.EXTEND_SCALE = (0.1, 0.05) +_C.TEST.BOX_THRESH = 0.8 +_C.TEST.ACTION_THRESH = 0.05 + +# ---------------------------------------------------------------------------- # +# Misc options +# ---------------------------------------------------------------------------- # +_C.OUTPUT_DIR = "." 
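The `defaults.py` file above (exposed as `cfg` via `alphaction/config/__init__.py`) defines the yacs configuration tree used by this downstream task. As a quick orientation, here is a minimal, hypothetical sketch of how such a yacs config is typically cloned, overridden, and frozen, together with a plain-PyTorch reference of the sigmoid focal loss that `MODEL.ROI_ACTION_HEAD.FOCAL_LOSS.GAMMA` / `ALPHA` parameterize; the override keys and the entry point below are illustrative assumptions, not files shipped in this diff, and the actual training code uses the fused CUDA kernels added later.

```python
# Hypothetical usage sketch -- not part of this diff.
# (1) Clone / override / freeze the yacs config from alphaction/config/defaults.py.
# (2) A plain-PyTorch reference of the sigmoid focal loss controlled by
#     MODEL.ROI_ACTION_HEAD.FOCAL_LOSS.{GAMMA, ALPHA}.
import torch
from alphaction.config import cfg  # this is _C from defaults.py

def build_cfg(overrides=None):
    c = cfg.clone()                   # keep the module-level defaults untouched
    if overrides:
        c.merge_from_list(overrides)  # e.g. ["SOLVER.BASE_LR", "0.01"]
    c.freeze()
    return c

def sigmoid_focal_loss(logits, targets, gamma, alpha):
    # Reference formula: FL(p_t) = -alpha_t * (1 - p_t)^gamma * log(p_t),
    # with alpha < 0 meaning "no alpha weighting" (the convention used by the
    # SigmoidFocalLoss CUDA kernel later in this diff).
    p = torch.sigmoid(logits)
    zp = alpha if alpha >= 0 else 1.0          # weight for positive targets
    zn = (1.0 - alpha) if alpha >= 0 else 1.0  # weight for negative targets
    pos = -targets * (1.0 - p).pow(gamma) * torch.log(p.clamp(min=1e-12)) * zp
    neg = -(1.0 - targets) * p.pow(gamma) * torch.log((1.0 - p).clamp(min=1e-12)) * zn
    return pos + neg

if __name__ == "__main__":
    c = build_cfg(["MODEL.ROI_ACTION_HEAD.FOCAL_LOSS.GAMMA", "2.0"])
    logits = torch.randn(4, c.MODEL.ROI_ACTION_HEAD.NUM_CLASSES)
    targets = torch.randint(0, 2, logits.shape).float()
    loss = sigmoid_focal_loss(logits, targets,
                              c.MODEL.ROI_ACTION_HEAD.FOCAL_LOSS.GAMMA,
                              c.MODEL.ROI_ACTION_HEAD.FOCAL_LOSS.ALPHA)
    print(loss.mean().item())
```

The sketch is only meant to make the config workflow and the meaning of `GAMMA` and `ALPHA` concrete (the default `ALPHA = -1.` disables alpha-weighting); it is not the repository's training entry point.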
diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/paths_catalog.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/paths_catalog.py new file mode 100644 index 0000000..79545ea --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/config/paths_catalog.py @@ -0,0 +1,81 @@ +"""Centralized catalog of paths.""" + +import os + + +class DatasetCatalog(object): + DATA_DIR = "" + DATASETS = { + "ava_video_train_v2.2": { + "video_root": "/mnt/cache/xingsen/ava_dataset/AVA/clips/trainval/", + "ann_file": "/mnt/lustre/share_data/xingsen/ava_dataset/AVA/annotations/ava_train_v2.2_min.json", + "box_file": "", + "eval_file_paths": { + "csv_gt_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_train_v2.2.csv", + "labelmap_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_action_list_v2.2_for_activitynet_2019.pbtxt", + "exclusion_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_train_excluded_timestamps_v2.2.csv", + }, + }, + "ava_video_val_v2.2": { + "video_root": "/mnt/cache/xingsen/ava_dataset/AVA/clips/trainval", + "ann_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_val_v2.2_min.json", + "box_file": "/mnt/cache/xingsen/ava_dataset/AVA/boxes/ava_val_det_person_bbox.json", + "eval_file_paths": { + "csv_gt_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_val_v2.2.csv", + "labelmap_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_action_list_v2.2_for_activitynet_2019.pbtxt", + "exclusion_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_val_excluded_timestamps_v2.2.csv", + }, + }, + "kinetics_val":{ + 'kinetics_annfile':"/mnt/cache/xingsen/xingsen2/kinetics_val_v1.0.json", + "box_file":"/mnt/cache/xingsen/xingsen2/kinetics_person_box.json", + "eval_file_paths":{ + "csv_gt_file" : '/mnt/cache/xingsen/xingsen2/kinetics_val_gt_v1.0.csv', + "labelmap_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_action_list_v2.2_for_activitynet_2019.pbtxt", + "exclusion_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_val_excluded_timestamps_v2.2.csv", + } + }, + "ak_train":{ + "video_root" : "/mnt/cache/xingsen/ava_dataset/AVA/clips/trainval/", + "ava_annfile": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_train_v2.2_min.json", + "kinetics_annfile":"/mnt/cache/xingsen/xingsen2/kinetics_train_v1.0.json", + "ava_eval_file_path":{ + "csv_gt_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_train_v2.2.csv", + "labelmap_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_action_list_v2.2_for_activitynet_2019.pbtxt", + "exclusion_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_train_excluded_timestamps_v2.2.csv" + } + }, + "ak_val":{ + "video_root":"/mnt/cache/xingsen/ava_dataset/AVA/clips/trainval/", + "ava_annfile":"/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_val_v2.2_min.json", + "kinetics_annfile":"/mnt/cache/xingsen/xingsen2/kinetics_val_v1.0.json", + "kinetics_box_file":"/mnt/cache/xingsen/xingsen2/kinetics_person_box.json", + "ava_box_file":"/mnt/cache/xingsen/ava_dataset/AVA/boxes/ava_val_det_person_bbox.json", + "eval_file_paths":{ + "csv_gt_file":'/mnt/cache/xingsen/xingsen2/ak_val_gt.csv', + "labelmap_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_action_list_v2.2_for_activitynet_2019.pbtxt", + "exclusion_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_val_excluded_timestamps_v2.2.csv", + }, + "ava_eval_file_path":{ + "csv_gt_file": 
"/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_val_v2.2.csv", + "labelmap_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_action_list_v2.2_for_activitynet_2019.pbtxt", + "exclusion_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_val_excluded_timestamps_v2.2.csv", + }, + "kinetics_eval_file_path":{ + "csv_gt_file" : '/mnt/cache/xingsen/xingsen2/kinetics_val_gt_v1.0.csv', + "labelmap_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_action_list_v2.2_for_activitynet_2019.pbtxt", + "exclusion_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_val_excluded_timestamps_v2.2.csv" + } + } + } + + @staticmethod + def get(name): + if "ava_video" in name: + data_dir = DatasetCatalog.DATA_DIR + args = DatasetCatalog.DATASETS[name] + return dict( + factory="Dataset", + args=args + ) + raise RuntimeError("Dataset not available: {}".format(name)) diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/ROIAlign3d.h b/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/ROIAlign3d.h new file mode 100644 index 0000000..27ec792 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/ROIAlign3d.h @@ -0,0 +1,46 @@ +#pragma once + +#include "cpu/vision.h" + +#ifdef WITH_CUDA +#include "cuda/vision.h" +#endif + +// Interface for Python +at::Tensor ROIAlign3d_forward(const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio) { + if (input.is_cuda()) { +#ifdef WITH_CUDA + return ROIAlign3d_forward_cuda(input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + +at::Tensor ROIAlign3d_backward(const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int length, + const int height, + const int width, + const int sampling_ratio) { + if (grad.is_cuda()) { +#ifdef WITH_CUDA + return ROIAlign3d_backward_cuda(grad, rois, spatial_scale, pooled_height, pooled_width, batch_size, channels, length, height, width, sampling_ratio); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/ROIPool3d.h b/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/ROIPool3d.h new file mode 100644 index 0000000..43ece76 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/ROIPool3d.h @@ -0,0 +1,45 @@ +#pragma once + +#include "cpu/vision.h" + +#ifdef WITH_CUDA +#include "cuda/vision.h" +#endif + + +std::tuple ROIPool3d_forward(const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width) { + if (input.is_cuda()) { +#ifdef WITH_CUDA + return ROIPool3d_forward_cuda(input, rois, spatial_scale, pooled_height, pooled_width); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + +at::Tensor ROIPool3d_backward(const at::Tensor& grad, + const at::Tensor& input, + const at::Tensor& rois, + const at::Tensor& argmax, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int length, + const int height, + const int width) { + 
if (grad.is_cuda()) { +#ifdef WITH_CUDA + return ROIPool3d_backward_cuda(grad, input, rois, argmax, spatial_scale, pooled_height, pooled_width, batch_size, channels, length, height, width); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/SigmoidFocalLoss.h b/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/SigmoidFocalLoss.h new file mode 100644 index 0000000..f6db8ba --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/SigmoidFocalLoss.h @@ -0,0 +1,39 @@ +#pragma once + +#include "cpu/vision.h" + +#ifdef WITH_CUDA +#include "cuda/vision.h" +#endif + +// Interface for Python +at::Tensor SigmoidFocalLoss_forward( + const at::Tensor& logits, + const at::Tensor& targets, + const float gamma, + const float alpha) { + if (logits.is_cuda()) { +#ifdef WITH_CUDA + return SigmoidFocalLoss_forward_cuda(logits, targets, gamma, alpha); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + +at::Tensor SigmoidFocalLoss_backward( + const at::Tensor& logits, + const at::Tensor& targets, + const at::Tensor& d_losses, + const float gamma, + const float alpha) { + if (logits.is_cuda()) { +#ifdef WITH_CUDA + return SigmoidFocalLoss_backward_cuda(logits, targets, d_losses, gamma, alpha); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/SoftmaxFocalLoss.h b/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/SoftmaxFocalLoss.h new file mode 100644 index 0000000..034bacc --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/SoftmaxFocalLoss.h @@ -0,0 +1,40 @@ +#pragma once + +#include "cpu/vision.h" + +#ifdef WITH_CUDA +#include "cuda/vision.h" +#endif + +// Interface for Python +std::tuple SoftmaxFocalLoss_forward( + const at::Tensor& logits, + const at::Tensor& targets, + const float gamma, + const float alpha) { + if (logits.is_cuda()) { +#ifdef WITH_CUDA + return SoftmaxFocalLoss_forward_cuda(logits, targets, gamma, alpha); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + +at::Tensor SoftmaxFocalLoss_backward( + const at::Tensor& logits, + const at::Tensor& targets, + const at::Tensor& P, + const at::Tensor& d_losses, + const float gamma, + const float alpha) { + if (logits.is_cuda()) { +#ifdef WITH_CUDA + return SoftmaxFocalLoss_backward_cuda(logits, targets, P, d_losses, gamma, alpha); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/cpu/vision.h b/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/cpu/vision.h new file mode 100644 index 0000000..a13f86f --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/cpu/vision.h @@ -0,0 +1,2 @@ +#pragma once +#include diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/cuda/ROIAlign3d_cuda.cu b/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/cuda/ROIAlign3d_cuda.cu new file mode 100644 index 0000000..c058f05 --- /dev/null +++ 
b/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/cuda/ROIAlign3d_cuda.cu @@ -0,0 +1,351 @@ +#include +#include + +#include +#include +#include + +// TODO make it in a common file +#define CUDA_1D_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ + i += blockDim.x * gridDim.x) + + +template +__device__ T bilinear_interpolate(const T* bottom_data, + const int height, const int width, + T y, T x, + const int index /* index for debug only*/) { + + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + //empty + return 0; + } + + if (y <= 0) y = 0; + if (x <= 0) x = 0; + + int y_low = (int) y; + int x_low = (int) x; + int y_high; + int x_high; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T) y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T) x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. - lx; + // do bilinear interpolation + T v1 = bottom_data[y_low * width + x_low]; + T v2 = bottom_data[y_low * width + x_high]; + T v3 = bottom_data[y_high * width + x_low]; + T v4 = bottom_data[y_high * width + x_high]; + T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + return val; +} + +template +__global__ void RoIAlign3dForward(const int nthreads, const T* bottom_data, + const T spatial_scale, const int channels, + const int length, const int height, const int width, + const int pooled_height, const int pooled_width, + const int sampling_ratio, + const T* bottom_rois, T* top_data) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, l, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int l = (index / pooled_width / pooled_height) % length; + int c = (index / pooled_width / pooled_height / length) % channels; + int n = index / pooled_width / pooled_height / length / channels; + + const T* offset_bottom_rois = bottom_rois + n * 5; + int roi_batch_ind = offset_bottom_rois[0]; + + // Do not using rounding; this implementation detail is critical + T roi_start_w = offset_bottom_rois[1] * spatial_scale; + T roi_start_h = offset_bottom_rois[2] * spatial_scale; + T roi_end_w = offset_bottom_rois[3] * spatial_scale; + T roi_end_h = offset_bottom_rois[4] * spatial_scale; + // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale); + // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale); + // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale); + // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale); + + // Force malformed ROIs to be 1x1 + T roi_width = max(roi_end_w - roi_start_w, (T)1.); + T roi_height = max(roi_end_h - roi_start_h, (T)1.); + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + const T* offset_bottom_data = bottom_data + ((roi_batch_ind * channels + c) * length + l) * height * width; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // We do average (integral) pooling inside a bin + const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 + + T output_val = 0.; + for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 + { + const T y = roi_start_h + ph * bin_size_h + static_cast(iy + .5f) * bin_size_h / static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix ++) + { + const T x = roi_start_w + pw * bin_size_w + static_cast(ix + .5f) * bin_size_w / static_cast(roi_bin_grid_w); + + T val = bilinear_interpolate(offset_bottom_data, height, width, y, x, index); + output_val += val; + } + } + output_val /= count; + + top_data[index] = output_val; + } +} + + +template +__device__ void bilinear_interpolate_gradient( + const int height, const int width, + T y, T x, + T & w1, T & w2, T & w3, T & w4, + int & x_low, int & x_high, int & y_low, int & y_high, + const int index /* index for debug only*/) { + + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + //empty + w1 = w2 = w3 = w4 = 0.; + x_low = x_high = y_low = y_high = -1; + return; + } + + if (y <= 0) y = 0; + if (x <= 0) x = 0; + + y_low = (int) y; + x_low = (int) x; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T) y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T) x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. - lx; + + // reference in forward + // T v1 = bottom_data[y_low * width + x_low]; + // T v2 = bottom_data[y_low * width + x_high]; + // T v3 = bottom_data[y_high * width + x_low]; + // T v4 = bottom_data[y_high * width + x_high]; + // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + return; +} + +template +__global__ void RoIAlign3dBackwardFeature(const int nthreads, const T* top_diff, + const int num_rois, const T spatial_scale, + const int channels, const int length, const int height, const int width, + const int pooled_height, const int pooled_width, + const int sampling_ratio, + T* bottom_diff, + const T* bottom_rois) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, l, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int l = (index / pooled_width / pooled_height) % length; + int c = (index / pooled_width / pooled_height / length) % channels; + int n = index / pooled_width / pooled_height / length / channels; + + const T* offset_bottom_rois = bottom_rois + n * 5; + int roi_batch_ind = offset_bottom_rois[0]; + + // Do not using rounding; this implementation detail is critical + T roi_start_w = offset_bottom_rois[1] * spatial_scale; + T roi_start_h = offset_bottom_rois[2] * spatial_scale; + T roi_end_w = offset_bottom_rois[3] * spatial_scale; + T roi_end_h = offset_bottom_rois[4] * spatial_scale; + // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale); + // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale); + // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale); + // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale); + + // Force malformed ROIs to be 1x1 + T roi_width = max(roi_end_w - roi_start_w, (T)1.); + T roi_height = max(roi_end_h - roi_start_h, (T)1.); + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + T* offset_bottom_diff = bottom_diff + ((roi_batch_ind * channels + c) * length + l) * height * width; + + int 
top_offset = ((n * channels + c) * length + l) * pooled_height * pooled_width; + const T* offset_top_diff = top_diff + top_offset; + const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // We do average (integral) pooling inside a bin + const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 + + for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 + { + const T y = roi_start_h + ph * bin_size_h + static_cast(iy + .5f) * bin_size_h / static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix ++) + { + const T x = roi_start_w + pw * bin_size_w + static_cast(ix + .5f) * bin_size_w / static_cast(roi_bin_grid_w); + + T w1, w2, w3, w4; + int x_low, x_high, y_low, y_high; + + bilinear_interpolate_gradient(height, width, y, x, + w1, w2, w3, w4, + x_low, x_high, y_low, y_high, + index); + + T g1 = top_diff_this_bin * w1 / count; + T g2 = top_diff_this_bin * w2 / count; + T g3 = top_diff_this_bin * w3 / count; + T g4 = top_diff_this_bin * w4 / count; + + if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) + { + atomicAdd(offset_bottom_diff + y_low * width + x_low, static_cast(g1)); + atomicAdd(offset_bottom_diff + y_low * width + x_high, static_cast(g2)); + atomicAdd(offset_bottom_diff + y_high * width + x_low, static_cast(g3)); + atomicAdd(offset_bottom_diff + y_high * width + x_high, static_cast(g4)); + } // if + } // ix + } // iy + } // CUDA_1D_KERNEL_LOOP +} // RoIAlign3dBackward + + +at::Tensor ROIAlign3d_forward_cuda(const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio) { + AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor"); + + auto num_rois = rois.size(0); + auto channels = input.size(1); + auto length = input.size(2); + auto height = input.size(3); + auto width = input.size(4); + + auto output = at::empty({num_rois, channels, length, pooled_height, pooled_width}, input.options()); + auto output_size = num_rois * length * pooled_height * pooled_width * channels; + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min(THCCeilDiv(output_size, 512L), 4096L)); + dim3 block(512); + + if (output.numel() == 0) { + THCudaCheck(cudaGetLastError()); + return output; + } + + AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ROIAlign3d_forward", [&] { + RoIAlign3dForward<<>>( + output_size, + input.contiguous().data_ptr(), + spatial_scale, + channels, + length, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + rois.contiguous().data_ptr(), + output.data_ptr()); + }); + THCudaCheck(cudaGetLastError()); + return output; +} + +// TODO remove the dependency on input and use instead its sizes -> save memory +at::Tensor ROIAlign3d_backward_cuda(const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int length, + const int height, + const int width, + const int sampling_ratio) { + AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor"); + AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor"); + + auto num_rois = 
rois.size(0); + auto grad_input = at::zeros({batch_size, channels, length, height, width}, grad.options()); + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min(THCCeilDiv(grad.numel(), 512L), 4096L)); + dim3 block(512); + + // handle possibly empty gradients + if (grad.numel() == 0) { + THCudaCheck(cudaGetLastError()); + return grad_input; + } + + AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "ROIAlign3d_backward", [&] { + RoIAlign3dBackwardFeature<<>>( + grad.numel(), + grad.contiguous().data_ptr(), + num_rois, + spatial_scale, + channels, + length, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + grad_input.data_ptr(), + rois.contiguous().data_ptr()); + }); + THCudaCheck(cudaGetLastError()); + return grad_input; +} diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/cuda/ROIPool3d_cuda.cu b/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/cuda/ROIPool3d_cuda.cu new file mode 100644 index 0000000..f22ae53 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/cuda/ROIPool3d_cuda.cu @@ -0,0 +1,207 @@ +#include +#include + +#include +#include +#include + + +// TODO make it in a common file +#define CUDA_1D_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ + i += blockDim.x * gridDim.x) + + +template +__global__ void RoIPool3dFForward(const int nthreads, const T* bottom_data, + const T spatial_scale, const int channels, const int length, const int height, + const int width, const int pooled_height, const int pooled_width, + const T* bottom_rois, T* top_data, int* argmax_data) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, l, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int l = (index / pooled_width / pooled_height) % length; + int c = (index / pooled_width / pooled_height / length) % channels; + int n = index / pooled_width / pooled_height / length / channels; + + const T* offset_bottom_rois = bottom_rois + n * 5; + int roi_batch_ind = offset_bottom_rois[0]; + int roi_start_w = round(offset_bottom_rois[1] * spatial_scale); + int roi_start_h = round(offset_bottom_rois[2] * spatial_scale); + int roi_end_w = round(offset_bottom_rois[3] * spatial_scale); + int roi_end_h = round(offset_bottom_rois[4] * spatial_scale); + + // Force malformed ROIs to be 1x1 + int roi_width = max(roi_end_w - roi_start_w + 1, 1); + int roi_height = max(roi_end_h - roi_start_h + 1, 1); + T bin_size_h = static_cast(roi_height) + / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) + / static_cast(pooled_width); + + int hstart = static_cast(floor(static_cast(ph) + * bin_size_h)); + int wstart = static_cast(floor(static_cast(pw) + * bin_size_w)); + int hend = static_cast(ceil(static_cast(ph + 1) + * bin_size_h)); + int wend = static_cast(ceil(static_cast(pw + 1) + * bin_size_w)); + + // Add roi offsets and clip to input boundaries + hstart = min(max(hstart + roi_start_h, 0), height); + hend = min(max(hend + roi_start_h, 0), height); + wstart = min(max(wstart + roi_start_w, 0), width); + wend = min(max(wend + roi_start_w, 0), width); + bool is_empty = (hend <= hstart) || (wend <= wstart); + + // Define an empty pooling region to be zero + T maxval = is_empty ? 
0 : -FLT_MAX; + // If nothing is pooled, argmax = -1 causes nothing to be backprop'd + int maxidx = -1; + const T* offset_bottom_data = + bottom_data + ((roi_batch_ind * channels + c) * length + l) * height * width; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + int bottom_index = h * width + w; + if (offset_bottom_data[bottom_index] > maxval) { + maxval = offset_bottom_data[bottom_index]; + maxidx = bottom_index; + } + } + } + top_data[index] = maxval; + argmax_data[index] = maxidx; + } +} + +template +__global__ void RoIPool3dFBackward(const int nthreads, const T* top_diff, + const int* argmax_data, const int num_rois, const T spatial_scale, + const int channels, const int length, const int height, const int width, + const int pooled_height, const int pooled_width, T* bottom_diff, + const T* bottom_rois) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, l, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int l = (index / pooled_width / pooled_height) % length; + int c = (index / pooled_width / pooled_height / length) % channels; + int n = index / pooled_width / pooled_height / length / channels; + + const T* offset_bottom_rois = bottom_rois + n * 5; + int roi_batch_ind = offset_bottom_rois[0]; + int bottom_offset = ((roi_batch_ind * channels + c) * length + l) * height * width; + int top_offset = ((n * channels + c) * length + l) * pooled_height * pooled_width; + const T* offset_top_diff = top_diff + top_offset; + T* offset_bottom_diff = bottom_diff + bottom_offset; + const int* offset_argmax_data = argmax_data + top_offset; + + int argmax = offset_argmax_data[ph * pooled_width + pw]; + if (argmax != -1) { + atomicAdd( + offset_bottom_diff + argmax, + static_cast(offset_top_diff[ph * pooled_width + pw])); + + } + } +} + +std::tuple ROIPool3d_forward_cuda(const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width) { + AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor"); + + auto num_rois = rois.size(0); + auto channels = input.size(1); + auto length = input.size(2); + auto height = input.size(3); + auto width = input.size(4); + + auto output = at::empty({num_rois, channels, length, pooled_height, pooled_width}, input.options()); + auto output_size = num_rois * length * pooled_height * pooled_width * channels; + auto argmax = at::zeros({num_rois, channels, length, pooled_height, pooled_width}, input.options().dtype(at::kInt)); + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min(THCCeilDiv(output_size, 512L), 4096L)); + dim3 block(512); + + if (output.numel() == 0) { + THCudaCheck(cudaGetLastError()); + return std::make_tuple(output, argmax); + } + + AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ROIPool3d_forward", [&] { + RoIPool3dFForward<<>>( + output_size, + input.contiguous().data_ptr(), + spatial_scale, + channels, + length, + height, + width, + pooled_height, + pooled_width, + rois.contiguous().data_ptr(), + output.data_ptr(), + argmax.data_ptr()); + }); + THCudaCheck(cudaGetLastError()); + return std::make_tuple(output, argmax); +} + +// TODO remove the dependency on input and use instead its sizes -> save memory +at::Tensor ROIPool3d_backward_cuda(const at::Tensor& grad, + const at::Tensor& input, + const at::Tensor& rois, + const at::Tensor& argmax, + const float spatial_scale, + const int 
pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int length, + const int height, + const int width) { + AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor"); + AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor"); + // TODO add more checks + + auto num_rois = rois.size(0); + auto grad_input = at::zeros({batch_size, channels, length, height, width}, grad.options()); + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min(THCCeilDiv(grad.numel(), 512L), 4096L)); + dim3 block(512); + + // handle possibly empty gradients + if (grad.numel() == 0) { + THCudaCheck(cudaGetLastError()); + return grad_input; + } + + AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "ROIPool3d_backward", [&] { + RoIPool3dFBackward<<>>( + grad.numel(), + grad.contiguous().data_ptr(), + argmax.data_ptr(), + num_rois, + spatial_scale, + channels, + length, + height, + width, + pooled_height, + pooled_width, + grad_input.data_ptr(), + rois.contiguous().data_ptr()); + }); + THCudaCheck(cudaGetLastError()); + return grad_input; +} diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/cuda/SigmoidFocalLoss_cuda.cu b/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/cuda/SigmoidFocalLoss_cuda.cu new file mode 100644 index 0000000..df7df05 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/cuda/SigmoidFocalLoss_cuda.cu @@ -0,0 +1,188 @@ +// This file is modified from https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu +// Jiajun Tang +// yelantingfeng@sjtu.edu.cn +#include +#include + +#include +#include +#include + +#include + +// TODO make it in a common file +#define CUDA_1D_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ + i += blockDim.x * gridDim.x) + + +template +__global__ void SigmoidFocalLossForward(const int nthreads, + const T* logits, + const T* targets, + const int num_classes, + const float gamma, + const float alpha, + T* losses) { + CUDA_1D_KERNEL_LOOP(i, nthreads) { + + // Decide it is positive or negative case. + T c1 = targets[i]; + T c2 = (1.0 - c1); + + // alpha flag. + T af1 = (alpha >= 0); + T af2 = (1.0 - af1); + + T zn = (1.0 - alpha) * af1 + af2; + T zp = (alpha) * af1 + af2; + + // p = 1. / 1. + expf(-x); p = sigmoid(x) + T p = 1. / (1. + expf(-logits[i])); + + // (1-p)**gamma * log(p) where + T term1 = powf((1. - p), gamma) * logf(max(p, FLT_MIN)); + + // p**gamma * log(1-p) + T term2 = powf(p, gamma) * + (-1. * logits[i] * (logits[i] >= 0) - + logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0)))); + + losses[i] = 0.0; + losses[i] += -c1 * term1 * zp; + losses[i] += -c2 * term2 * zn; + + } // CUDA_1D_KERNEL_LOOP +} // SigmoidFocalLossForward + + +template +__global__ void SigmoidFocalLossBackward(const int nthreads, + const T* logits, + const T* targets, + const T* d_losses, + const int num_classes, + const float gamma, + const float alpha, + T* d_logits) { + CUDA_1D_KERNEL_LOOP(i, nthreads) { + + // Decide it is positive or negative case. + T c1 = targets[i]; + T c2 = (1 - c1); + + // alpha flag. + T af1 = (alpha >= 0); + T af2 = (1.0 - af1); + + T zn = (1.0 - alpha) * af1 + af2; + T zp = (alpha) * af1 + af2; + // p = 1. / 1. + expf(-x); p = sigmoid(x) + T p = 1. / (1. + expf(-logits[i])); + + // (1-p)**g * (1 - p - g*p*log(p) + T term1 = powf((1. - p), gamma) * + (1. 
- p - (p * gamma * logf(max(p, FLT_MIN)))); + + // (p**g) * (g*(1-p)*log(1-p) - p) + T term2 = powf(p, gamma) * + ((-1. * logits[i] * (logits[i] >= 0) - + logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0)))) * + (1. - p) * gamma - p); + d_logits[i] = 0.0; + d_logits[i] += -c1 * term1 * zp; + d_logits[i] += -c2 * term2 * zn; + d_logits[i] = d_logits[i] * d_losses[i]; + + } // CUDA_1D_KERNEL_LOOP +} // SigmoidFocalLossBackward + + +at::Tensor SigmoidFocalLoss_forward_cuda( + const at::Tensor& logits, + const at::Tensor& targets, + const float gamma, + const float alpha) { + AT_ASSERTM(logits.is_cuda(), "logits must be a CUDA tensor"); + AT_ASSERTM(targets.is_cuda(), "targets must be a CUDA tensor"); + AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); + AT_ASSERTM(targets.dim() == 2, "targets should be NxClass"); + AT_ASSERTM(logits.size(0) == targets.size(0) && logits.size(1) == targets.size(1), + "targets should have exactly the same shape with logits."); + + const int num_samples = logits.size(0); + const int num_classes = logits.size(1); + + auto losses = at::empty({num_samples, num_classes}, logits.options()); + auto losses_size = static_cast(num_samples) * num_classes; + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min(THCCeilDiv(losses_size, 512L), 4096L)); + dim3 block(512); + + if (losses.numel() == 0) { + THCudaCheck(cudaGetLastError()); + return losses; + } + + AT_DISPATCH_FLOATING_TYPES(logits.scalar_type(), "SigmoidFocalLoss_forward", [&] { + SigmoidFocalLossForward<<>>( + losses_size, + logits.contiguous().data_ptr(), + targets.contiguous().data_ptr(), + num_classes, + gamma, + alpha, + losses.data_ptr()); + }); + THCudaCheck(cudaGetLastError()); + return losses; +} + + +at::Tensor SigmoidFocalLoss_backward_cuda( + const at::Tensor& logits, + const at::Tensor& targets, + const at::Tensor& d_losses, + const float gamma, + const float alpha) { + AT_ASSERTM(logits.is_cuda(), "logits must be a CUDA tensor"); + AT_ASSERTM(targets.is_cuda(), "targets must be a CUDA tensor"); + AT_ASSERTM(d_losses.is_cuda(), "d_losses must be a CUDA tensor"); + + AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); + AT_ASSERTM(targets.dim() == 2, "targets should be NxClass"); + AT_ASSERTM(logits.size(0) == targets.size(0) && logits.size(1) == targets.size(1), + "targets should have exactly the same shape with logits."); + + const int num_samples = logits.size(0); + const int num_classes = logits.size(1); + + auto d_logits = at::zeros({num_samples, num_classes}, logits.options()); + auto d_logits_size = static_cast(num_samples) * num_classes; + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min(THCCeilDiv(d_logits_size, 512L), 4096L)); + dim3 block(512); + + if (d_logits.numel() == 0) { + THCudaCheck(cudaGetLastError()); + return d_logits; + } + + AT_DISPATCH_FLOATING_TYPES(logits.scalar_type(), "SigmoidFocalLoss_backward", [&] { + SigmoidFocalLossBackward<<>>( + d_logits_size, + logits.contiguous().data_ptr(), + targets.contiguous().data_ptr(), + d_losses.contiguous().data_ptr(), + num_classes, + gamma, + alpha, + d_logits.data_ptr()); + }); + + THCudaCheck(cudaGetLastError()); + return d_logits; +} + diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/cuda/SoftmaxFocalLoss_cuda.cu b/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/cuda/SoftmaxFocalLoss_cuda.cu new file mode 100644 index 0000000..8569124 --- /dev/null +++ 
b/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/cuda/SoftmaxFocalLoss_cuda.cu @@ -0,0 +1,239 @@ +// This file is modified from https://github.com/pytorch/pytorch/blob/master/modules/detectron/softmax_focal_loss_op.cu +// Jiajun Tang +// yelantingfeng@sjtu.edu.cn +#include +#include + +#include +#include +#include + +#include + +// TODO make it in a common file +#define CUDA_1D_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ + i += blockDim.x * gridDim.x) + +template +__global__ void SpatialSoftmaxForward(const int nthreads, + const T* Xdata, + T* Pdata, + const int num_classes) { + CUDA_1D_KERNEL_LOOP(i, nthreads) { + // Subtract max on each cell for numerical reasons + T max_val = -FLT_MAX; + for(int c = i * num_classes; c < (i + 1) * num_classes; ++c){ + max_val = max(max_val, Xdata[c]); + } + // Exponentiate + T expsum = 0.0; + for(int c = i * num_classes; c < (i + 1) * num_classes; ++c){ + T expx = exp(Xdata[c] - max_val); + Pdata[c] = expx; + expsum += expx; + } + // Normalize + for(int c = i * num_classes; c < (i + 1) * num_classes; ++c){ + Pdata[c] /= expsum; + } + } +} + +template +__global__ void SoftmaxFocalLossForward(const int nthreads, + const T* Pdata, + const int* targets, + const int num_classes, + const float gamma, + const float alpha, + T* losses) { + CUDA_1D_KERNEL_LOOP(n, nthreads) { + + const int label = static_cast(targets[n]); + + // alpha flag. + T af1 = (alpha >= 0); + T af2 = (1.0 - af1); + + T z = ((label == 0) * (1 - alpha) + + (label >= 1) * alpha) * af1 + af2; + + losses[n] = 0.0; + if (label >= 0){ + int idx = n * num_classes + label; + losses[n] = + -(pow(1.0 - Pdata[idx], gamma) * + log(max(Pdata[idx], FLT_MIN))) * z; + } + } // CUDA_1D_KERNEL_LOOP +} // SoftmaxFocalLossForward + +template +__global__ void SoftmaxFocalLossBackwardWeight(const int nthreads, + const T* Pdata, + const int* targets, + const int num_classes, + const float gamma, + const float alpha, + T* buff) { + CUDA_1D_KERNEL_LOOP(n, nthreads) { + + const int label = static_cast(targets[n]); + + // alpha flag. + T af1 = (alpha >= 0); + T af2 = (1.0 - af1); + + T z = ((label == 0) * (1 - alpha) + + (label >= 1) * alpha) * af1 + af2; + + buff[n] = 0.0; + if (label >= 0) { + int idx = n * num_classes + label; + T onemp = 1. 
- Pdata[idx]; + T p = Pdata[idx]; + buff[n] = + (-pow(onemp, gamma) + + gamma * pow(onemp, gamma - 1) * p * log(max(p, FLT_MIN))) * z; + } + } +} + +template +__global__ void SoftmaxFocalLossBackward(const int nthreads, + const T* Pdata, + const int* targets, + const T* d_losses, + const T* buff, + const int num_classes, + const float gamma, + const float alpha, + T* d_logits) { + CUDA_1D_KERNEL_LOOP(i, nthreads) { + + int n = i / num_classes; + int c = i % num_classes; + T d_loss = d_losses[n]; + + const int label = static_cast(targets[n]); + + T c1 = (label >= 0) * 1.0; + T c2 = (label == c) * 1.0; + d_logits[i] = c1 * d_loss * buff[n] * (c2 - Pdata[i]); + } // CUDA_1D_KERNEL_LOOP +} // SoftmaxFocalLossBackward + + +std::tuple SoftmaxFocalLoss_forward_cuda( + const at::Tensor& logits, + const at::Tensor& targets, + const float gamma, + const float alpha) { + AT_ASSERTM(logits.is_cuda(), "logits must be a CUDA tensor"); + AT_ASSERTM(targets.is_cuda(), "targets must be a CUDA tensor"); + AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); + AT_ASSERTM(targets.dim() == 1, "targets should be N"); + AT_ASSERTM(logits.size(0) == targets.size(0), + "dim(0) of targets should be the same as dim(0) of logits."); + + const int num_samples = logits.size(0); + const int num_classes = logits.size(1); + + auto losses = at::empty({num_samples}, logits.options()); + auto losses_size = static_cast(num_samples); + auto P = at::empty({num_samples, num_classes}, logits.options()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min(THCCeilDiv(losses_size, 512L), 4096L)); + dim3 block(512); + + if (losses.numel() == 0) { + THCudaCheck(cudaGetLastError()); + return std::make_tuple(losses, P); + } + + AT_DISPATCH_FLOATING_TYPES(logits.scalar_type(), "SpatialSoftmax_forward", [&] { + SpatialSoftmaxForward<<>>( + losses_size, + logits.contiguous().data_ptr(), + P.data_ptr(), + num_classes); + }); + + AT_DISPATCH_FLOATING_TYPES(logits.scalar_type(), "SoftmaxFocalLoss_forward", [&] { + SoftmaxFocalLossForward<<>>( + losses_size, + P.data_ptr(), + targets.contiguous().data_ptr(), + num_classes, + gamma, + alpha, + losses.data_ptr()); + }); + THCudaCheck(cudaGetLastError()); + return std::make_tuple(losses, P); +} + + +at::Tensor SoftmaxFocalLoss_backward_cuda( + const at::Tensor& logits, + const at::Tensor& targets, + const at::Tensor& P, + const at::Tensor& d_losses, + const float gamma, + const float alpha) { + AT_ASSERTM(logits.is_cuda(), "logits must be a CUDA tensor"); + AT_ASSERTM(targets.is_cuda(), "targets must be a CUDA tensor"); + AT_ASSERTM(d_losses.is_cuda(), "d_losses must be a CUDA tensor"); + + AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); + AT_ASSERTM(targets.dim() == 1, "targets should be N"); + AT_ASSERTM(logits.size(0) == targets.size(0), + "dim(0) of targets should be the same as dim(0) of logits."); + + const int num_samples = logits.size(0); + const int num_classes = logits.size(1); + + auto buff = at::zeros({num_samples},logits.options()); + auto buff_size = static_cast(num_samples); + auto d_logits = at::zeros({num_samples, num_classes}, logits.options()); + auto d_logits_size = static_cast(num_samples) * num_classes; + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid1(std::min(THCCeilDiv(buff_size, 512L), 4096L)); + dim3 grid2(std::min(THCCeilDiv(d_logits_size, 512L), 4096L)); + dim3 block(512); + + if (d_logits.numel() == 0) { + THCudaCheck(cudaGetLastError()); + return d_logits; + } + + 
AT_DISPATCH_FLOATING_TYPES(logits.scalar_type(), "SoftmaxFocalLoss_backwardWeight", [&] { + SoftmaxFocalLossBackwardWeight<<>>( + buff_size, + P.contiguous().data_ptr(), + targets.contiguous().data_ptr(), + num_classes, + gamma, + alpha, + buff.data_ptr()); + }); + + AT_DISPATCH_FLOATING_TYPES(logits.scalar_type(), "SoftmaxFocalLoss_backward", [&] { + SoftmaxFocalLossBackward<<>>( + d_logits_size, + P.contiguous().data_ptr(), + targets.contiguous().data_ptr(), + d_losses.contiguous().data_ptr(), + buff.data_ptr(), + num_classes, + gamma, + alpha, + d_logits.data_ptr()); + }); + + THCudaCheck(cudaGetLastError()); + return d_logits; +} diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/cuda/vision.h b/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/cuda/vision.h new file mode 100644 index 0000000..31eeb93 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/cuda/vision.h @@ -0,0 +1,68 @@ +#pragma once +#include + +at::Tensor ROIAlign3d_forward_cuda(const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio); + +at::Tensor ROIAlign3d_backward_cuda(const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int length, + const int height, + const int width, + const int sampling_ratio); + +std::tuple ROIPool3d_forward_cuda(const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width); + +at::Tensor ROIPool3d_backward_cuda(const at::Tensor& grad, + const at::Tensor& input, + const at::Tensor& rois, + const at::Tensor& argmax, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int length, + const int height, + const int width); + + +at::Tensor SigmoidFocalLoss_forward_cuda( + const at::Tensor& logits, + const at::Tensor& targets, + const float gamma, + const float alpha); + +at::Tensor SigmoidFocalLoss_backward_cuda( + const at::Tensor& logits, + const at::Tensor& targets, + const at::Tensor& d_losses, + const float gamma, + const float alpha); + +std::tuple SoftmaxFocalLoss_forward_cuda( + const at::Tensor& logits, + const at::Tensor& targets, + const float gamma, + const float alpha); + +at::Tensor SoftmaxFocalLoss_backward_cuda( + const at::Tensor& logits, + const at::Tensor& targets, + const at::Tensor& P, + const at::Tensor& d_losses, + const float gamma, + const float alpha); \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/vision.cpp b/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/vision.cpp new file mode 100644 index 0000000..b681cda --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/csrc/vision.cpp @@ -0,0 +1,16 @@ +#include "ROIAlign3d.h" +#include "ROIPool3d.h" +#include "SoftmaxFocalLoss.h" +#include "SigmoidFocalLoss.h" + + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("roi_align_3d_forward",&ROIAlign3d_forward, "ROIAlign3d_forward"); + m.def("roi_align_3d_backward",&ROIAlign3d_backward, "ROIAlign3d_backward"); + m.def("roi_pool_3d_forward", &ROIPool3d_forward, "ROIPool3d_forward"); + m.def("roi_pool_3d_backward", &ROIPool3d_backward, "ROIPool3d_backward"); + m.def("sigmoid_focalloss_forward", &SigmoidFocalLoss_forward, 
"SigmoidFocalLoss_forward"); + m.def("sigmoid_focalloss_backward", &SigmoidFocalLoss_backward, "SigmoidFocalLoss_backward"); + m.def("softmax_focalloss_forward", &SoftmaxFocalLoss_forward, "SoftmaxFocalLoss_forward"); + m.def("softmax_focalloss_backward", &SoftmaxFocalLoss_backward, "SoftmaxFocalLoss_backward"); +} diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/__init__.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/__init__.py new file mode 100644 index 0000000..ad4821e --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/__init__.py @@ -0,0 +1 @@ +from .build import make_data_loader diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/__pycache__/__init__.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..8f346e6 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/__pycache__/__init__.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/__pycache__/__init__.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000..ff19985 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/__pycache__/__init__.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/__pycache__/build.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/__pycache__/build.cpython-37.pyc new file mode 100644 index 0000000..4dab02e Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/__pycache__/build.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/__pycache__/build.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/__pycache__/build.cpython-39.pyc new file mode 100644 index 0000000..dbebaa9 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/__pycache__/build.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/__pycache__/collate_batch.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/__pycache__/collate_batch.cpython-37.pyc new file mode 100644 index 0000000..5a0d442 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/__pycache__/collate_batch.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/__pycache__/collate_batch.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/__pycache__/collate_batch.cpython-39.pyc new file mode 100644 index 0000000..de8c6c7 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/__pycache__/collate_batch.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/build.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/build.py new file mode 100644 index 0000000..c1024bf --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/build.py @@ -0,0 +1,179 @@ +import bisect +import copy + +import torch.utils.data +from alphaction.utils.comm import get_world_size 
+from alphaction.utils.IA_helper import has_object +import alphaction.config.paths_catalog as paths_catalog + +from . import datasets as D +from . import samplers + +from .collate_batch import BatchCollator +from .transforms import build_transforms, build_object_transforms + +def build_dataset(cfg, dataset_list, transforms, dataset_catalog, is_train=True, object_transforms=None): + """ + Arguments: + cfg: config object for the experiment. + dataset_list (list[str]): Contains the names of the datasets, i.e., + ava_video_train_v2.2, ava_video_val_v2.2, etc.. + transforms (callable): transforms to apply to each (clip, target) sample. + dataset_catalog (DatasetCatalog): contains the information on how to + construct a dataset. + is_train (bool): whether to setup the dataset for training or testing. + object_transforms: transforms to apply to object boxes. + """ + if not isinstance(dataset_list, (list, tuple)): + raise RuntimeError( + "dataset_list should be a list of strings, got {}".format(dataset_list) + ) + datasets = [] + for dataset_name in dataset_list: + data = dataset_catalog.get(dataset_name) + factory = getattr(D, data["factory"]) + args = data["args"] + if data["factory"] == "AVAVideoDataset": + # for AVA, we want to remove clips without annotations + # during training + args["remove_clips_without_annotations"] = is_train + args["frame_span"] = cfg.INPUT.FRAME_NUM*cfg.INPUT.FRAME_SAMPLE_RATE + if not is_train: + args["box_thresh"] = cfg.TEST.BOX_THRESH + args["action_thresh"] = cfg.TEST.ACTION_THRESH + else: + # disable box_file when train, use only gt to train + args["box_file"] = None + if has_object(cfg.MODEL.IA_STRUCTURE): + args["object_transforms"] = object_transforms + else: + args["object_file"] = None + + args["transforms"] = transforms + # make dataset from factory + dataset = factory(**args) + datasets.append(dataset) + + # for testing, return a list of datasets + if not is_train: + return datasets + + # for training, concatenate all datasets into a single one + dataset = datasets[0] + if len(datasets) > 1: + dataset = D.ConcatDataset(datasets) + + return [dataset] + + +def make_data_sampler(dataset, shuffle, distributed): + if distributed: + return samplers.DistributedSampler(dataset, shuffle=shuffle) + if shuffle: + sampler = torch.utils.data.sampler.RandomSampler(dataset) + else: + sampler = torch.utils.data.sampler.SequentialSampler(dataset) + return sampler + + +def _quantize(x, bins): + bins = copy.copy(bins) + bins = sorted(bins) + quantized = list(map(lambda y: bisect.bisect_right(bins, y), x)) + return quantized + + +def _compute_aspect_ratios(dataset): + aspect_ratios = [] + for i in range(len(dataset)): + video_info = dataset.get_video_info(i) + aspect_ratio = float(video_info["height"]) / float(video_info["width"]) + aspect_ratios.append(aspect_ratio) + return aspect_ratios + + +def make_batch_data_sampler( + dataset, sampler, aspect_grouping, videos_per_batch, num_iters=None, start_iter=0, drop_last=False +): + if aspect_grouping: + if not isinstance(aspect_grouping, (list, tuple)): + aspect_grouping = [aspect_grouping] + aspect_ratios = _compute_aspect_ratios(dataset) + group_ids = _quantize(aspect_ratios, aspect_grouping) + batch_sampler = samplers.GroupedBatchSampler( + sampler, group_ids, videos_per_batch, drop_uneven=drop_last + ) + else: + batch_sampler = torch.utils.data.sampler.BatchSampler( + sampler, videos_per_batch, drop_last=drop_last + ) + if num_iters is not None: + batch_sampler = samplers.IterationBasedBatchSampler( + batch_sampler, 
num_iters, start_iter + ) + return batch_sampler + + +def make_data_loader(cfg, is_train=True, is_distributed=False, start_iter=0): + num_gpus = get_world_size() + if is_train: + # for training + videos_per_batch = cfg.SOLVER.VIDEOS_PER_BATCH + assert ( + videos_per_batch % num_gpus == 0 + ), "SOLVER.VIDEOS_PER_BATCH ({}) must be divisible by the number " + "of GPUs ({}) used.".format(videos_per_batch, num_gpus) + videos_per_gpu = videos_per_batch // num_gpus + shuffle = True + drop_last = True + num_iters = cfg.SOLVER.MAX_ITER + else: + # for testing + videos_per_batch = cfg.TEST.VIDEOS_PER_BATCH + assert ( + videos_per_batch % num_gpus == 0 + ), "TEST.VIDEOS_PER_BATCH ({}) must be divisible by the number " + "of GPUs ({}) used.".format(videos_per_batch, num_gpus) + videos_per_gpu = videos_per_batch // num_gpus + shuffle = False if not is_distributed else True + drop_last = False + num_iters = None + start_iter = 0 + + # group images which have similar aspect ratio. In this case, we only + # group in two cases: those with width / height > 1, and the other way around, + # but the code supports more general grouping strategy + aspect_grouping = [1] if cfg.DATALOADER.ASPECT_RATIO_GROUPING else [] + + DatasetCatalog = paths_catalog.DatasetCatalog + dataset_list = cfg.DATASETS.TRAIN if is_train else cfg.DATASETS.TEST + + # build dataset + transforms = build_transforms(cfg, is_train) + if has_object(cfg.MODEL.IA_STRUCTURE): + object_transforms = build_object_transforms(cfg, is_train=is_train) + else: + object_transforms = None + datasets = build_dataset(cfg, dataset_list, transforms, DatasetCatalog, is_train, object_transforms) + + # build sampler and dataloader + data_loaders = [] + for dataset in datasets: + sampler = make_data_sampler(dataset, shuffle, is_distributed) + batch_sampler = make_batch_data_sampler( + dataset, sampler, aspect_grouping, videos_per_gpu, num_iters, start_iter, drop_last + ) + collator = BatchCollator(cfg.DATALOADER.SIZE_DIVISIBILITY) + num_workers = cfg.DATALOADER.NUM_WORKERS + data_loader = torch.utils.data.DataLoader( + dataset, + num_workers=num_workers, + batch_sampler=batch_sampler, + collate_fn=collator, + ) + data_loaders.append(data_loader) + if is_train: + # during training, a single (possibly concatenated) data_loader is returned + assert len(data_loaders) == 1 + return data_loaders[0] + return data_loaders \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/collate_batch.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/collate_batch.py new file mode 100644 index 0000000..9fab560 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/collate_batch.py @@ -0,0 +1,47 @@ +import math + + +def batch_different_videos(videos, size_divisible=0): + ''' + :param videos: a list of video tensors + :param size_divisible: output_size(width and height) should be divisble by this param + :return: batched videos as a single tensor + ''' + assert isinstance(videos, (tuple, list)) + max_size = tuple(max(s) for s in zip(*[clip.shape for clip in videos])) + + if size_divisible > 0: + stride = size_divisible + max_size = list(max_size) + max_size[2] = int(math.ceil(max_size[2] / stride) * stride) + max_size[3] = int(math.ceil(max_size[3] / stride) * stride) + max_size = tuple(max_size) + + batch_shape = (len(videos),) + max_size + batched_clips = videos[0].new(*batch_shape).zero_() + for clip, pad_clip in zip(videos, batched_clips): + pad_clip[:clip.shape[0], 
:clip.shape[1], :clip.shape[2], :clip.shape[3]].copy_(clip) + + return batched_clips + + +class BatchCollator(object): + """ + From a list of samples from the dataset, + returns the batched objectimages and targets. + This should be passed to the DataLoader + """ + + def __init__(self, size_divisible=0): + self.divisible = size_divisible + self.size_divisible = self.divisible + + def __call__(self, batch): + transposed_batch = list(zip(*batch)) + slow_clips = batch_different_videos(transposed_batch[0], self.size_divisible) + fast_clips = batch_different_videos(transposed_batch[1], self.size_divisible) + boxes = transposed_batch[2] + objects = transposed_batch[3] + extras = transposed_batch[4] + clip_ids = transposed_batch[5] + return slow_clips, fast_clips, boxes, objects, extras, clip_ids diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/__init__.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/__init__.py new file mode 100644 index 0000000..e903065 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/__init__.py @@ -0,0 +1,4 @@ +from .concat_dataset import ConcatDataset +from .ava import AVAVideoDataset + +__all__ = ["ConcatDataset", "AVAVideoDataset"] \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/__pycache__/__init__.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..07dc311 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/__pycache__/__init__.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/__pycache__/__init__.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000..25a3807 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/__pycache__/__init__.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/__pycache__/ava.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/__pycache__/ava.cpython-37.pyc new file mode 100644 index 0000000..43eaeb9 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/__pycache__/ava.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/__pycache__/ava.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/__pycache__/ava.cpython-39.pyc new file mode 100644 index 0000000..f294674 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/__pycache__/ava.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/__pycache__/concat_dataset.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/__pycache__/concat_dataset.cpython-37.pyc new file mode 100644 index 0000000..4311a03 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/__pycache__/concat_dataset.cpython-37.pyc differ diff --git 
a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/__pycache__/concat_dataset.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/__pycache__/concat_dataset.cpython-39.pyc new file mode 100644 index 0000000..a0d27de Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/__pycache__/concat_dataset.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/ava.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/ava.py new file mode 100644 index 0000000..49506a1 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/ava.py @@ -0,0 +1,292 @@ +import os +import torch.utils.data as data +import time +import torch +import numpy as np +from alphaction.structures.bounding_box import BoxList +from collections import defaultdict +from alphaction.utils.video_decode import av_decode_video + +import json + +# This is used to avoid pytorch issuse #13246 +class NpInfoDict(object): + def __init__(self, info_dict, key_type=None, value_type=None): + keys = sorted(list(info_dict.keys())) + self.key_arr = np.array(keys, dtype=key_type) + self.val_arr = np.array([info_dict[k] for k in keys], dtype=value_type) + # should not be used in dataset __getitem__ + self._key_idx_map = {k:i for i,k in enumerate(keys)} + def __getitem__(self, idx): + return self.key_arr[idx], self.val_arr[idx] + def update_value(self, idx, value): + self.key_arr[idx],self.val_arr[idx] = value + def __len__(self): + return len(self.key_arr) + def convert_key(self, org_key): + # convert relevant variable whose original value is in the key set. + # should not be used in __getitem__ + return self._key_idx_map[org_key] + +# This is used to avoid pytorch issuse #13246 +class NpBoxDict(object): + def __init__(self, id_to_box_dict, key_list=None, value_types=[]): + value_fields, value_types = list(zip(*value_types)) + assert "bbox" in value_fields + + if key_list is None: + key_list = sorted(list(id_to_box_dict.keys())) + self.length = len(key_list) + + pointer_list = [] + value_lists = {field: [] for field in value_fields} + cur = 0 + pointer_list.append(cur) + for k in key_list: + box_infos = id_to_box_dict[k] + cur += len(box_infos) + pointer_list.append(cur) + for box_info in box_infos: + for field in value_fields: + value_lists[field].append(box_info[field]) + self.pointer_arr = np.array(pointer_list, dtype=np.int32) + self.attr_names = np.array(["vfield_" + field for field in value_fields]) + for field_name, value_type, attr_name in zip(value_fields, value_types, self.attr_names): + setattr(self, attr_name, np.array(value_lists[field_name], dtype=value_type)) + + def __getitem__(self, idx): + l_pointer = self.pointer_arr[idx] + r_pointer = self.pointer_arr[idx + 1] + ret_val = [getattr(self, attr_name)[l_pointer:r_pointer] for attr_name in self.attr_names] + return ret_val + + def __len__(self): + return self.length + +class AVAVideoDataset(data.Dataset): + def __init__(self, video_root, ann_file, remove_clips_without_annotations, frame_span, box_file=None, eval_file_paths={}, + box_thresh=0.0, action_thresh=0.0, transforms=None, object_file=None, object_transforms=None,): + + print('loading annotations into memory...') + tic = time.time() + json_dict = json.load(open(ann_file, 'r')) + assert type(json_dict) == dict, 'annotation file format {} not supported'.format(type(json_dict)) + print('Done 
(t={:0.2f}s)'.format(time.time() - tic)) + + self.video_root = video_root + self.transforms = transforms + self.frame_span = frame_span + + # These two attributes are used during ava evaluation... + # Maybe there is a better implementation + self.eval_file_paths = eval_file_paths + self.action_thresh = action_thresh + + clip2ann = defaultdict(list) + if "annotations" in json_dict: + for ann in json_dict["annotations"]: + action_ids = ann["action_ids"] + one_hot = np.zeros(81, dtype=np.bool) + one_hot[action_ids] = True + packed_act = np.packbits(one_hot[1:]) + clip2ann[ann["image_id"]].append(dict(bbox=ann["bbox"], packed_act=packed_act)) + + movies_size = {} + clips_info = {} + for img in json_dict["images"]: + mov = img["movie"] + if mov not in movies_size: + movies_size[mov] = [img["width"], img["height"]] + clips_info[img["id"]] = [mov, img["timestamp"]] + self.movie_info = NpInfoDict(movies_size, value_type=np.int32) + clip_ids = sorted(list(clips_info.keys())) + + if remove_clips_without_annotations: + clip_ids = [clip_id for clip_id in clip_ids if clip_id in clip2ann] + + if box_file: + # this is only for validation or testing + # we use detected boxes, so remove clips without boxes detected. + imgToBoxes = self.load_box_file(box_file, box_thresh) + clip_ids = [ + img_id + for img_id in clip_ids + if len(imgToBoxes[img_id]) > 0 + ] + self.det_persons = NpBoxDict(imgToBoxes, clip_ids, + value_types=[("bbox", np.float32), ("score", np.float32)]) + else: + self.det_persons = None + + if object_file: + imgToObjects = self.load_box_file(object_file) + self.det_objects = NpBoxDict(imgToObjects, clip_ids, + value_types=[("bbox", np.float32), ("score", np.float32)]) + else: + self.det_objects = None + + if object_transforms: + self.object_transforms = object_transforms + else: + self.object_transforms = None + + self.anns = NpBoxDict(clip2ann, clip_ids, value_types=[("bbox", np.float32), ("packed_act", np.uint8)]) + + clips_info = { + clip_id: + [ + self.movie_info.convert_key(clips_info[clip_id][0]), + clips_info[clip_id][1] + ] for clip_id in clip_ids + } + self.clips_info = NpInfoDict(clips_info, value_type=np.int32) + + + def __getitem__(self, idx): + + _, clip_info = self.clips_info[idx] + + # mov_id is the id in self.movie_info + mov_id, timestamp = clip_info + # movie_id is the human-readable youtube id. + movie_id, movie_size = self.movie_info[mov_id] + video_data = self._decode_video_data(movie_id, timestamp) + + im_w, im_h = movie_size + + if self.det_persons is None: + # Note: During training, we only use gt. Thus we should not provide box file, + # otherwise we will use only box file instead. + + boxes, packed_act = self.anns[idx] + + boxes_tensor = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4) # guard against no boxes + boxes = BoxList(boxes_tensor, (im_w, im_h), mode="xywh").convert("xyxy") + + # Decode the packed bits from uint8 to one hot, since AVA has 80 classes, + # it can be exactly denoted with 10 bytes, otherwise we may need to discard some bits. 
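As the comment above says, the 80 AVA action classes (entries 1..80 of the 81-way one-hot vector built in __init__, with the background slot dropped) pack exactly into 10 bytes. A standalone sketch of the np.packbits / np.unpackbits round trip used here:

```python
# Standalone illustration of the label packing used by AVAVideoDataset.
import numpy as np

action_ids = [12, 17, 80]                          # example AVA action ids in 1..80
one_hot = np.zeros(81, dtype=bool)
one_hot[action_ids] = True
packed = np.packbits(one_hot[1:])                  # drop index 0, 80 bits -> 10 uint8 bytes
restored = np.unpackbits(packed[None, :], axis=1)  # back to shape (1, 80), dtype uint8
assert (restored[0].astype(bool) == one_hot[1:]).all()
```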
+ one_hot_label = np.unpackbits(packed_act, axis=1) + one_hot_label = torch.as_tensor(one_hot_label, dtype=torch.uint8) + + boxes.add_field("labels", one_hot_label) + + else: + boxes, box_score = self.det_persons[idx] + boxes_tensor = torch.as_tensor(boxes).reshape(-1, 4) + boxes = BoxList(boxes_tensor, (im_w, im_h), mode="xywh").convert("xyxy") + + boxes = boxes.clip_to_image(remove_empty=True) + # extra fields + extras = {} + + if self.transforms is not None: + video_data, boxes, transform_randoms = self.transforms(video_data, boxes) + slow_video, fast_video = video_data + + objects = None + if self.det_objects is not None: + objects = self.get_objects(idx, im_w, im_h) + if self.object_transforms is not None: + objects = self.object_transforms(objects, transform_randoms) + + # add infos neccessary for memory feature + extras["movie_id"] = movie_id + extras["timestamp"] = timestamp + + return slow_video, fast_video, boxes, objects, extras, idx + + return video_data, boxes, idx, movie_id, timestamp + + def return_null_box(self, im_w, im_h): + return BoxList(torch.zeros((0, 4)), (im_w, im_h), mode="xyxy") + + def get_objects(self, idx, im_w, im_h): + obj_boxes = self.return_null_box(im_w, im_h) + if hasattr(self, 'det_objects'): + boxes, box_score = self.det_objects[idx] + + if len(box_score) == 0: + return obj_boxes + obj_boxes_tensor = torch.as_tensor(boxes).reshape(-1, 4) + obj_boxes = BoxList(obj_boxes_tensor, (im_w, im_h), mode="xywh").convert("xyxy") + + scores = torch.as_tensor(box_score) + obj_boxes.add_field("scores", scores) + + return obj_boxes + + def get_video_info(self, index): + _, clip_info = self.clips_info[index] + # mov_id is the id in self.movie_info + mov_id, timestamp = clip_info + # movie_id is the human-readable youtube id. + movie_id, movie_size = self.movie_info[mov_id] + w, h = movie_size + return dict(width=w, height=h, movie=movie_id, timestamp=timestamp) + + def load_box_file(self, box_file, score_thresh=0.0): + import json + + print('Loading box file into memory...') + tic = time.time() + with open(box_file, "r") as f: + box_results = json.load(f) + print('Done (t={:0.2f}s)'.format(time.time() - tic)) + + boxImgIds = [box['image_id'] for box in box_results] + + imgToBoxes = defaultdict(list) + for img_id, box in zip(boxImgIds, box_results): + if box['score'] >= score_thresh: + imgToBoxes[img_id].append(box) + return imgToBoxes + + def _decode_video_data(self, dirname, timestamp): + # decode target video data from segment per second. + + video_folder = os.path.join(self.video_root, dirname) + right_span = self.frame_span//2 + left_span = self.frame_span - right_span + + #load right + cur_t = timestamp + right_frames = [] + while len(right_frames)= score_thresh) + boxes = boxes[box_ids, :] + scores = scores[box_ids, action_ids] + action_ids = action_ids + 1 + + movie_name = video_info['movie'] + timestamp = video_info['timestamp'] + + clip_key = make_image_key(movie_name, timestamp) + + ava_results[clip_key] = { + "boxes": boxes, + "scores": scores, + "action_ids": action_ids + } + return ava_results + + +def read_exclusions(exclusions_file): + """Reads a CSV file of excluded timestamps. + + Args: + exclusions_file: Path of file containing a csv of video-id,timestamp. + + Returns: + A set of strings containing excluded image keys, e.g. "aaaaaaaaaaa,0904", + or an empty set if exclusions file is None. 
+ """ + excluded = set() + exclusions_file = open(exclusions_file, 'r') + if exclusions_file: + reader = csv.reader(exclusions_file) + for row in reader: + assert len(row) == 2, "Expected only 2 columns, got: {}".format(row) + excluded.add(make_image_key(row[0], row[1])) + return excluded + + +def read_labelmap(labelmap_file): + """Reads a labelmap without the dependency on protocol buffers. + + Args: + labelmap_file: Path of file containing a label map protocol buffer. + + Returns: + labelmap: The label map in the form used by the object_detection_evaluation + module - a list of {"id": integer, "name": classname } dicts. + class_ids: A set containing all of the valid class id integers. + """ + labelmap = [] + class_ids = set() + name = "" + class_id = "" + labelmap_file = open(labelmap_file, 'r') + for line in labelmap_file: + if line.startswith(" name:"): + name = line.split('"')[1] + elif line.startswith(" id:") or line.startswith(" label_id:"): + class_id = int(line.strip().split(" ")[-1]) + labelmap.append({"id": class_id, "name": name}) + class_ids.add(class_id) + return labelmap, class_ids + + +def read_csv(csv_file, logger, class_whitelist=None): + """Loads boxes and class labels from a CSV file in the AVA format. + + CSV file format described at https://research.google.com/ava/download.html. + + Args: + csv_file: Path of csv file. + class_whitelist: If provided, boxes corresponding to (integer) class labels + not in this set are skipped. + + Returns: + boxes: A dictionary mapping each unique image key (string) to a list of + boxes, given as coordinates [y1, x1, y2, x2]. + labels: A dictionary mapping each unique image key (string) to a list of + integer class lables, matching the corresponding box in `boxes`. + scores: A dictionary mapping each unique image key (string) to a list of + score values lables, matching the corresponding label in `labels`. If + scores are not provided in the csv, then they will default to 1.0. 
+ """ + start = time.time() + boxes = defaultdict(list) + labels = defaultdict(list) + scores = defaultdict(list) + csv_file = open(csv_file, 'r') + reader = csv.reader(csv_file) + for row in reader: + assert len(row) in [7, 8], "Wrong number of columns: " + row + image_key = make_image_key(row[0], row[1]) + x1, y1, x2, y2 = [float(n) for n in row[2:6]] + action_id = int(row[6]) + if class_whitelist and action_id not in class_whitelist: + continue + score = 1.0 + if len(row) == 8: + score = float(row[7]) + boxes[image_key].append([y1, x1, y2, x2]) + labels[image_key].append(action_id) + scores[image_key].append(score) + print_time(logger, "read file " + csv_file.name, start) + return boxes, labels, scores + + +def write_csv(ava_results, csv_result_file, logger): + start = time.time() + with open(csv_result_file, 'w') as csv_file: + spamwriter = csv.writer(csv_file, delimiter=',') + for clip_key in ava_results: + movie_name, timestamp = decode_image_key(clip_key) + cur_result = ava_results[clip_key] + boxes = cur_result["boxes"] + scores = cur_result["scores"] + action_ids = cur_result["action_ids"] + assert boxes.shape[0] == scores.shape[0] == action_ids.shape[0] + for box, score, action_id in zip(boxes, scores, action_ids): + box_str = ['{:.5f}'.format(cord) for cord in box] + score_str = '{:.5f}'.format(score) + spamwriter.writerow([movie_name, timestamp, ] + box_str + [action_id, score_str]) + print_time(logger, "write file " + csv_result_file, start) + + +def print_time(logger, message, start): + logger.info("==> %g seconds to %s", time.time() - start, message) + + +def evaluate_predictions_on_ava(eval_file_paths, ava_results, csv_result_file, logger): + write_csv(ava_results, csv_result_file, logger) + + groundtruth = eval_file_paths["csv_gt_file"] + labelmap = eval_file_paths["labelmap_file"] + exclusions = eval_file_paths["exclusion_file"] + + categories, class_whitelist = read_labelmap(labelmap) + logger.info("CATEGORIES (%d):\n%s", len(categories), + pformat(categories, indent=2)) + excluded_keys = read_exclusions(exclusions) + + pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator( + categories) + + # Reads the ground truth dataset. + boxes, labels, _ = read_csv(groundtruth, logger, class_whitelist) + start = time.time() + for image_key in boxes: + if image_key in excluded_keys: + logger.info(("Found excluded timestamp in ground truth: %s. " + "It will be ignored."), image_key) + continue + pascal_evaluator.add_single_ground_truth_image_info( + image_key, { + standard_fields.InputDataFields.groundtruth_boxes: + np.array(boxes[image_key], dtype=float), + standard_fields.InputDataFields.groundtruth_classes: + np.array(labels[image_key], dtype=int), + standard_fields.InputDataFields.groundtruth_difficult: + np.zeros(len(boxes[image_key]), dtype=bool) + }) + print_time(logger, "convert groundtruth", start) + + # Reads detections dataset. + boxes, labels, scores = read_csv(csv_result_file, logger, class_whitelist) + start = time.time() + for image_key in boxes: + if image_key in excluded_keys: + logger.info(("Found excluded timestamp in detections: %s. 
" + "It will be ignored."), image_key) + continue + pascal_evaluator.add_single_detected_image_info( + image_key, { + standard_fields.DetectionResultFields.detection_boxes: + np.array(boxes[image_key], dtype=float), + standard_fields.DetectionResultFields.detection_classes: + np.array(labels[image_key], dtype=int), + standard_fields.DetectionResultFields.detection_scores: + np.array(scores[image_key], dtype=float) + }) + print_time(logger, "convert detections", start) + + start = time.time() + metrics = pascal_evaluator.evaluate() + print_time(logger, "run_evaluator", start) + return metrics diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__init__.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/__init__.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..5a083f1 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/__init__.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/__init__.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000..42a5ad8 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/__init__.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/label_map_util.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/label_map_util.cpython-37.pyc new file mode 100644 index 0000000..2b25157 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/label_map_util.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/label_map_util.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/label_map_util.cpython-39.pyc new file mode 100644 index 0000000..0f30d9c Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/label_map_util.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/metrics.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/metrics.cpython-37.pyc new file mode 100644 index 0000000..4f46cb3 Binary files /dev/null and 
b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/metrics.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/metrics.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/metrics.cpython-39.pyc new file mode 100644 index 0000000..6f44f3e Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/metrics.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_list.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_list.cpython-37.pyc new file mode 100644 index 0000000..a2feb1b Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_list.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_list.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_list.cpython-39.pyc new file mode 100644 index 0000000..5e329f0 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_list.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_list_ops.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_list_ops.cpython-37.pyc new file mode 100644 index 0000000..e51dfb5 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_list_ops.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_list_ops.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_list_ops.cpython-39.pyc new file mode 100644 index 0000000..315e80f Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_list_ops.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_mask_list.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_mask_list.cpython-37.pyc new file mode 100644 index 0000000..4bb9cc0 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_mask_list.cpython-37.pyc differ diff --git 
a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_mask_list.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_mask_list.cpython-39.pyc new file mode 100644 index 0000000..12c611f Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_mask_list.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_mask_list_ops.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_mask_list_ops.cpython-37.pyc new file mode 100644 index 0000000..f9e58e8 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_mask_list_ops.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_mask_list_ops.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_mask_list_ops.cpython-39.pyc new file mode 100644 index 0000000..6422d0c Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_mask_list_ops.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_ops.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_ops.cpython-37.pyc new file mode 100644 index 0000000..24dfa73 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_ops.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_ops.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_ops.cpython-39.pyc new file mode 100644 index 0000000..c6ec186 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_box_ops.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_mask_ops.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_mask_ops.cpython-37.pyc new file mode 100644 index 0000000..1c45935 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_mask_ops.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_mask_ops.cpython-39.pyc 
b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_mask_ops.cpython-39.pyc new file mode 100644 index 0000000..5bbc527 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/np_mask_ops.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/object_detection_evaluation.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/object_detection_evaluation.cpython-37.pyc new file mode 100644 index 0000000..8e4f8b0 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/object_detection_evaluation.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/object_detection_evaluation.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/object_detection_evaluation.cpython-39.pyc new file mode 100644 index 0000000..4855927 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/object_detection_evaluation.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/per_image_evaluation.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/per_image_evaluation.cpython-37.pyc new file mode 100644 index 0000000..6895236 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/per_image_evaluation.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/per_image_evaluation.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/per_image_evaluation.cpython-39.pyc new file mode 100644 index 0000000..9c13349 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/per_image_evaluation.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/standard_fields.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/standard_fields.cpython-37.pyc new file mode 100644 index 0000000..5cfb9d6 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/standard_fields.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/standard_fields.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/standard_fields.cpython-39.pyc 
new file mode 100644 index 0000000..71614c9 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/__pycache__/standard_fields.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/label_map_util.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/label_map_util.py new file mode 100644 index 0000000..e6a8e22 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/label_map_util.py @@ -0,0 +1,177 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Label map utility functions.""" + +import logging +logger = logging.getLogger("alphaction.inference") + +# from google.protobuf import text_format +# from google3.third_party.tensorflow_models.object_detection.protos import string_int_label_map_pb2 + + +def _validate_label_map(label_map): + """Checks if a label map is valid. + + Args: + label_map: StringIntLabelMap to validate. + + Raises: + ValueError: if label map is invalid. + """ + for item in label_map.item: + if item.id < 1: + raise ValueError('Label map ids should be >= 1.') + + +def create_category_index(categories): + """Creates dictionary of COCO compatible categories keyed by category id. + + Args: + categories: a list of dicts, each of which has the following keys: + 'id': (required) an integer id uniquely identifying this category. + 'name': (required) string representing category name + e.g., 'cat', 'dog', 'pizza'. + + Returns: + category_index: a dict containing the same entries as categories, but keyed + by the 'id' field of each category. + """ + category_index = {} + for cat in categories: + category_index[cat['id']] = cat + return category_index + + +def get_max_label_map_index(label_map): + """Get maximum index in label map. + + Args: + label_map: a StringIntLabelMapProto + + Returns: + an integer + """ + return max([item.id for item in label_map.item]) + + +def convert_label_map_to_categories(label_map, + max_num_classes, + use_display_name=True): + """Loads label map proto and returns categories list compatible with eval. + + This function loads a label map and returns a list of dicts, each of which + has the following keys: + 'id': (required) an integer id uniquely identifying this category. + 'name': (required) string representing category name + e.g., 'cat', 'dog', 'pizza'. + We only allow class into the list if its id-label_id_offset is + between 0 (inclusive) and max_num_classes (exclusive). + If there are several items mapping to the same id in the label map, + we will only keep the first one in the categories list. + + Args: + label_map: a StringIntLabelMapProto or None. 
If None, a default categories + list is created with max_num_classes categories. + max_num_classes: maximum number of (consecutive) label indices to include. + use_display_name: (boolean) choose whether to load 'display_name' field + as category name. If False or if the display_name field does not exist, + uses 'name' field as category names instead. + Returns: + categories: a list of dictionaries representing all possible categories. + """ + categories = [] + list_of_ids_already_added = [] + if not label_map: + label_id_offset = 1 + for class_id in range(max_num_classes): + categories.append({ + 'id': class_id + label_id_offset, + 'name': 'category_{}'.format(class_id + label_id_offset) + }) + return categories + for item in label_map.item: + if not 0 < item.id <= max_num_classes: + logger.info('Ignore item %d since it falls outside of requested ' + 'label range.', item.id) + continue + if use_display_name and item.HasField('display_name'): + name = item.display_name + else: + name = item.name + if item.id not in list_of_ids_already_added: + list_of_ids_already_added.append(item.id) + categories.append({'id': item.id, 'name': name}) + return categories + + +def load_labelmap(path): + """Loads label map proto. + + Args: + path: path to StringIntLabelMap proto text file. + Returns: + a StringIntLabelMapProto + """ + with open(path, 'r') as fid: + label_map_string = fid.read() + label_map = string_int_label_map_pb2.StringIntLabelMap() + try: + text_format.Merge(label_map_string, label_map) + except text_format.ParseError: + label_map.ParseFromString(label_map_string) + _validate_label_map(label_map) + return label_map + + +def get_label_map_dict(label_map_path, use_display_name=False): + """Reads a label map and returns a dictionary of label names to id. + + Args: + label_map_path: path to label_map. + use_display_name: whether to use the label map items' display names as keys. + + Returns: + A dictionary mapping label names to id. + """ + label_map = load_labelmap(label_map_path) + label_map_dict = {} + for item in label_map.item: + if use_display_name: + label_map_dict[item.display_name] = item.id + else: + label_map_dict[item.name] = item.id + return label_map_dict + + +def create_category_index_from_labelmap(label_map_path): + """Reads a label map and returns a category index. + + Args: + label_map_path: Path to `StringIntLabelMap` proto text file. + + Returns: + A category index, which is a dictionary that maps integer ids to dicts + containing categories, e.g. + {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}, ...} + """ + label_map = load_labelmap(label_map_path) + max_num_classes = max(item.id for item in label_map.item) + categories = convert_label_map_to_categories(label_map, max_num_classes) + return create_category_index(categories) + + +def create_class_agnostic_category_index(): + """Creates a category index with a single `object` class.""" + return {1: {'id': 1, 'name': 'object'}} diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/metrics.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/metrics.py new file mode 100644 index 0000000..e8ae886 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/metrics.py @@ -0,0 +1,145 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
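label_map_util is mostly glue around the label map proto, but the two dictionary helpers can be exercised without any protobuf at all, since convert_label_map_to_categories builds placeholder categories when label_map is None. Note that load_labelmap itself would need the commented-out text_format / string_int_label_map_pb2 imports restored before it can run. A small usage sketch:

```python
# Usage sketch for the helpers above; no label map file is needed when label_map is None.
from alphaction.dataset.datasets.evaluation.ava.pascal_evaluation.label_map_util import (
    convert_label_map_to_categories, create_category_index)

categories = convert_label_map_to_categories(None, max_num_classes=3)
# -> [{'id': 1, 'name': 'category_1'}, {'id': 2, 'name': 'category_2'},
#     {'id': 3, 'name': 'category_3'}]
index = create_category_index(categories)
# -> {1: {'id': 1, 'name': 'category_1'}, 2: {...}, 3: {...}}
```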
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Functions for computing metrics like precision, recall, CorLoc and etc.""" +from __future__ import division + +import numpy as np + + +def compute_precision_recall(scores, labels, num_gt): + """Compute precision and recall. + + Args: + scores: A float numpy array representing detection score + labels: A boolean numpy array representing true/false positive labels + num_gt: Number of ground truth instances + + Raises: + ValueError: if the input is not of the correct format + + Returns: + precision: Fraction of positive instances over detected ones. This value is + None if no ground truth labels are present. + recall: Fraction of detected positive instance over all positive instances. + This value is None if no ground truth labels are present. + + """ + if not isinstance( + labels, np.ndarray) or labels.dtype != np.bool or len(labels.shape) != 1: + raise ValueError("labels must be single dimension bool numpy array") + + if not isinstance( + scores, np.ndarray) or len(scores.shape) != 1: + raise ValueError("scores must be single dimension numpy array") + + if num_gt < np.sum(labels): + raise ValueError("Number of true positives must be smaller than num_gt.") + + if len(scores) != len(labels): + raise ValueError("scores and labels must be of the same size.") + + if num_gt == 0: + return None, None + + sorted_indices = np.argsort(scores) + sorted_indices = sorted_indices[::-1] + labels = labels.astype(int) + true_positive_labels = labels[sorted_indices] + false_positive_labels = 1 - true_positive_labels + cum_true_positives = np.cumsum(true_positive_labels) + cum_false_positives = np.cumsum(false_positive_labels) + precision = cum_true_positives.astype(float) / ( + cum_true_positives + cum_false_positives) + recall = cum_true_positives.astype(float) / num_gt + return precision, recall + + +def compute_average_precision(precision, recall): + """Compute Average Precision according to the definition in VOCdevkit. + + Precision is modified to ensure that it does not decrease as recall + decrease. + + Args: + precision: A float [N, 1] numpy array of precisions + recall: A float [N, 1] numpy array of recalls + + Raises: + ValueError: if the input is not of the correct format + + Returns: + average_precison: The area under the precision recall curve. NaN if + precision and recall are None. 
+ + """ + if precision is None: + if recall is not None: + raise ValueError("If precision is None, recall must also be None") + return np.NAN + + if not isinstance(precision, np.ndarray) or not isinstance(recall, + np.ndarray): + raise ValueError("precision and recall must be numpy array") + if precision.dtype != np.float or recall.dtype != np.float: + raise ValueError("input must be float numpy array.") + if len(precision) != len(recall): + raise ValueError("precision and recall must be of the same size.") + if not precision.size: + return 0.0 + if np.amin(precision) < 0 or np.amax(precision) > 1: + raise ValueError("Precision must be in the range of [0, 1].") + if np.amin(recall) < 0 or np.amax(recall) > 1: + raise ValueError("recall must be in the range of [0, 1].") + if not all(recall[i] <= recall[i + 1] for i in range(len(recall) - 1)): + raise ValueError("recall must be a non-decreasing array") + + recall = np.concatenate([[0], recall, [1]]) + precision = np.concatenate([[0], precision, [0]]) + + # Preprocess precision to be a non-decreasing array + for i in range(len(precision) - 2, -1, -1): + precision[i] = np.maximum(precision[i], precision[i + 1]) + + indices = np.where(recall[1:] != recall[:-1])[0] + 1 + average_precision = np.sum( + (recall[indices] - recall[indices - 1]) * precision[indices]) + return average_precision + + +def compute_cor_loc(num_gt_imgs_per_class, + num_images_correctly_detected_per_class): + """Compute CorLoc according to the definition in the following paper. + + https://www.robots.ox.ac.uk/~vgg/rg/papers/deselaers-eccv10.pdf + + Returns nans if there are no ground truth images for a class. + + Args: + num_gt_imgs_per_class: 1D array, representing number of images containing + at least one object instance of a particular class + num_images_correctly_detected_per_class: 1D array, representing number of + images that are correctly detected at least one object instance of a + particular class + + Returns: + corloc_per_class: A float numpy array represents the corloc score of each + class + """ + # Divide by zero expected for classes with no gt examples. + with np.errstate(divide="ignore", invalid="ignore"): + return np.where( + num_gt_imgs_per_class == 0, np.nan, + num_images_correctly_detected_per_class / num_gt_imgs_per_class) diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/np_box_list.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/np_box_list.py new file mode 100644 index 0000000..a755cc5 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/np_box_list.py @@ -0,0 +1,133 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Numpy BoxList classes and functions.""" + +import numpy as np + + +class BoxList(object): + """Box collection. + + BoxList represents a list of bounding boxes as numpy array, where each + bounding box is represented as a row of 4 numbers, + [y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes within a + given list correspond to a single image. + + Optionally, users can add additional related fields (such as + objectness/classification scores). + """ + + def __init__(self, data): + """Constructs box collection. + + Args: + data: a numpy array of shape [N, 4] representing box coordinates + + Raises: + ValueError: if bbox dataset is not a numpy array + ValueError: if invalid dimensions for bbox dataset + """ + if not isinstance(data, np.ndarray): + raise ValueError('dataset must be a numpy array.') + if len(data.shape) != 2 or data.shape[1] != 4: + raise ValueError('Invalid dimensions for box dataset.') + if data.dtype != np.float32 and data.dtype != np.float64: + raise ValueError('Invalid dataset type for box dataset: float is required.') + if not self._is_valid_boxes(data): + raise ValueError('Invalid box dataset. dataset must be a numpy array of ' + 'N*[y_min, x_min, y_max, x_max]') + self.data = {'boxes': data} + + def num_boxes(self): + """Return number of boxes held in collections.""" + return self.data['boxes'].shape[0] + + def get_extra_fields(self): + """Return all non-box fields.""" + return [k for k in self.data.keys() if k != 'boxes'] + + def has_field(self, field): + return field in self.data + + def add_field(self, field, field_data): + """Add dataset to a specified field. + + Args: + field: a string parameter used to speficy a related field to be accessed. + field_data: a numpy array of [N, ...] representing the dataset associated + with the field. + Raises: + ValueError: if the field is already exist or the dimension of the field + dataset does not matches the number of boxes. + """ + if self.has_field(field): + raise ValueError('Field ' + field + 'already exists') + if len(field_data.shape) < 1 or field_data.shape[0] != self.num_boxes(): + raise ValueError('Invalid dimensions for field dataset') + self.data[field] = field_data + + def get(self): + """Convenience function for accesssing box coordinates. + + Returns: + a numpy array of shape [N, 4] representing box corners + """ + return self.get_field('boxes') + + def get_field(self, field): + """Accesses dataset associated with the specified field in the box collection. + + Args: + field: a string parameter used to speficy a related field to be accessed. + + Returns: + a numpy 1-d array representing dataset of an associated field + + Raises: + ValueError: if invalid field + """ + if not self.has_field(field): + raise ValueError('field {} does not exist'.format(field)) + return self.data[field] + + def get_coordinates(self): + """Get corner coordinates of boxes. + + Returns: + a list of 4 1-d numpy arrays [y_min, x_min, y_max, x_max] + """ + box_coordinates = self.get() + y_min = box_coordinates[:, 0] + x_min = box_coordinates[:, 1] + y_max = box_coordinates[:, 2] + x_max = box_coordinates[:, 3] + return [y_min, x_min, y_max, x_max] + + def _is_valid_boxes(self, data): + """Check whether dataset fullfills the format of N*[ymin, xmin, ymax, xmin]. 
+ + Args: + data: a numpy array of shape [N, 4] representing box coordinates + + Returns: + a boolean indicating whether all ymax of boxes are equal or greater than + ymin, and all xmax of boxes are equal or greater than xmin. + """ + if data.shape[0] > 0: + for i in range(data.shape[0]): + if data[i, 0] > data[i, 2] or data[i, 1] > data[i, 3]: + return False + return True diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/np_box_list_ops.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/np_box_list_ops.py new file mode 100644 index 0000000..062ecdb --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/np_box_list_ops.py @@ -0,0 +1,553 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Bounding Box List operations for Numpy BoxLists. + +Example box operations that are supported: + * Areas: compute bounding box areas + * IOU: pairwise intersection-over-union scores +""" +import numpy as np + +from . import np_box_list, np_box_ops + + +class SortOrder(object): + """Enum class for sort order. + + Attributes: + ascend: ascend order. + descend: descend order. + """ + ASCEND = 1 + DESCEND = 2 + + +def area(boxlist): + """Computes area of boxes. + + Args: + boxlist: BoxList holding N boxes + + Returns: + a numpy array with shape [N*1] representing box areas + """ + y_min, x_min, y_max, x_max = boxlist.get_coordinates() + return (y_max - y_min) * (x_max - x_min) + + +def intersection(boxlist1, boxlist2): + """Compute pairwise intersection areas between boxes. + + Args: + boxlist1: BoxList holding N boxes + boxlist2: BoxList holding M boxes + + Returns: + a numpy array with shape [N*M] representing pairwise intersection area + """ + return np_box_ops.intersection(boxlist1.get(), boxlist2.get()) + + +def iou(boxlist1, boxlist2): + """Computes pairwise intersection-over-union between box collections. + + Args: + boxlist1: BoxList holding N boxes + boxlist2: BoxList holding M boxes + + Returns: + a numpy array with shape [N, M] representing pairwise iou scores. + """ + return np_box_ops.iou(boxlist1.get(), boxlist2.get()) + + +def ioa(boxlist1, boxlist2): + """Computes pairwise intersection-over-area between box collections. + + Intersection-over-area (ioa) between two boxes box1 and box2 is defined as + their intersection area over box2's area. Note that ioa is not symmetric, + that is, IOA(box1, box2) != IOA(box2, box1). + + Args: + boxlist1: BoxList holding N boxes + boxlist2: BoxList holding M boxes + + Returns: + a numpy array with shape [N, M] representing pairwise ioa scores. 
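+
+  Example (an illustrative sketch; the boxes and values below are assumed):
+
+    b1 = np_box_list.BoxList(np.array([[0., 0., 1., 1.]], dtype=np.float32))
+    b2 = np_box_list.BoxList(np.array([[0., 0., .5, 1.]], dtype=np.float32))
+    ioa(b1, b2)  # -> [[1.0]]: intersection 0.5 over area(b2) = 0.5
+    ioa(b2, b1)  # -> [[0.5]]: intersection 0.5 over area(b1) = 1.0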
+ """ + return np_box_ops.ioa(boxlist1.get(), boxlist2.get()) + + +def gather(boxlist, indices, fields=None): + """Gather boxes from BoxList according to indices and return new BoxList. + + By default, gather returns boxes corresponding to the input index list, as + well as all additional fields stored in the boxlist (indexing into the + first dimension). However one can optionally only gather from a + subset of fields. + + Args: + boxlist: BoxList holding N boxes + indices: a 1-d numpy array of type int_ + fields: (optional) list of fields to also gather from. If None (default), + all fields are gathered from. Pass an empty fields list to only gather + the box coordinates. + + Returns: + subboxlist: a BoxList corresponding to the subset of the input BoxList + specified by indices + + Raises: + ValueError: if specified field is not contained in boxlist or if the + indices are not of type int_ + """ + if indices.size: + if np.amax(indices) >= boxlist.num_boxes() or np.amin(indices) < 0: + raise ValueError('indices are out of valid range.') + subboxlist = np_box_list.BoxList(boxlist.get()[indices, :]) + if fields is None: + fields = boxlist.get_extra_fields() + for field in fields: + extra_field_data = boxlist.get_field(field) + subboxlist.add_field(field, extra_field_data[indices, ...]) + return subboxlist + + +def sort_by_field(boxlist, field, order=SortOrder.DESCEND): + """Sort boxes and associated fields according to a scalar field. + + A common use case is reordering the boxes according to descending scores. + + Args: + boxlist: BoxList holding N boxes. + field: A BoxList field for sorting and reordering the BoxList. + order: (Optional) 'descend' or 'ascend'. Default is descend. + + Returns: + sorted_boxlist: A sorted BoxList with the field in the specified order. + + Raises: + ValueError: if specified field does not exist or is not of single dimension. + ValueError: if the order is not either descend or ascend. + """ + if not boxlist.has_field(field): + raise ValueError('Field ' + field + ' does not exist') + if len(boxlist.get_field(field).shape) != 1: + raise ValueError('Field ' + field + 'should be single dimension.') + if order != SortOrder.DESCEND and order != SortOrder.ASCEND: + raise ValueError('Invalid sort order') + + field_to_sort = boxlist.get_field(field) + sorted_indices = np.argsort(field_to_sort) + if order == SortOrder.DESCEND: + sorted_indices = sorted_indices[::-1] + return gather(boxlist, sorted_indices) + + +def non_max_suppression(boxlist, + max_output_size=10000, + iou_threshold=1.0, + score_threshold=-10.0): + """Non maximum suppression. + + This op greedily selects a subset of detection bounding boxes, pruning + away boxes that have high IOU (intersection over union) overlap (> thresh) + with already selected boxes. In each iteration, the detected bounding box with + highest score in the available pool is selected. + + Args: + boxlist: BoxList holding N boxes. Must contain a 'scores' field + representing detection scores. All scores belong to the same class. + max_output_size: maximum number of retained boxes + iou_threshold: intersection over union threshold. + score_threshold: minimum score threshold. Remove the boxes with scores + less than this value. Default value is set to -10. A very + low threshold to pass pretty much all the boxes, unless + the user sets a different score threshold. 
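+
+  Example (an illustrative sketch; the boxes, scores and thresholds below are
+  assumed):
+
+    boxlist = np_box_list.BoxList(
+        np.array([[0., 0., 1., 1.], [0., 0., 1., .9]], dtype=np.float32))
+    boxlist.add_field('scores', np.array([.9, .8], dtype=np.float32))
+    kept = non_max_suppression(boxlist, iou_threshold=0.5, score_threshold=0.)
+    # Only the first box is kept: the second overlaps it with IOU 0.9 > 0.5.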
+ + Returns: + a BoxList holding M boxes where M <= max_output_size + Raises: + ValueError: if 'scores' field does not exist + ValueError: if threshold is not in [0, 1] + ValueError: if max_output_size < 0 + """ + if not boxlist.has_field('scores'): + raise ValueError('Field scores does not exist') + if iou_threshold < 0. or iou_threshold > 1.0: + raise ValueError('IOU threshold must be in [0, 1]') + if max_output_size < 0: + raise ValueError('max_output_size must be bigger than 0.') + + boxlist = filter_scores_greater_than(boxlist, score_threshold) + if boxlist.num_boxes() == 0: + return boxlist + + boxlist = sort_by_field(boxlist, 'scores') + + # Prevent further computation if NMS is disabled. + if iou_threshold == 1.0: + if boxlist.num_boxes() > max_output_size: + selected_indices = np.arange(max_output_size) + return gather(boxlist, selected_indices) + else: + return boxlist + + boxes = boxlist.get() + num_boxes = boxlist.num_boxes() + # is_index_valid is True only for all remaining valid boxes, + is_index_valid = np.full(num_boxes, 1, dtype=bool) + selected_indices = [] + num_output = 0 + for i in range(num_boxes): + if num_output < max_output_size: + if is_index_valid[i]: + num_output += 1 + selected_indices.append(i) + is_index_valid[i] = False + valid_indices = np.where(is_index_valid)[0] + if valid_indices.size == 0: + break + + intersect_over_union = np_box_ops.iou( + np.expand_dims(boxes[i, :], axis=0), boxes[valid_indices, :]) + intersect_over_union = np.squeeze(intersect_over_union, axis=0) + is_index_valid[valid_indices] = np.logical_and( + is_index_valid[valid_indices], + intersect_over_union <= iou_threshold) + return gather(boxlist, np.array(selected_indices)) + + +def multi_class_non_max_suppression(boxlist, score_thresh, iou_thresh, + max_output_size): + """Multi-class version of non maximum suppression. + + This op greedily selects a subset of detection bounding boxes, pruning + away boxes that have high IOU (intersection over union) overlap (> thresh) + with already selected boxes. It operates independently for each class for + which scores are provided (via the scores field of the input box_list), + pruning boxes with score less than a provided threshold prior to + applying NMS. + + Args: + boxlist: BoxList holding N boxes. Must contain a 'scores' field + representing detection scores. This scores field is a tensor that can + be 1 dimensional (in the case of a single class) or 2-dimensional, which + which case we assume that it takes the shape [num_boxes, num_classes]. + We further assume that this rank is known statically and that + scores.shape[1] is also known (i.e., the number of classes is fixed + and known at graph construction time). + score_thresh: scalar threshold for score (low scoring boxes are removed). + iou_thresh: scalar threshold for IOU (boxes that that high IOU overlap + with previously selected boxes are removed). + max_output_size: maximum number of retained boxes per class. + + Returns: + a BoxList holding M boxes with a rank-1 scores field representing + corresponding scores for each box with scores sorted in decreasing order + and a rank-1 classes field representing a class label for each box. + Raises: + ValueError: if iou_thresh is not in [0, 1] or if input boxlist does not have + a valid scores field. 
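+
+  Example (an illustrative usage sketch; boxes, scores and thresholds are
+  assumed):
+
+    boxes = np.array([[0., 0., 1., 1.], [0., 0., .9, .9]], dtype=np.float32)
+    boxlist = np_box_list.BoxList(boxes)
+    boxlist.add_field(
+        'scores', np.array([[.9, .1], [.2, .8]], dtype=np.float32))
+    result = multi_class_non_max_suppression(
+        boxlist, score_thresh=0.5, iou_thresh=0.5, max_output_size=10)
+    # result keeps box 0 for class 0 and box 1 for class 1, with rank-1
+    # 'scores' and 'classes' fields sorted by score.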
+ """ + if not 0 <= iou_thresh <= 1.0: + raise ValueError('thresh must be between 0 and 1') + if not isinstance(boxlist, np_box_list.BoxList): + raise ValueError('boxlist must be a BoxList') + if not boxlist.has_field('scores'): + raise ValueError('input boxlist must have \'scores\' field') + scores = boxlist.get_field('scores') + if len(scores.shape) == 1: + scores = np.reshape(scores, [-1, 1]) + elif len(scores.shape) == 2: + if scores.shape[1] is None: + raise ValueError('scores field must have statically defined second ' + 'dimension') + else: + raise ValueError('scores field must be of rank 1 or 2') + num_boxes = boxlist.num_boxes() + num_scores = scores.shape[0] + num_classes = scores.shape[1] + + if num_boxes != num_scores: + raise ValueError('Incorrect scores field length: actual vs expected.') + + selected_boxes_list = [] + for class_idx in range(num_classes): + boxlist_and_class_scores = np_box_list.BoxList(boxlist.get()) + class_scores = np.reshape(scores[0:num_scores, class_idx], [-1]) + boxlist_and_class_scores.add_field('scores', class_scores) + boxlist_filt = filter_scores_greater_than(boxlist_and_class_scores, + score_thresh) + nms_result = non_max_suppression(boxlist_filt, + max_output_size=max_output_size, + iou_threshold=iou_thresh, + score_threshold=score_thresh) + nms_result.add_field( + 'classes', np.zeros_like(nms_result.get_field('scores')) + class_idx) + selected_boxes_list.append(nms_result) + selected_boxes = concatenate(selected_boxes_list) + sorted_boxes = sort_by_field(selected_boxes, 'scores') + return sorted_boxes + + +def scale(boxlist, y_scale, x_scale): + """Scale box coordinates in x and y dimensions. + + Args: + boxlist: BoxList holding N boxes + y_scale: float + x_scale: float + + Returns: + boxlist: BoxList holding N boxes + """ + y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1) + y_min = y_scale * y_min + y_max = y_scale * y_max + x_min = x_scale * x_min + x_max = x_scale * x_max + scaled_boxlist = np_box_list.BoxList(np.hstack([y_min, x_min, y_max, x_max])) + + fields = boxlist.get_extra_fields() + for field in fields: + extra_field_data = boxlist.get_field(field) + scaled_boxlist.add_field(field, extra_field_data) + + return scaled_boxlist + + +def clip_to_window(boxlist, window): + """Clip bounding boxes to a window. + + This op clips input bounding boxes (represented by bounding box + corners) to a window, optionally filtering out boxes that do not + overlap at all with the window. + + Args: + boxlist: BoxList holding M_in boxes + window: a numpy array of shape [4] representing the + [y_min, x_min, y_max, x_max] window to which the op + should clip boxes. 
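+
+  Example (an illustrative sketch; the box and window values are assumed):
+
+    boxlist = np_box_list.BoxList(
+        np.array([[-0.2, -0.2, 0.5, 0.5]], dtype=np.float32))
+    clip_to_window(boxlist, np.array([0., 0., 1., 1.])).get()
+    # -> [[0., 0., 0.5, 0.5]]: coordinates are clamped to the window, and
+    # boxes left with zero area are dropped.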
+ + Returns: + a BoxList holding M_out boxes where M_out <= M_in + """ + y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1) + win_y_min = window[0] + win_x_min = window[1] + win_y_max = window[2] + win_x_max = window[3] + y_min_clipped = np.fmax(np.fmin(y_min, win_y_max), win_y_min) + y_max_clipped = np.fmax(np.fmin(y_max, win_y_max), win_y_min) + x_min_clipped = np.fmax(np.fmin(x_min, win_x_max), win_x_min) + x_max_clipped = np.fmax(np.fmin(x_max, win_x_max), win_x_min) + clipped = np_box_list.BoxList( + np.hstack([y_min_clipped, x_min_clipped, y_max_clipped, x_max_clipped])) + clipped = _copy_extra_fields(clipped, boxlist) + areas = area(clipped) + nonzero_area_indices = np.reshape(np.nonzero(np.greater(areas, 0.0)), + [-1]).astype(np.int32) + return gather(clipped, nonzero_area_indices) + + +def prune_non_overlapping_boxes(boxlist1, boxlist2, minoverlap=0.0): + """Prunes the boxes in boxlist1 that overlap less than thresh with boxlist2. + + For each box in boxlist1, we want its IOA to be more than minoverlap with + at least one of the boxes in boxlist2. If it does not, we remove it. + + Args: + boxlist1: BoxList holding N boxes. + boxlist2: BoxList holding M boxes. + minoverlap: Minimum required overlap between boxes, to count them as + overlapping. + + Returns: + A pruned boxlist with size [N', 4]. + """ + intersection_over_area = ioa(boxlist2, boxlist1) # [M, N] tensor + intersection_over_area = np.amax(intersection_over_area, axis=0) # [N] tensor + keep_bool = np.greater_equal(intersection_over_area, np.array(minoverlap)) + keep_inds = np.nonzero(keep_bool)[0] + new_boxlist1 = gather(boxlist1, keep_inds) + return new_boxlist1 + + +def prune_outside_window(boxlist, window): + """Prunes bounding boxes that fall outside a given window. + + This function prunes bounding boxes that even partially fall outside the given + window. See also ClipToWindow which only prunes bounding boxes that fall + completely outside the window, and clips any bounding boxes that partially + overflow. + + Args: + boxlist: a BoxList holding M_in boxes. + window: a numpy array of size 4, representing [ymin, xmin, ymax, xmax] + of the window. + + Returns: + pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in. + valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes + in the input tensor. + """ + + y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1) + win_y_min = window[0] + win_x_min = window[1] + win_y_max = window[2] + win_x_max = window[3] + coordinate_violations = np.hstack([np.less(y_min, win_y_min), + np.less(x_min, win_x_min), + np.greater(y_max, win_y_max), + np.greater(x_max, win_x_max)]) + valid_indices = np.reshape( + np.where(np.logical_not(np.max(coordinate_violations, axis=1))), [-1]) + return gather(boxlist, valid_indices), valid_indices + + +def concatenate(boxlists, fields=None): + """Concatenate list of BoxLists. + + This op concatenates a list of input BoxLists into a larger BoxList. It also + handles concatenation of BoxList fields as long as the field tensor shapes + are equal except for the first dimension. + + Args: + boxlists: list of BoxList objects + fields: optional list of fields to also concatenate. By default, all + fields from the first BoxList in the list are included in the + concatenation. 
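+
+  Example (an illustrative sketch; the inputs are assumed):
+
+    a = np_box_list.BoxList(np.array([[0., 0., 1., 1.]], dtype=np.float32))
+    b = np_box_list.BoxList(np.array([[0., 0., .5, .5]], dtype=np.float32))
+    concatenate([a, b]).num_boxes()  # -> 2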
+ + Returns: + a BoxList with number of boxes equal to + sum([boxlist.num_boxes() for boxlist in BoxList]) + Raises: + ValueError: if boxlists is invalid (i.e., is not a list, is empty, or + contains non BoxList objects), or if requested fields are not contained in + all boxlists + """ + if not isinstance(boxlists, list): + raise ValueError('boxlists should be a list') + if not boxlists: + raise ValueError('boxlists should have nonzero length') + for boxlist in boxlists: + if not isinstance(boxlist, np_box_list.BoxList): + raise ValueError('all elements of boxlists should be BoxList objects') + concatenated = np_box_list.BoxList( + np.vstack([boxlist.get() for boxlist in boxlists])) + if fields is None: + fields = boxlists[0].get_extra_fields() + for field in fields: + first_field_shape = boxlists[0].get_field(field).shape + first_field_shape = first_field_shape[1:] + for boxlist in boxlists: + if not boxlist.has_field(field): + raise ValueError('boxlist must contain all requested fields') + field_shape = boxlist.get_field(field).shape + field_shape = field_shape[1:] + if field_shape != first_field_shape: + raise ValueError('field %s must have same shape for all boxlists ' + 'except for the 0th dimension.' % field) + concatenated_field = np.concatenate( + [boxlist.get_field(field) for boxlist in boxlists], axis=0) + concatenated.add_field(field, concatenated_field) + return concatenated + + +def filter_scores_greater_than(boxlist, thresh): + """Filter to keep only boxes with score exceeding a given threshold. + + This op keeps the collection of boxes whose corresponding scores are + greater than the input threshold. + + Args: + boxlist: BoxList holding N boxes. Must contain a 'scores' field + representing detection scores. + thresh: scalar threshold + + Returns: + a BoxList holding M boxes where M <= N + + Raises: + ValueError: if boxlist not a BoxList object or if it does not + have a scores field + """ + if not isinstance(boxlist, np_box_list.BoxList): + raise ValueError('boxlist must be a BoxList') + if not boxlist.has_field('scores'): + raise ValueError('input boxlist must have \'scores\' field') + scores = boxlist.get_field('scores') + if len(scores.shape) > 2: + raise ValueError('Scores should have rank 1 or 2') + if len(scores.shape) == 2 and scores.shape[1] != 1: + raise ValueError('Scores should have rank 1 or have shape ' + 'consistent with [None, 1]') + high_score_indices = np.reshape(np.where(np.greater(scores, thresh)), + [-1]).astype(np.int32) + return gather(boxlist, high_score_indices) + + +def change_coordinate_frame(boxlist, window): + """Change coordinate frame of the boxlist to be relative to window's frame. + + Given a window of the form [ymin, xmin, ymax, xmax], + changes bounding box coordinates from boxlist to be relative to this window + (e.g., the min corner maps to (0,0) and the max corner maps to (1,1)). + + An example use case is dataset augmentation: where we are given groundtruth + boxes (boxlist) and would like to randomly crop the image to some + window (window). In this case we need to change the coordinate frame of + each groundtruth box to be relative to this new window. + + Args: + boxlist: A BoxList object holding N boxes. + window: a size 4 1-D numpy array. + + Returns: + Returns a BoxList object with N boxes. 
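+
+  Example (an illustrative sketch; the box and window values are assumed):
+
+    boxlist = np_box_list.BoxList(
+        np.array([[0.25, 0.25, 0.75, 0.75]], dtype=np.float32))
+    window = np.array([0.25, 0.25, 0.75, 0.75], dtype=np.float32)
+    change_coordinate_frame(boxlist, window).get()
+    # -> [[0., 0., 1., 1.]]: the box spans the full window in the new frame.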
+ """ + win_height = window[2] - window[0] + win_width = window[3] - window[1] + boxlist_new = scale( + np_box_list.BoxList(boxlist.get() - + [window[0], window[1], window[0], window[1]]), + 1.0 / win_height, 1.0 / win_width) + _copy_extra_fields(boxlist_new, boxlist) + + return boxlist_new + + +def _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from): + """Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to. + + Args: + boxlist_to_copy_to: BoxList to which extra fields are copied. + boxlist_to_copy_from: BoxList from which fields are copied. + + Returns: + boxlist_to_copy_to with extra fields. + """ + for field in boxlist_to_copy_from.get_extra_fields(): + boxlist_to_copy_to.add_field(field, boxlist_to_copy_from.get_field(field)) + return boxlist_to_copy_to + + +def _update_valid_indices_by_removing_high_iou_boxes( + selected_indices, is_index_valid, intersect_over_union, threshold): + max_iou = np.max(intersect_over_union[:, selected_indices], axis=1) + return np.logical_and(is_index_valid, max_iou <= threshold) diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/np_box_mask_list.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/np_box_mask_list.py new file mode 100644 index 0000000..303e062 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/np_box_mask_list.py @@ -0,0 +1,63 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Numpy BoxMaskList classes and functions.""" + +import numpy as np +from . import np_box_list + + +class BoxMaskList(np_box_list.BoxList): + """Convenience wrapper for BoxList with masks. + + BoxMaskList extends the np_box_list.BoxList to contain masks as well. + In particular, its constructor receives both boxes and masks. Note that the + masks correspond to the full image. + """ + + def __init__(self, box_data, mask_data): + """Constructs box collection. + + Args: + box_data: a numpy array of shape [N, 4] representing box coordinates + mask_data: a numpy array of shape [N, height, width] representing masks + with values are in {0,1}. The masks correspond to the full + image. The height and the width will be equal to image height and width. 
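+
+    Example (an illustrative sketch; the shapes and values are assumed):
+
+      boxes = np.array([[0., 0., 4., 4.]], dtype=np.float32)
+      masks = np.zeros((1, 8, 8), dtype=np.uint8)
+      masks[0, :4, :4] = 1
+      box_mask_list = BoxMaskList(box_data=boxes, mask_data=masks)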
+ + Raises: + ValueError: if bbox dataset is not a numpy array + ValueError: if invalid dimensions for bbox dataset + ValueError: if mask dataset is not a numpy array + ValueError: if invalid dimension for mask dataset + """ + super(BoxMaskList, self).__init__(box_data) + if not isinstance(mask_data, np.ndarray): + raise ValueError('Mask dataset must be a numpy array.') + if len(mask_data.shape) != 3: + raise ValueError('Invalid dimensions for mask dataset.') + if mask_data.dtype != np.uint8: + raise ValueError('Invalid dataset type for mask dataset: uint8 is required.') + if mask_data.shape[0] != box_data.shape[0]: + raise ValueError('There should be the same number of boxes and masks.') + self.data['masks'] = mask_data + + def get_masks(self): + """Convenience function for accessing masks. + + Returns: + a numpy array of shape [N, height, width] representing masks + """ + return self.get_field('masks') + diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/np_box_mask_list_ops.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/np_box_mask_list_ops.py new file mode 100644 index 0000000..10a9a72 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/np_box_mask_list_ops.py @@ -0,0 +1,397 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Operations for np_box_mask_list.BoxMaskList. + +Example box operations that are supported: + * Areas: compute bounding box areas + * IOU: pairwise intersection-over-union scores +""" +import numpy as np + +from . import np_box_list_ops, np_mask_ops, np_box_mask_list + + +def box_list_to_box_mask_list(boxlist): + """Converts a BoxList containing 'masks' into a BoxMaskList. + + Args: + boxlist: An np_box_list.BoxList object. + + Returns: + An np_box_mask_list.BoxMaskList object. + + Raises: + ValueError: If boxlist does not contain `masks` as a field. + """ + if not boxlist.has_field('masks'): + raise ValueError('boxlist does not contain mask field.') + box_mask_list = np_box_mask_list.BoxMaskList( + box_data=boxlist.get(), + mask_data=boxlist.get_field('masks')) + extra_fields = boxlist.get_extra_fields() + for key in extra_fields: + if key != 'masks': + box_mask_list.data[key] = boxlist.get_field(key) + return box_mask_list + + +def area(box_mask_list): + """Computes area of masks. + + Args: + box_mask_list: np_box_mask_list.BoxMaskList holding N boxes and masks + + Returns: + a numpy array with shape [N*1] representing mask areas + """ + return np_mask_ops.area(box_mask_list.get_masks()) + + +def intersection(box_mask_list1, box_mask_list2): + """Compute pairwise intersection areas between masks. 
+ + Args: + box_mask_list1: BoxMaskList holding N boxes and masks + box_mask_list2: BoxMaskList holding M boxes and masks + + Returns: + a numpy array with shape [N*M] representing pairwise intersection area + """ + return np_mask_ops.intersection(box_mask_list1.get_masks(), + box_mask_list2.get_masks()) + + +def iou(box_mask_list1, box_mask_list2): + """Computes pairwise intersection-over-union between box and mask collections. + + Args: + box_mask_list1: BoxMaskList holding N boxes and masks + box_mask_list2: BoxMaskList holding M boxes and masks + + Returns: + a numpy array with shape [N, M] representing pairwise iou scores. + """ + return np_mask_ops.iou(box_mask_list1.get_masks(), + box_mask_list2.get_masks()) + + +def ioa(box_mask_list1, box_mask_list2): + """Computes pairwise intersection-over-area between box and mask collections. + + Intersection-over-area (ioa) between two masks mask1 and mask2 is defined as + their intersection area over mask2's area. Note that ioa is not symmetric, + that is, IOA(mask1, mask2) != IOA(mask2, mask1). + + Args: + box_mask_list1: np_box_mask_list.BoxMaskList holding N boxes and masks + box_mask_list2: np_box_mask_list.BoxMaskList holding M boxes and masks + + Returns: + a numpy array with shape [N, M] representing pairwise ioa scores. + """ + return np_mask_ops.ioa(box_mask_list1.get_masks(), box_mask_list2.get_masks()) + + +def gather(box_mask_list, indices, fields=None): + """Gather boxes from np_box_mask_list.BoxMaskList according to indices. + + By default, gather returns boxes corresponding to the input index list, as + well as all additional fields stored in the box_mask_list (indexing into the + first dimension). However one can optionally only gather from a + subset of fields. + + Args: + box_mask_list: np_box_mask_list.BoxMaskList holding N boxes + indices: a 1-d numpy array of type int_ + fields: (optional) list of fields to also gather from. If None (default), + all fields are gathered from. Pass an empty fields list to only gather + the box coordinates. + + Returns: + subbox_mask_list: a np_box_mask_list.BoxMaskList corresponding to the subset + of the input box_mask_list specified by indices + + Raises: + ValueError: if specified field is not contained in box_mask_list or if the + indices are not of type int_ + """ + if fields is not None: + if 'masks' not in fields: + fields.append('masks') + return box_list_to_box_mask_list( + np_box_list_ops.gather( + boxlist=box_mask_list, indices=indices, fields=fields)) + + +def sort_by_field(box_mask_list, field, + order=np_box_list_ops.SortOrder.DESCEND): + """Sort boxes and associated fields according to a scalar field. + + A common use case is reordering the boxes according to descending scores. + + Args: + box_mask_list: BoxMaskList holding N boxes. + field: A BoxMaskList field for sorting and reordering the BoxMaskList. + order: (Optional) 'descend' or 'ascend'. Default is descend. + + Returns: + sorted_box_mask_list: A sorted BoxMaskList with the field in the specified + order. + """ + return box_list_to_box_mask_list( + np_box_list_ops.sort_by_field( + boxlist=box_mask_list, field=field, order=order)) + + +def non_max_suppression(box_mask_list, + max_output_size=10000, + iou_threshold=1.0, + score_threshold=-10.0): + """Non maximum suppression. + + This op greedily selects a subset of detection bounding boxes, pruning + away boxes that have high IOU (intersection over union) overlap (> thresh) + with already selected boxes. 
In each iteration, the detected bounding box with + highest score in the available pool is selected. + + Args: + box_mask_list: np_box_mask_list.BoxMaskList holding N boxes. Must contain + a 'scores' field representing detection scores. All scores belong to the + same class. + max_output_size: maximum number of retained boxes + iou_threshold: intersection over union threshold. + score_threshold: minimum score threshold. Remove the boxes with scores + less than this value. Default value is set to -10. A very + low threshold to pass pretty much all the boxes, unless + the user sets a different score threshold. + + Returns: + an np_box_mask_list.BoxMaskList holding M boxes where M <= max_output_size + + Raises: + ValueError: if 'scores' field does not exist + ValueError: if threshold is not in [0, 1] + ValueError: if max_output_size < 0 + """ + if not box_mask_list.has_field('scores'): + raise ValueError('Field scores does not exist') + if iou_threshold < 0. or iou_threshold > 1.0: + raise ValueError('IOU threshold must be in [0, 1]') + if max_output_size < 0: + raise ValueError('max_output_size must be bigger than 0.') + + box_mask_list = filter_scores_greater_than(box_mask_list, score_threshold) + if box_mask_list.num_boxes() == 0: + return box_mask_list + + box_mask_list = sort_by_field(box_mask_list, 'scores') + + # Prevent further computation if NMS is disabled. + if iou_threshold == 1.0: + if box_mask_list.num_boxes() > max_output_size: + selected_indices = np.arange(max_output_size) + return gather(box_mask_list, selected_indices) + else: + return box_mask_list + + masks = box_mask_list.get_masks() + num_masks = box_mask_list.num_boxes() + + # is_index_valid is True only for all remaining valid boxes, + is_index_valid = np.full(num_masks, 1, dtype=bool) + selected_indices = [] + num_output = 0 + for i in range(num_masks): + if num_output < max_output_size: + if is_index_valid[i]: + num_output += 1 + selected_indices.append(i) + is_index_valid[i] = False + valid_indices = np.where(is_index_valid)[0] + if valid_indices.size == 0: + break + + intersect_over_union = np_mask_ops.iou( + np.expand_dims(masks[i], axis=0), masks[valid_indices]) + intersect_over_union = np.squeeze(intersect_over_union, axis=0) + is_index_valid[valid_indices] = np.logical_and( + is_index_valid[valid_indices], + intersect_over_union <= iou_threshold) + return gather(box_mask_list, np.array(selected_indices)) + + +def multi_class_non_max_suppression(box_mask_list, score_thresh, iou_thresh, + max_output_size): + """Multi-class version of non maximum suppression. + + This op greedily selects a subset of detection bounding boxes, pruning + away boxes that have high IOU (intersection over union) overlap (> thresh) + with already selected boxes. It operates independently for each class for + which scores are provided (via the scores field of the input box_list), + pruning boxes with score less than a provided threshold prior to + applying NMS. + + Args: + box_mask_list: np_box_mask_list.BoxMaskList holding N boxes. Must contain a + 'scores' field representing detection scores. This scores field is a + tensor that can be 1 dimensional (in the case of a single class) or + 2-dimensional, in which case we assume that it takes the + shape [num_boxes, num_classes]. We further assume that this rank is known + statically and that scores.shape[1] is also known (i.e., the number of + classes is fixed and known at graph construction time). + score_thresh: scalar threshold for score (low scoring boxes are removed). 
+ iou_thresh: scalar threshold for IOU (boxes that that high IOU overlap + with previously selected boxes are removed). + max_output_size: maximum number of retained boxes per class. + + Returns: + a box_mask_list holding M boxes with a rank-1 scores field representing + corresponding scores for each box with scores sorted in decreasing order + and a rank-1 classes field representing a class label for each box. + Raises: + ValueError: if iou_thresh is not in [0, 1] or if input box_mask_list does + not have a valid scores field. + """ + if not 0 <= iou_thresh <= 1.0: + raise ValueError('thresh must be between 0 and 1') + if not isinstance(box_mask_list, np_box_mask_list.BoxMaskList): + raise ValueError('box_mask_list must be a box_mask_list') + if not box_mask_list.has_field('scores'): + raise ValueError('input box_mask_list must have \'scores\' field') + scores = box_mask_list.get_field('scores') + if len(scores.shape) == 1: + scores = np.reshape(scores, [-1, 1]) + elif len(scores.shape) == 2: + if scores.shape[1] is None: + raise ValueError('scores field must have statically defined second ' + 'dimension') + else: + raise ValueError('scores field must be of rank 1 or 2') + + num_boxes = box_mask_list.num_boxes() + num_scores = scores.shape[0] + num_classes = scores.shape[1] + + if num_boxes != num_scores: + raise ValueError('Incorrect scores field length: actual vs expected.') + + selected_boxes_list = [] + for class_idx in range(num_classes): + box_mask_list_and_class_scores = np_box_mask_list.BoxMaskList( + box_data=box_mask_list.get(), + mask_data=box_mask_list.get_masks()) + class_scores = np.reshape(scores[0:num_scores, class_idx], [-1]) + box_mask_list_and_class_scores.add_field('scores', class_scores) + box_mask_list_filt = filter_scores_greater_than( + box_mask_list_and_class_scores, score_thresh) + nms_result = non_max_suppression( + box_mask_list_filt, + max_output_size=max_output_size, + iou_threshold=iou_thresh, + score_threshold=score_thresh) + nms_result.add_field( + 'classes', + np.zeros_like(nms_result.get_field('scores')) + class_idx) + selected_boxes_list.append(nms_result) + selected_boxes = np_box_list_ops.concatenate(selected_boxes_list) + sorted_boxes = np_box_list_ops.sort_by_field(selected_boxes, 'scores') + return box_list_to_box_mask_list(boxlist=sorted_boxes) + + +def prune_non_overlapping_masks(box_mask_list1, box_mask_list2, minoverlap=0.0): + """Prunes the boxes in list1 that overlap less than thresh with list2. + + For each mask in box_mask_list1, we want its IOA to be more than minoverlap + with at least one of the masks in box_mask_list2. If it does not, we remove + it. If the masks are not full size image, we do the pruning based on boxes. + + Args: + box_mask_list1: np_box_mask_list.BoxMaskList holding N boxes and masks. + box_mask_list2: np_box_mask_list.BoxMaskList holding M boxes and masks. + minoverlap: Minimum required overlap between boxes, to count them as + overlapping. + + Returns: + A pruned box_mask_list with size [N', 4]. + """ + intersection_over_area = ioa(box_mask_list2, box_mask_list1) # [M, N] tensor + intersection_over_area = np.amax(intersection_over_area, axis=0) # [N] tensor + keep_bool = np.greater_equal(intersection_over_area, np.array(minoverlap)) + keep_inds = np.nonzero(keep_bool)[0] + new_box_mask_list1 = gather(box_mask_list1, keep_inds) + return new_box_mask_list1 + + +def concatenate(box_mask_lists, fields=None): + """Concatenate list of box_mask_lists. 
+ + This op concatenates a list of input box_mask_lists into a larger + box_mask_list. It also + handles concatenation of box_mask_list fields as long as the field tensor + shapes are equal except for the first dimension. + + Args: + box_mask_lists: list of np_box_mask_list.BoxMaskList objects + fields: optional list of fields to also concatenate. By default, all + fields from the first BoxMaskList in the list are included in the + concatenation. + + Returns: + a box_mask_list with number of boxes equal to + sum([box_mask_list.num_boxes() for box_mask_list in box_mask_list]) + Raises: + ValueError: if box_mask_lists is invalid (i.e., is not a list, is empty, or + contains non box_mask_list objects), or if requested fields are not + contained in all box_mask_lists + """ + if fields is not None: + if 'masks' not in fields: + fields.append('masks') + return box_list_to_box_mask_list( + np_box_list_ops.concatenate(boxlists=box_mask_lists, fields=fields)) + + +def filter_scores_greater_than(box_mask_list, thresh): + """Filter to keep only boxes and masks with score exceeding a given threshold. + + This op keeps the collection of boxes and masks whose corresponding scores are + greater than the input threshold. + + Args: + box_mask_list: BoxMaskList holding N boxes and masks. Must contain a + 'scores' field representing detection scores. + thresh: scalar threshold + + Returns: + a BoxMaskList holding M boxes and masks where M <= N + + Raises: + ValueError: if box_mask_list not a np_box_mask_list.BoxMaskList object or + if it does not have a scores field + """ + if not isinstance(box_mask_list, np_box_mask_list.BoxMaskList): + raise ValueError('box_mask_list must be a BoxMaskList') + if not box_mask_list.has_field('scores'): + raise ValueError('input box_mask_list must have \'scores\' field') + scores = box_mask_list.get_field('scores') + if len(scores.shape) > 2: + raise ValueError('Scores should have rank 1 or 2') + if len(scores.shape) == 2 and scores.shape[1] != 1: + raise ValueError('Scores should have rank 1 or have shape ' + 'consistent with [None, 1]') + high_score_indices = np.reshape(np.where(np.greater(scores, thresh)), + [-1]).astype(np.int32) + return gather(box_mask_list, high_score_indices) diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/np_box_ops.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/np_box_ops.py new file mode 100644 index 0000000..b4b46a7 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/np_box_ops.py @@ -0,0 +1,97 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Operations for [N, 4] numpy arrays representing bounding boxes. 
+ +Example box operations that are supported: + * Areas: compute bounding box areas + * IOU: pairwise intersection-over-union scores +""" +import numpy as np + + +def area(boxes): + """Computes area of boxes. + + Args: + boxes: Numpy array with shape [N, 4] holding N boxes + + Returns: + a numpy array with shape [N*1] representing box areas + """ + return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) + + +def intersection(boxes1, boxes2): + """Compute pairwise intersection areas between boxes. + + Args: + boxes1: a numpy array with shape [N, 4] holding N boxes + boxes2: a numpy array with shape [M, 4] holding M boxes + + Returns: + a numpy array with shape [N*M] representing pairwise intersection area + """ + [y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1) + [y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1) + + all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2)) + all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2)) + intersect_heights = np.maximum( + np.zeros(all_pairs_max_ymin.shape), + all_pairs_min_ymax - all_pairs_max_ymin) + all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2)) + all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2)) + intersect_widths = np.maximum( + np.zeros(all_pairs_max_xmin.shape), + all_pairs_min_xmax - all_pairs_max_xmin) + return intersect_heights * intersect_widths + + +def iou(boxes1, boxes2): + """Computes pairwise intersection-over-union between box collections. + + Args: + boxes1: a numpy array with shape [N, 4] holding N boxes. + boxes2: a numpy array with shape [M, 4] holding N boxes. + + Returns: + a numpy array with shape [N, M] representing pairwise iou scores. + """ + intersect = intersection(boxes1, boxes2) + area1 = area(boxes1) + area2 = area(boxes2) + union = np.expand_dims(area1, axis=1) + np.expand_dims( + area2, axis=0) - intersect + return intersect / union + + +def ioa(boxes1, boxes2): + """Computes pairwise intersection-over-area between box collections. + + Intersection-over-area (ioa) between two boxes box1 and box2 is defined as + their intersection area over box2's area. Note that ioa is not symmetric, + that is, IOA(box1, box2) != IOA(box2, box1). + + Args: + boxes1: a numpy array with shape [N, 4] holding N boxes. + boxes2: a numpy array with shape [M, 4] holding N boxes. + + Returns: + a numpy array with shape [N, M] representing pairwise ioa scores. + """ + intersect = intersection(boxes1, boxes2) + areas = np.expand_dims(area(boxes2), axis=0) + return intersect / areas diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/np_mask_ops.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/np_mask_ops.py new file mode 100644 index 0000000..b7918b4 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/np_mask_ops.py @@ -0,0 +1,119 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Operations for [N, height, width] numpy arrays representing masks. + +Example mask operations that are supported: + * Areas: compute mask areas + * IOU: pairwise intersection-over-union scores +""" +import numpy as np + +EPSILON = 1e-7 + + +def area(masks): + """Computes area of masks. + + Args: + masks: Numpy array with shape [N, height, width] holding N masks. Masks + values are of type np.uint8 and values are in {0,1}. + + Returns: + a numpy array with shape [N*1] representing mask areas. + + Raises: + ValueError: If masks.dtype is not np.uint8 + """ + if masks.dtype != np.uint8: + raise ValueError('Masks type should be np.uint8') + return np.sum(masks, axis=(1, 2), dtype=np.float32) + + +def intersection(masks1, masks2): + """Compute pairwise intersection areas between masks. + + Args: + masks1: a numpy array with shape [N, height, width] holding N masks. Masks + values are of type np.uint8 and values are in {0,1}. + masks2: a numpy array with shape [M, height, width] holding M masks. Masks + values are of type np.uint8 and values are in {0,1}. + + Returns: + a numpy array with shape [N*M] representing pairwise intersection area. + + Raises: + ValueError: If masks1 and masks2 are not of type np.uint8. + """ + if masks1.dtype != np.uint8 or masks2.dtype != np.uint8: + raise ValueError('masks1 and masks2 should be of type np.uint8') + n = masks1.shape[0] + m = masks2.shape[0] + answer = np.zeros([n, m], dtype=np.float32) + for i in np.arange(n): + for j in np.arange(m): + answer[i, j] = np.sum(np.minimum(masks1[i], masks2[j]), dtype=np.float32) + return answer + + +def iou(masks1, masks2): + """Computes pairwise intersection-over-union between mask collections. + + Args: + masks1: a numpy array with shape [N, height, width] holding N masks. Masks + values are of type np.uint8 and values are in {0,1}. + masks2: a numpy array with shape [M, height, width] holding N masks. Masks + values are of type np.uint8 and values are in {0,1}. + + Returns: + a numpy array with shape [N, M] representing pairwise iou scores. + + Raises: + ValueError: If masks1 and masks2 are not of type np.uint8. + """ + if masks1.dtype != np.uint8 or masks2.dtype != np.uint8: + raise ValueError('masks1 and masks2 should be of type np.uint8') + intersect = intersection(masks1, masks2) + area1 = area(masks1) + area2 = area(masks2) + union = np.expand_dims(area1, axis=1) + np.expand_dims( + area2, axis=0) - intersect + return intersect / np.maximum(union, EPSILON) + + +def ioa(masks1, masks2): + """Computes pairwise intersection-over-area between box collections. + + Intersection-over-area (ioa) between two masks, mask1 and mask2 is defined as + their intersection area over mask2's area. Note that ioa is not symmetric, + that is, IOA(mask1, mask2) != IOA(mask2, mask1). + + Args: + masks1: a numpy array with shape [N, height, width] holding N masks. Masks + values are of type np.uint8 and values are in {0,1}. + masks2: a numpy array with shape [M, height, width] holding N masks. Masks + values are of type np.uint8 and values are in {0,1}. + + Returns: + a numpy array with shape [N, M] representing pairwise ioa scores. + + Raises: + ValueError: If masks1 and masks2 are not of type np.uint8. 
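+
+  Example (an illustrative sketch; the toy masks below are assumed):
+
+    m1 = np.ones((1, 4, 4), dtype=np.uint8)   # full 4x4 mask, area 16
+    m2 = np.zeros((1, 4, 4), dtype=np.uint8)
+    m2[0, :2, :] = 1                          # half mask, area 8
+    ioa(m1, m2)  # ~[[1.0]]: intersection 8 over area(m2) = 8
+    ioa(m2, m1)  # ~[[0.5]]: intersection 8 over area(m1) = 16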
+ """ + if masks1.dtype != np.uint8 or masks2.dtype != np.uint8: + raise ValueError('masks1 and masks2 should be of type np.uint8') + intersect = intersection(masks1, masks2) + areas = np.expand_dims(area(masks2), axis=0) + return intersect / (areas + EPSILON) diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/object_detection_evaluation.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/object_detection_evaluation.py new file mode 100644 index 0000000..01616eb --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/object_detection_evaluation.py @@ -0,0 +1,713 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""object_detection_evaluation module. + +ObjectDetectionEvaluation is a class which manages ground truth information of a +object detection dataset, and computes frequently used detection metrics such as +Precision, Recall, CorLoc of the provided detection results. +It supports the following operations: +1) Add ground truth information of images sequentially. +2) Add detection result of images sequentially. +3) Evaluate detection metrics on already inserted detection results. +4) Write evaluation result into a pickle file for future processing or + visualization. + +Note: This module operates on numpy boxes and box lists. +""" + +from abc import ABCMeta +from abc import abstractmethod +import collections +import logging +import numpy as np + +from . import metrics, standard_fields, label_map_util, per_image_evaluation + +logger = logging.getLogger("alphaction.inference") + +class DetectionEvaluator(object): + """Interface for object detection evalution classes. + + Example usage of the Evaluator: + ------------------------------ + evaluator = DetectionEvaluator(categories) + + # Detections and groundtruth for image 1. + evaluator.add_single_groundtruth_image_info(...) + evaluator.add_single_detected_image_info(...) + + # Detections and groundtruth for image 2. + evaluator.add_single_groundtruth_image_info(...) + evaluator.add_single_detected_image_info(...) + + metrics_dict = evaluator.evaluate() + """ + __metaclass__ = ABCMeta + + def __init__(self, categories): + """Constructor. + + Args: + categories: A list of dicts, each of which has the following keys - + 'id': (required) an integer id uniquely identifying this category. + 'name': (required) string representing category name e.g., 'cat', 'dog'. + """ + self._categories = categories + + @abstractmethod + def add_single_ground_truth_image_info(self, image_id, groundtruth_dict): + """Adds groundtruth for a single image to be used for evaluation. + + Args: + image_id: A unique string/integer identifier for the image. 
+ groundtruth_dict: A dictionary of groundtruth numpy arrays required + for evaluations. + """ + pass + + @abstractmethod + def add_single_detected_image_info(self, image_id, detections_dict): + """Adds detections for a single image to be used for evaluation. + + Args: + image_id: A unique string/integer identifier for the image. + detections_dict: A dictionary of detection numpy arrays required + for evaluation. + """ + pass + + @abstractmethod + def evaluate(self): + """Evaluates detections and returns a dictionary of metrics.""" + pass + + @abstractmethod + def clear(self): + """Clears the state to prepare for a fresh evaluation.""" + pass + + +class ObjectDetectionEvaluator(DetectionEvaluator): + """A class to evaluate detections.""" + + def __init__(self, + categories, + matching_iou_threshold=0.5, + evaluate_corlocs=False, + metric_prefix=None, + use_weighted_mean_ap=False, + evaluate_masks=False): + """Constructor. + + Args: + categories: A list of dicts, each of which has the following keys - + 'id': (required) an integer id uniquely identifying this category. + 'name': (required) string representing category name e.g., 'cat', 'dog'. + matching_iou_threshold: IOU threshold to use for matching groundtruth + boxes to detection boxes. + evaluate_corlocs: (optional) boolean which determines if corloc scores + are to be returned or not. + metric_prefix: (optional) string prefix for metric name; if None, no + prefix is used. + use_weighted_mean_ap: (optional) boolean which determines if the mean + average precision is computed directly from the scores and tp_fp_labels + of all classes. + evaluate_masks: If False, evaluation will be performed based on boxes. + If True, mask evaluation will be performed instead. + + Raises: + ValueError: If the category ids are not 1-indexed. + """ + super(ObjectDetectionEvaluator, self).__init__(categories) + self._num_classes = max([cat['id'] for cat in categories]) + if min(cat['id'] for cat in categories) < 1: + raise ValueError('Classes should be 1-indexed.') + self._matching_iou_threshold = matching_iou_threshold + self._use_weighted_mean_ap = use_weighted_mean_ap + self._label_id_offset = 1 + self._evaluate_masks = evaluate_masks + self._evaluation = ObjectDetectionEvaluation( + num_groundtruth_classes=self._num_classes, + matching_iou_threshold=self._matching_iou_threshold, + use_weighted_mean_ap=self._use_weighted_mean_ap, + label_id_offset=self._label_id_offset) + self._image_ids = set([]) + self._evaluate_corlocs = evaluate_corlocs + self._metric_prefix = (metric_prefix + '_') if metric_prefix else '' + + def add_single_ground_truth_image_info(self, image_id, groundtruth_dict): + """Adds groundtruth for a single image to be used for evaluation. + + Args: + image_id: A unique string/integer identifier for the image. + groundtruth_dict: A dictionary containing - + standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array + of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of + the format [ymin, xmin, ymax, xmax] in absolute image coordinates. + standard_fields.InputDataFields.groundtruth_classes: integer numpy array + of shape [num_boxes] containing 1-indexed groundtruth classes for the + boxes. + standard_fields.InputDataFields.groundtruth_difficult: Optional length + M numpy boolean array denoting whether a ground truth box is a + difficult instance or not. This field is optional to support the case + that no boxes are difficult. 
+ standard_fields.InputDataFields.groundtruth_instance_masks: Optional + numpy array of shape [num_boxes, height, width] with values in {0, 1}. + + Raises: + ValueError: On adding groundtruth for an image more than once. Will also + raise error if instance masks are not in groundtruth dictionary. + """ + if image_id in self._image_ids: + raise ValueError('Image with id {} already added.'.format(image_id)) + + groundtruth_classes = ( + groundtruth_dict[standard_fields.InputDataFields.groundtruth_classes] - + self._label_id_offset) + # If the key is not present in the groundtruth_dict or the array is empty + # (unless there are no annotations for the groundtruth on this image) + # use values from the dictionary or insert None otherwise. + if (standard_fields.InputDataFields.groundtruth_difficult in + groundtruth_dict.keys() and + (groundtruth_dict[standard_fields.InputDataFields.groundtruth_difficult] + .size or not groundtruth_classes.size)): + groundtruth_difficult = groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_difficult] + else: + groundtruth_difficult = None + if not len(self._image_ids) % 1000: + logger.warning( + 'image %s does not have groundtruth difficult flag specified', + image_id) + groundtruth_masks = None + if self._evaluate_masks: + if (standard_fields.InputDataFields.groundtruth_instance_masks not in + groundtruth_dict): + raise ValueError('Instance masks not in groundtruth dictionary.') + groundtruth_masks = groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_instance_masks] + self._evaluation.add_single_ground_truth_image_info( + image_key=image_id, + groundtruth_boxes=groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_boxes], + groundtruth_class_labels=groundtruth_classes, + groundtruth_is_difficult_list=groundtruth_difficult, + groundtruth_masks=groundtruth_masks) + self._image_ids.update([image_id]) + + def add_single_detected_image_info(self, image_id, detections_dict): + """Adds detections for a single image to be used for evaluation. + + Args: + image_id: A unique string/integer identifier for the image. + detections_dict: A dictionary containing - + standard_fields.DetectionResultFields.detection_boxes: float32 numpy + array of shape [num_boxes, 4] containing `num_boxes` detection boxes + of the format [ymin, xmin, ymax, xmax] in absolute image coordinates. + standard_fields.DetectionResultFields.detection_scores: float32 numpy + array of shape [num_boxes] containing detection scores for the boxes. + standard_fields.DetectionResultFields.detection_classes: integer numpy + array of shape [num_boxes] containing 1-indexed detection classes for + the boxes. + standard_fields.DetectionResultFields.detection_masks: uint8 numpy + array of shape [num_boxes, height, width] containing `num_boxes` masks + of values ranging between 0 and 1. + + Raises: + ValueError: If detection masks are not in detections dictionary. 
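+
+    Example (an illustrative call; the field values below are assumed):
+
+      evaluator.add_single_detected_image_info(
+          image_id='image_0',
+          detections_dict={
+              standard_fields.DetectionResultFields.detection_boxes:
+                  np.array([[10., 10., 50., 50.]], dtype=np.float32),
+              standard_fields.DetectionResultFields.detection_scores:
+                  np.array([0.9], dtype=np.float32),
+              standard_fields.DetectionResultFields.detection_classes:
+                  np.array([1], dtype=np.int32),
+          })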
+ """ + detection_classes = ( + detections_dict[standard_fields.DetectionResultFields.detection_classes] + - self._label_id_offset) + detection_masks = None + if self._evaluate_masks: + if (standard_fields.DetectionResultFields.detection_masks not in + detections_dict): + raise ValueError('Detection masks not in detections dictionary.') + detection_masks = detections_dict[ + standard_fields.DetectionResultFields.detection_masks] + self._evaluation.add_single_detected_image_info( + image_key=image_id, + detected_boxes=detections_dict[ + standard_fields.DetectionResultFields.detection_boxes], + detected_scores=detections_dict[ + standard_fields.DetectionResultFields.detection_scores], + detected_class_labels=detection_classes, + detected_masks=detection_masks) + + def evaluate(self): + """Compute evaluation result. + + Returns: + A dictionary of metrics with the following fields - + + 1. summary_metrics: + 'Precision/mAP@IOU': mean average precision at + the specified IOU threshold. + + 2. per_category_ap: category specific results with keys of the form + 'PerformanceByCategory/mAP@IOU/category'. + """ + (per_class_ap, mean_ap, _, _, per_class_corloc, mean_corloc) = ( + self._evaluation.evaluate()) + pascal_metrics = { + self._metric_prefix + + 'Precision/mAP@{}IOU'.format(self._matching_iou_threshold): + mean_ap + } + if self._evaluate_corlocs: + pascal_metrics[self._metric_prefix + 'Precision/meanCorLoc@{}IOU'.format( + self._matching_iou_threshold)] = mean_corloc + category_index = label_map_util.create_category_index(self._categories) + for idx in range(per_class_ap.size): + if idx + self._label_id_offset in category_index: + display_name = ( + self._metric_prefix + 'PerformanceByCategory/AP@{}IOU/{}'.format( + self._matching_iou_threshold, + category_index[idx + self._label_id_offset]['name'])) + pascal_metrics[display_name] = per_class_ap[idx] + + # Optionally add CorLoc metrics.classes + if self._evaluate_corlocs: + display_name = ( + self._metric_prefix + 'PerformanceByCategory/CorLoc@{}IOU/{}' + .format(self._matching_iou_threshold, + category_index[idx + self._label_id_offset]['name'])) + pascal_metrics[display_name] = per_class_corloc[idx] + + return pascal_metrics + + def clear(self): + """Clears the state to prepare for a fresh evaluation.""" + self._evaluation = ObjectDetectionEvaluation( + num_groundtruth_classes=self._num_classes, + matching_iou_threshold=self._matching_iou_threshold, + use_weighted_mean_ap=self._use_weighted_mean_ap, + label_id_offset=self._label_id_offset) + self._image_ids.clear() + + +class PascalDetectionEvaluator(ObjectDetectionEvaluator): + """A class to evaluate detections using PASCAL metrics.""" + + def __init__(self, categories, matching_iou_threshold=0.5): + super(PascalDetectionEvaluator, self).__init__( + categories, + matching_iou_threshold=matching_iou_threshold, + evaluate_corlocs=False, + metric_prefix='PascalBoxes', + use_weighted_mean_ap=False) + + +class WeightedPascalDetectionEvaluator(ObjectDetectionEvaluator): + """A class to evaluate detections using weighted PASCAL metrics. + + Weighted PASCAL metrics computes the mean average precision as the average + precision given the scores and tp_fp_labels of all classes. In comparison, + PASCAL metrics computes the mean average precision as the mean of the + per-class average precisions. + + This definition is very similar to the mean of the per-class average + precisions weighted by class frequency. 
However, they are typically not the + same as the average precision is not a linear function of the scores and + tp_fp_labels. + """ + + def __init__(self, categories, matching_iou_threshold=0.5): + super(WeightedPascalDetectionEvaluator, self).__init__( + categories, + matching_iou_threshold=matching_iou_threshold, + evaluate_corlocs=False, + metric_prefix='WeightedPascalBoxes', + use_weighted_mean_ap=True) + + +class PascalInstanceSegmentationEvaluator(ObjectDetectionEvaluator): + """A class to evaluate instance masks using PASCAL metrics.""" + + def __init__(self, categories, matching_iou_threshold=0.5): + super(PascalInstanceSegmentationEvaluator, self).__init__( + categories, + matching_iou_threshold=matching_iou_threshold, + evaluate_corlocs=False, + metric_prefix='PascalMasks', + use_weighted_mean_ap=False, + evaluate_masks=True) + + +class WeightedPascalInstanceSegmentationEvaluator(ObjectDetectionEvaluator): + """A class to evaluate instance masks using weighted PASCAL metrics. + + Weighted PASCAL metrics computes the mean average precision as the average + precision given the scores and tp_fp_labels of all classes. In comparison, + PASCAL metrics computes the mean average precision as the mean of the + per-class average precisions. + + This definition is very similar to the mean of the per-class average + precisions weighted by class frequency. However, they are typically not the + same as the average precision is not a linear function of the scores and + tp_fp_labels. + """ + + def __init__(self, categories, matching_iou_threshold=0.5): + super(WeightedPascalInstanceSegmentationEvaluator, self).__init__( + categories, + matching_iou_threshold=matching_iou_threshold, + evaluate_corlocs=False, + metric_prefix='WeightedPascalMasks', + use_weighted_mean_ap=True, + evaluate_masks=True) + + +class OpenImagesDetectionEvaluator(ObjectDetectionEvaluator): + """A class to evaluate detections using Open Images V2 metrics. + + Open Images V2 introduce group_of type of bounding boxes and this metric + handles those boxes appropriately. + """ + + def __init__(self, + categories, + matching_iou_threshold=0.5, + evaluate_corlocs=False): + """Constructor. + + Args: + categories: A list of dicts, each of which has the following keys - + 'id': (required) an integer id uniquely identifying this category. + 'name': (required) string representing category name e.g., 'cat', 'dog'. + matching_iou_threshold: IOU threshold to use for matching groundtruth + boxes to detection boxes. + evaluate_corlocs: if True, additionally evaluates and returns CorLoc. + """ + super(OpenImagesDetectionEvaluator, self).__init__( + categories, + matching_iou_threshold, + evaluate_corlocs, + metric_prefix='OpenImagesV2') + + def add_single_ground_truth_image_info(self, image_id, groundtruth_dict): + """Adds groundtruth for a single image to be used for evaluation. + + Args: + image_id: A unique string/integer identifier for the image. + groundtruth_dict: A dictionary containing - + standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array + of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of + the format [ymin, xmin, ymax, xmax] in absolute image coordinates. + standard_fields.InputDataFields.groundtruth_classes: integer numpy array + of shape [num_boxes] containing 1-indexed groundtruth classes for the + boxes. + standard_fields.InputDataFields.groundtruth_group_of: Optional length + M numpy boolean array denoting whether a groundtruth box contains a + group of instances. 
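+
+    Example (illustrative sketch only; the values below are arbitrary
+      placeholders, not taken from the Open Images data):
+
+        groundtruth_dict = {
+            standard_fields.InputDataFields.groundtruth_boxes:
+                np.array([[10., 10., 50., 80.], [20., 20., 90., 100.]],
+                         dtype=np.float32),
+            standard_fields.InputDataFields.groundtruth_classes:
+                np.array([1, 2], dtype=np.int32),
+            standard_fields.InputDataFields.groundtruth_group_of:
+                np.array([False, True], dtype=bool),
+        }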
+ + Raises: + ValueError: On adding groundtruth for an image more than once. + """ + if image_id in self._image_ids: + raise ValueError('Image with id {} already added.'.format(image_id)) + + groundtruth_classes = ( + groundtruth_dict[standard_fields.InputDataFields.groundtruth_classes] - + self._label_id_offset) + # If the key is not present in the groundtruth_dict or the array is empty + # (unless there are no annotations for the groundtruth on this image) + # use values from the dictionary or insert None otherwise. + if (standard_fields.InputDataFields.groundtruth_group_of in + groundtruth_dict.keys() and + (groundtruth_dict[standard_fields.InputDataFields.groundtruth_group_of] + .size or not groundtruth_classes.size)): + groundtruth_group_of = groundtruth_dict[ + standard_fields.InputDataFields.groundtruth_group_of] + else: + groundtruth_group_of = None + if not len(self._image_ids) % 1000: + logger.warning( + 'image %s does not have groundtruth group_of flag specified', + image_id) + self._evaluation.add_single_ground_truth_image_info( + image_id, + groundtruth_dict[standard_fields.InputDataFields.groundtruth_boxes], + groundtruth_classes, + groundtruth_is_difficult_list=None, + groundtruth_is_group_of_list=groundtruth_group_of) + self._image_ids.update([image_id]) + + +ObjectDetectionEvalMetrics = collections.namedtuple( + 'ObjectDetectionEvalMetrics', [ + 'average_precisions', 'mean_ap', 'precisions', 'recalls', 'corlocs', + 'mean_corloc' + ]) + + +class ObjectDetectionEvaluation(object): + """Internal implementation of Pascal object detection metrics.""" + + def __init__(self, + num_groundtruth_classes, + matching_iou_threshold=0.5, + nms_iou_threshold=1.0, + nms_max_output_boxes=10000, + use_weighted_mean_ap=False, + label_id_offset=0): + if num_groundtruth_classes < 1: + raise ValueError('Need at least 1 groundtruth class for evaluation.') + + self.per_image_eval = per_image_evaluation.PerImageEvaluation( + num_groundtruth_classes=num_groundtruth_classes, + matching_iou_threshold=matching_iou_threshold, + nms_iou_threshold=nms_iou_threshold, + nms_max_output_boxes=nms_max_output_boxes) + self.num_class = num_groundtruth_classes + self.use_weighted_mean_ap = use_weighted_mean_ap + self.label_id_offset = label_id_offset + + self.groundtruth_boxes = {} + self.groundtruth_class_labels = {} + self.groundtruth_masks = {} + self.groundtruth_is_difficult_list = {} + self.groundtruth_is_group_of_list = {} + self.num_gt_instances_per_class = np.zeros(self.num_class, dtype=int) + self.num_gt_imgs_per_class = np.zeros(self.num_class, dtype=int) + + self._initialize_detections() + + def _initialize_detections(self): + self.detection_keys = set() + self.scores_per_class = [[] for _ in range(self.num_class)] + self.tp_fp_labels_per_class = [[] for _ in range(self.num_class)] + self.num_images_correctly_detected_per_class = np.zeros(self.num_class) + self.average_precision_per_class = np.empty(self.num_class, dtype=float) + self.average_precision_per_class.fill(np.nan) + self.precisions_per_class = [] + self.recalls_per_class = [] + self.corloc_per_class = np.ones(self.num_class, dtype=float) + + def clear_detections(self): + self._initialize_detections() + + def add_single_ground_truth_image_info(self, + image_key, + groundtruth_boxes, + groundtruth_class_labels, + groundtruth_is_difficult_list=None, + groundtruth_is_group_of_list=None, + groundtruth_masks=None): + """Adds groundtruth for a single image to be used for evaluation. 
+ + Args: + image_key: A unique string/integer identifier for the image. + groundtruth_boxes: float32 numpy array of shape [num_boxes, 4] + containing `num_boxes` groundtruth boxes of the format + [ymin, xmin, ymax, xmax] in absolute image coordinates. + groundtruth_class_labels: integer numpy array of shape [num_boxes] + containing 0-indexed groundtruth classes for the boxes. + groundtruth_is_difficult_list: A length M numpy boolean array denoting + whether a ground truth box is a difficult instance or not. To support + the case that no boxes are difficult, it is by default set as None. + groundtruth_is_group_of_list: A length M numpy boolean array denoting + whether a ground truth box is a group-of box or not. To support + the case that no boxes are groups-of, it is by default set as None. + groundtruth_masks: uint8 numpy array of shape + [num_boxes, height, width] containing `num_boxes` groundtruth masks. + The mask values range from 0 to 1. + """ + if image_key in self.groundtruth_boxes: + logger.warning( + 'image %s has already been added to the ground truth database.', + image_key) + return + + self.groundtruth_boxes[image_key] = groundtruth_boxes + self.groundtruth_class_labels[image_key] = groundtruth_class_labels + self.groundtruth_masks[image_key] = groundtruth_masks + if groundtruth_is_difficult_list is None: + num_boxes = groundtruth_boxes.shape[0] + groundtruth_is_difficult_list = np.zeros(num_boxes, dtype=bool) + self.groundtruth_is_difficult_list[ + image_key] = groundtruth_is_difficult_list.astype(dtype=bool) + if groundtruth_is_group_of_list is None: + num_boxes = groundtruth_boxes.shape[0] + groundtruth_is_group_of_list = np.zeros(num_boxes, dtype=bool) + self.groundtruth_is_group_of_list[ + image_key] = groundtruth_is_group_of_list.astype(dtype=bool) + + self._update_ground_truth_statistics( + groundtruth_class_labels, + groundtruth_is_difficult_list.astype(dtype=bool), + groundtruth_is_group_of_list.astype(dtype=bool)) + + def add_single_detected_image_info(self, + image_key, + detected_boxes, + detected_scores, + detected_class_labels, + detected_masks=None): + """Adds detections for a single image to be used for evaluation. + + Args: + image_key: A unique string/integer identifier for the image. + detected_boxes: float32 numpy array of shape [num_boxes, 4] + containing `num_boxes` detection boxes of the format + [ymin, xmin, ymax, xmax] in absolute image coordinates. + detected_scores: float32 numpy array of shape [num_boxes] containing + detection scores for the boxes. + detected_class_labels: integer numpy array of shape [num_boxes] containing + 0-indexed detection classes for the boxes. + detected_masks: np.uint8 numpy array of shape [num_boxes, height, width] + containing `num_boxes` detection masks with values ranging + between 0 and 1. + + Raises: + ValueError: if the number of boxes, scores and class labels differ in + length. + """ + if (len(detected_boxes) != len(detected_scores) or + len(detected_boxes) != len(detected_class_labels)): + raise ValueError('detected_boxes, detected_scores and ' + 'detected_class_labels should all have same lengths. 
Got'
+                       '[%d, %d, %d]' % (len(detected_boxes),
+                       len(detected_scores), len(detected_class_labels)))
+
+    if image_key in self.detection_keys:
+      logger.warning(
+          'image %s has already been added to the detection result database',
+          image_key)
+      return
+
+    self.detection_keys.add(image_key)
+    if image_key in self.groundtruth_boxes:
+      groundtruth_boxes = self.groundtruth_boxes[image_key]
+      groundtruth_class_labels = self.groundtruth_class_labels[image_key]
+      # Masks are popped instead of look up. The reason is that we do not want
+      # to keep all masks in memory which can cause memory overflow.
+      groundtruth_masks = self.groundtruth_masks.pop(image_key)
+      groundtruth_is_difficult_list = self.groundtruth_is_difficult_list[
+          image_key]
+      groundtruth_is_group_of_list = self.groundtruth_is_group_of_list[
+          image_key]
+    else:
+      groundtruth_boxes = np.empty(shape=[0, 4], dtype=float)
+      groundtruth_class_labels = np.array([], dtype=int)
+      if detected_masks is None:
+        groundtruth_masks = None
+      else:
+        groundtruth_masks = np.empty(shape=[0, 1, 1], dtype=float)
+      groundtruth_is_difficult_list = np.array([], dtype=bool)
+      groundtruth_is_group_of_list = np.array([], dtype=bool)
+    scores, tp_fp_labels, is_class_correctly_detected_in_image = (
+        self.per_image_eval.compute_object_detection_metrics(
+            detected_boxes=detected_boxes,
+            detected_scores=detected_scores,
+            detected_class_labels=detected_class_labels,
+            groundtruth_boxes=groundtruth_boxes,
+            groundtruth_class_labels=groundtruth_class_labels,
+            groundtruth_is_difficult_list=groundtruth_is_difficult_list,
+            groundtruth_is_group_of_list=groundtruth_is_group_of_list,
+            detected_masks=detected_masks,
+            groundtruth_masks=groundtruth_masks))
+
+    for i in range(self.num_class):
+      if scores[i].shape[0] > 0:
+        self.scores_per_class[i].append(scores[i])
+        self.tp_fp_labels_per_class[i].append(tp_fp_labels[i])
+    self.num_images_correctly_detected_per_class += is_class_correctly_detected_in_image
+
+  def _update_ground_truth_statistics(self, groundtruth_class_labels,
+                                      groundtruth_is_difficult_list,
+                                      groundtruth_is_group_of_list):
+    """Update ground truth statistics.
+
+    1. Difficult boxes are ignored when counting the number of ground truth
+       instances as done in Pascal VOC devkit.
+    2. Difficult boxes are treated as normal boxes when computing CorLoc related
+       statistics.
+
+    Args:
+      groundtruth_class_labels: An integer numpy array of length M,
+          representing M class labels of object instances in ground truth
+      groundtruth_is_difficult_list: A boolean numpy array of length M denoting
+          whether a ground truth box is a difficult instance or not
+      groundtruth_is_group_of_list: A boolean numpy array of length M denoting
+          whether a ground truth box is a group-of box or not
+    """
+    for class_index in range(self.num_class):
+      num_gt_instances = np.sum(groundtruth_class_labels[
+          ~groundtruth_is_difficult_list
+          & ~groundtruth_is_group_of_list] == class_index)
+      self.num_gt_instances_per_class[class_index] += num_gt_instances
+      if np.any(groundtruth_class_labels == class_index):
+        self.num_gt_imgs_per_class[class_index] += 1
+
+  def evaluate(self):
+    """Compute evaluation result.
+
+    Returns:
+      A named tuple with the following fields -
+        average_precision: float numpy array of average precision for
+            each class.
+ mean_ap: mean average precision of all classes, float scalar + precisions: List of precisions, each precision is a float numpy + array + recalls: List of recalls, each recall is a float numpy array + corloc: numpy float array + mean_corloc: Mean CorLoc score for each class, float scalar + """ + if (self.num_gt_instances_per_class == 0).any(): + logger.info( + 'The following classes have no ground truth examples: %s', + np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)) + + self.label_id_offset) + + if self.use_weighted_mean_ap: + all_scores = np.array([], dtype=float) + all_tp_fp_labels = np.array([], dtype=bool) + + for class_index in range(self.num_class): + if self.num_gt_instances_per_class[class_index] == 0: + continue + if not self.scores_per_class[class_index]: + scores = np.array([], dtype=float) + tp_fp_labels = np.array([], dtype=bool) + else: + scores = np.concatenate(self.scores_per_class[class_index]) + tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index]) + if self.use_weighted_mean_ap: + all_scores = np.append(all_scores, scores) + all_tp_fp_labels = np.append(all_tp_fp_labels, tp_fp_labels) + precision, recall = metrics.compute_precision_recall( + scores, tp_fp_labels, self.num_gt_instances_per_class[class_index]) + self.precisions_per_class.append(precision) + self.recalls_per_class.append(recall) + average_precision = metrics.compute_average_precision(precision, recall) + self.average_precision_per_class[class_index] = average_precision + + self.corloc_per_class = metrics.compute_cor_loc( + self.num_gt_imgs_per_class, + self.num_images_correctly_detected_per_class) + + if self.use_weighted_mean_ap: + num_gt_instances = np.sum(self.num_gt_instances_per_class) + precision, recall = metrics.compute_precision_recall( + all_scores, all_tp_fp_labels, num_gt_instances) + mean_ap = metrics.compute_average_precision(precision, recall) + else: + mean_ap = np.nanmean(self.average_precision_per_class) + mean_corloc = np.nanmean(self.corloc_per_class) + return ObjectDetectionEvalMetrics( + self.average_precision_per_class, mean_ap, self.precisions_per_class, + self.recalls_per_class, self.corloc_per_class, mean_corloc) diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/per_image_evaluation.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/per_image_evaluation.py new file mode 100644 index 0000000..02926db --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/per_image_evaluation.py @@ -0,0 +1,564 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Evaluate Object Detection result on a single image. + +Annotate each detected result as true positives or false positive according to +a predefined IOU ratio. 
Non Maximum Supression is used by default. Multi class +detection is supported by default. +Based on the settings, per image evaluation is either performed on boxes or +on object masks. +""" +import numpy as np + +from . import np_box_list_ops, np_box_mask_list_ops, np_box_list, np_box_mask_list + + +class PerImageEvaluation(object): + """Evaluate detection result of a single image.""" + + def __init__(self, + num_groundtruth_classes, + matching_iou_threshold=0.5, + nms_iou_threshold=0.3, + nms_max_output_boxes=50): + """Initialized PerImageEvaluation by evaluation parameters. + + Args: + num_groundtruth_classes: Number of ground truth object classes + matching_iou_threshold: A ratio of area intersection to union, which is + the threshold to consider whether a detection is true positive or not + nms_iou_threshold: IOU threshold used in Non Maximum Suppression. + nms_max_output_boxes: Number of maximum output boxes in NMS. + """ + self.matching_iou_threshold = matching_iou_threshold + self.nms_iou_threshold = nms_iou_threshold + self.nms_max_output_boxes = nms_max_output_boxes + self.num_groundtruth_classes = num_groundtruth_classes + + def compute_object_detection_metrics( + self, detected_boxes, detected_scores, detected_class_labels, + groundtruth_boxes, groundtruth_class_labels, + groundtruth_is_difficult_list, groundtruth_is_group_of_list, + detected_masks=None, groundtruth_masks=None): + """Evaluates detections as being tp, fp or ignored from a single image. + + The evaluation is done in two stages: + 1. All detections are matched to non group-of boxes; true positives are + determined and detections matched to difficult boxes are ignored. + 2. Detections that are determined as false positives are matched against + group-of boxes and ignored if matched. + + Args: + detected_boxes: A float numpy array of shape [N, 4], representing N + regions of detected object regions. + Each row is of the format [y_min, x_min, y_max, x_max] + detected_scores: A float numpy array of shape [N, 1], representing + the confidence scores of the detected N object instances. + detected_class_labels: A integer numpy array of shape [N, 1], repreneting + the class labels of the detected N object instances. + groundtruth_boxes: A float numpy array of shape [M, 4], representing M + regions of object instances in ground truth + groundtruth_class_labels: An integer numpy array of shape [M, 1], + representing M class labels of object instances in ground truth + groundtruth_is_difficult_list: A boolean numpy array of length M denoting + whether a ground truth box is a difficult instance or not + groundtruth_is_group_of_list: A boolean numpy array of length M denoting + whether a ground truth box has group-of tag + detected_masks: (optional) A uint8 numpy array of shape + [N, height, width]. If not None, the metrics will be computed based + on masks. + groundtruth_masks: (optional) A uint8 numpy array of shape + [M, height, width]. + + Returns: + scores: A list of C float numpy arrays. Each numpy array is of + shape [K, 1], representing K scores detected with object class + label c + tp_fp_labels: A list of C boolean numpy arrays. 
Each numpy array + is of shape [K, 1], representing K True/False positive label of + object instances detected with class label c + is_class_correctly_detected_in_image: a numpy integer array of + shape [C, 1], indicating whether the correponding class has a least + one instance being correctly detected in the image + """ + detected_boxes, detected_scores, detected_class_labels, detected_masks = ( + self._remove_invalid_boxes(detected_boxes, detected_scores, + detected_class_labels, detected_masks)) + scores, tp_fp_labels = self._compute_tp_fp( + detected_boxes=detected_boxes, + detected_scores=detected_scores, + detected_class_labels=detected_class_labels, + groundtruth_boxes=groundtruth_boxes, + groundtruth_class_labels=groundtruth_class_labels, + groundtruth_is_difficult_list=groundtruth_is_difficult_list, + groundtruth_is_group_of_list=groundtruth_is_group_of_list, + detected_masks=detected_masks, + groundtruth_masks=groundtruth_masks) + + is_class_correctly_detected_in_image = self._compute_cor_loc( + detected_boxes=detected_boxes, + detected_scores=detected_scores, + detected_class_labels=detected_class_labels, + groundtruth_boxes=groundtruth_boxes, + groundtruth_class_labels=groundtruth_class_labels, + detected_masks=detected_masks, + groundtruth_masks=groundtruth_masks) + + return scores, tp_fp_labels, is_class_correctly_detected_in_image + + def _compute_cor_loc(self, detected_boxes, detected_scores, + detected_class_labels, groundtruth_boxes, + groundtruth_class_labels, detected_masks=None, + groundtruth_masks=None): + """Compute CorLoc score for object detection result. + + Args: + detected_boxes: A float numpy array of shape [N, 4], representing N + regions of detected object regions. + Each row is of the format [y_min, x_min, y_max, x_max] + detected_scores: A float numpy array of shape [N, 1], representing + the confidence scores of the detected N object instances. + detected_class_labels: A integer numpy array of shape [N, 1], repreneting + the class labels of the detected N object instances. + groundtruth_boxes: A float numpy array of shape [M, 4], representing M + regions of object instances in ground truth + groundtruth_class_labels: An integer numpy array of shape [M, 1], + representing M class labels of object instances in ground truth + detected_masks: (optional) A uint8 numpy array of shape + [N, height, width]. If not None, the scores will be computed based + on masks. + groundtruth_masks: (optional) A uint8 numpy array of shape + [M, height, width]. + + Returns: + is_class_correctly_detected_in_image: a numpy integer array of + shape [C, 1], indicating whether the correponding class has a least + one instance being correctly detected in the image + + Raises: + ValueError: If detected masks is not None but groundtruth masks are None, + or the other way around. + """ + if (detected_masks is not None and + groundtruth_masks is None) or (detected_masks is None and + groundtruth_masks is not None): + raise ValueError( + 'If `detected_masks` is provided, then `groundtruth_masks` should ' + 'also be provided.' 
+ ) + + is_class_correctly_detected_in_image = np.zeros( + self.num_groundtruth_classes, dtype=int) + for i in range(self.num_groundtruth_classes): + (gt_boxes_at_ith_class, gt_masks_at_ith_class, + detected_boxes_at_ith_class, detected_scores_at_ith_class, + detected_masks_at_ith_class) = self._get_ith_class_arrays( + detected_boxes, detected_scores, detected_masks, + detected_class_labels, groundtruth_boxes, groundtruth_masks, + groundtruth_class_labels, i) + is_class_correctly_detected_in_image[i] = ( + self._compute_is_class_correctly_detected_in_image( + detected_boxes=detected_boxes_at_ith_class, + detected_scores=detected_scores_at_ith_class, + groundtruth_boxes=gt_boxes_at_ith_class, + detected_masks=detected_masks_at_ith_class, + groundtruth_masks=gt_masks_at_ith_class)) + + return is_class_correctly_detected_in_image + + def _compute_is_class_correctly_detected_in_image( + self, detected_boxes, detected_scores, groundtruth_boxes, + detected_masks=None, groundtruth_masks=None): + """Compute CorLoc score for a single class. + + Args: + detected_boxes: A numpy array of shape [N, 4] representing detected box + coordinates + detected_scores: A 1-d numpy array of length N representing classification + score + groundtruth_boxes: A numpy array of shape [M, 4] representing ground truth + box coordinates + detected_masks: (optional) A np.uint8 numpy array of shape + [N, height, width]. If not None, the scores will be computed based + on masks. + groundtruth_masks: (optional) A np.uint8 numpy array of shape + [M, height, width]. + + Returns: + is_class_correctly_detected_in_image: An integer 1 or 0 denoting whether a + class is correctly detected in the image or not + """ + if detected_boxes.size > 0: + if groundtruth_boxes.size > 0: + max_score_id = np.argmax(detected_scores) + mask_mode = False + if detected_masks is not None and groundtruth_masks is not None: + mask_mode = True + if mask_mode: + detected_boxlist = np_box_mask_list.BoxMaskList( + box_data=np.expand_dims(detected_boxes[max_score_id], axis=0), + mask_data=np.expand_dims(detected_masks[max_score_id], axis=0)) + gt_boxlist = np_box_mask_list.BoxMaskList( + box_data=groundtruth_boxes, mask_data=groundtruth_masks) + iou = np_box_mask_list_ops.iou(detected_boxlist, gt_boxlist) + else: + detected_boxlist = np_box_list.BoxList( + np.expand_dims(detected_boxes[max_score_id, :], axis=0)) + gt_boxlist = np_box_list.BoxList(groundtruth_boxes) + iou = np_box_list_ops.iou(detected_boxlist, gt_boxlist) + if np.max(iou) >= self.matching_iou_threshold: + return 1 + return 0 + + def _compute_tp_fp(self, detected_boxes, detected_scores, + detected_class_labels, groundtruth_boxes, + groundtruth_class_labels, groundtruth_is_difficult_list, + groundtruth_is_group_of_list, + detected_masks=None, groundtruth_masks=None): + """Labels true/false positives of detections of an image across all classes. + + Args: + detected_boxes: A float numpy array of shape [N, 4], representing N + regions of detected object regions. + Each row is of the format [y_min, x_min, y_max, x_max] + detected_scores: A float numpy array of shape [N, 1], representing + the confidence scores of the detected N object instances. + detected_class_labels: A integer numpy array of shape [N, 1], repreneting + the class labels of the detected N object instances. 
+ groundtruth_boxes: A float numpy array of shape [M, 4], representing M + regions of object instances in ground truth + groundtruth_class_labels: An integer numpy array of shape [M, 1], + representing M class labels of object instances in ground truth + groundtruth_is_difficult_list: A boolean numpy array of length M denoting + whether a ground truth box is a difficult instance or not + groundtruth_is_group_of_list: A boolean numpy array of length M denoting + whether a ground truth box has group-of tag + detected_masks: (optional) A np.uint8 numpy array of shape + [N, height, width]. If not None, the scores will be computed based + on masks. + groundtruth_masks: (optional) A np.uint8 numpy array of shape + [M, height, width]. + + Returns: + result_scores: A list of float numpy arrays. Each numpy array is of + shape [K, 1], representing K scores detected with object class + label c + result_tp_fp_labels: A list of boolean numpy array. Each numpy array is of + shape [K, 1], representing K True/False positive label of object + instances detected with class label c + + Raises: + ValueError: If detected masks is not None but groundtruth masks are None, + or the other way around. + """ + if detected_masks is not None and groundtruth_masks is None: + raise ValueError( + 'Detected masks is available but groundtruth masks is not.') + if detected_masks is None and groundtruth_masks is not None: + raise ValueError( + 'Groundtruth masks is available but detected masks is not.') + + result_scores = [] + result_tp_fp_labels = [] + for i in range(self.num_groundtruth_classes): + groundtruth_is_difficult_list_at_ith_class = ( + groundtruth_is_difficult_list[groundtruth_class_labels == i]) + groundtruth_is_group_of_list_at_ith_class = ( + groundtruth_is_group_of_list[groundtruth_class_labels == i]) + (gt_boxes_at_ith_class, gt_masks_at_ith_class, + detected_boxes_at_ith_class, detected_scores_at_ith_class, + detected_masks_at_ith_class) = self._get_ith_class_arrays( + detected_boxes, detected_scores, detected_masks, + detected_class_labels, groundtruth_boxes, groundtruth_masks, + groundtruth_class_labels, i) + scores, tp_fp_labels = self._compute_tp_fp_for_single_class( + detected_boxes=detected_boxes_at_ith_class, + detected_scores=detected_scores_at_ith_class, + groundtruth_boxes=gt_boxes_at_ith_class, + groundtruth_is_difficult_list= + groundtruth_is_difficult_list_at_ith_class, + groundtruth_is_group_of_list= + groundtruth_is_group_of_list_at_ith_class, + detected_masks=detected_masks_at_ith_class, + groundtruth_masks=gt_masks_at_ith_class) + result_scores.append(scores) + result_tp_fp_labels.append(tp_fp_labels) + return result_scores, result_tp_fp_labels + + def _get_overlaps_and_scores_mask_mode( + self, detected_boxes, detected_scores, detected_masks, groundtruth_boxes, + groundtruth_masks, groundtruth_is_group_of_list): + """Computes overlaps and scores between detected and groudntruth masks. + + Args: + detected_boxes: A numpy array of shape [N, 4] representing detected box + coordinates + detected_scores: A 1-d numpy array of length N representing classification + score + detected_masks: A uint8 numpy array of shape [N, height, width]. If not + None, the scores will be computed based on masks. + groundtruth_boxes: A numpy array of shape [M, 4] representing ground truth + box coordinates + groundtruth_masks: A uint8 numpy array of shape [M, height, width]. + groundtruth_is_group_of_list: A boolean numpy array of length M denoting + whether a ground truth box has group-of tag. 
If a groundtruth box + is group-of box, every detection matching this box is ignored. + + Returns: + iou: A float numpy array of size [num_detected_boxes, num_gt_boxes]. If + gt_non_group_of_boxlist.num_boxes() == 0 it will be None. + ioa: A float numpy array of size [num_detected_boxes, num_gt_boxes]. If + gt_group_of_boxlist.num_boxes() == 0 it will be None. + scores: The score of the detected boxlist. + num_boxes: Number of non-maximum suppressed detected boxes. + """ + detected_boxlist = np_box_mask_list.BoxMaskList( + box_data=detected_boxes, mask_data=detected_masks) + detected_boxlist.add_field('scores', detected_scores) + detected_boxlist = np_box_mask_list_ops.non_max_suppression( + detected_boxlist, self.nms_max_output_boxes, self.nms_iou_threshold) + gt_non_group_of_boxlist = np_box_mask_list.BoxMaskList( + box_data=groundtruth_boxes[~groundtruth_is_group_of_list], + mask_data=groundtruth_masks[~groundtruth_is_group_of_list]) + gt_group_of_boxlist = np_box_mask_list.BoxMaskList( + box_data=groundtruth_boxes[groundtruth_is_group_of_list], + mask_data=groundtruth_masks[groundtruth_is_group_of_list]) + iou = np_box_mask_list_ops.iou(detected_boxlist, gt_non_group_of_boxlist) + ioa = np_box_mask_list_ops.ioa(gt_group_of_boxlist, detected_boxlist) + scores = detected_boxlist.get_field('scores') + num_boxes = detected_boxlist.num_boxes() + return iou, ioa, scores, num_boxes + + def _get_overlaps_and_scores_box_mode( + self, + detected_boxes, + detected_scores, + groundtruth_boxes, + groundtruth_is_group_of_list): + """Computes overlaps and scores between detected and groudntruth boxes. + + Args: + detected_boxes: A numpy array of shape [N, 4] representing detected box + coordinates + detected_scores: A 1-d numpy array of length N representing classification + score + groundtruth_boxes: A numpy array of shape [M, 4] representing ground truth + box coordinates + groundtruth_is_group_of_list: A boolean numpy array of length M denoting + whether a ground truth box has group-of tag. If a groundtruth box + is group-of box, every detection matching this box is ignored. + + Returns: + iou: A float numpy array of size [num_detected_boxes, num_gt_boxes]. If + gt_non_group_of_boxlist.num_boxes() == 0 it will be None. + ioa: A float numpy array of size [num_detected_boxes, num_gt_boxes]. If + gt_group_of_boxlist.num_boxes() == 0 it will be None. + scores: The score of the detected boxlist. + num_boxes: Number of non-maximum suppressed detected boxes. + """ + detected_boxlist = np_box_list.BoxList(detected_boxes) + detected_boxlist.add_field('scores', detected_scores) + detected_boxlist = np_box_list_ops.non_max_suppression( + detected_boxlist, self.nms_max_output_boxes, self.nms_iou_threshold) + gt_non_group_of_boxlist = np_box_list.BoxList( + groundtruth_boxes[~groundtruth_is_group_of_list]) + gt_group_of_boxlist = np_box_list.BoxList( + groundtruth_boxes[groundtruth_is_group_of_list]) + iou = np_box_list_ops.iou(detected_boxlist, gt_non_group_of_boxlist) + ioa = np_box_list_ops.ioa(gt_group_of_boxlist, detected_boxlist) + scores = detected_boxlist.get_field('scores') + num_boxes = detected_boxlist.num_boxes() + return iou, ioa, scores, num_boxes + + def _compute_tp_fp_for_single_class( + self, detected_boxes, detected_scores, groundtruth_boxes, + groundtruth_is_difficult_list, groundtruth_is_group_of_list, + detected_masks=None, groundtruth_masks=None): + """Labels boxes detected with the same class from the same image as tp/fp. 
+ + Args: + detected_boxes: A numpy array of shape [N, 4] representing detected box + coordinates + detected_scores: A 1-d numpy array of length N representing classification + score + groundtruth_boxes: A numpy array of shape [M, 4] representing ground truth + box coordinates + groundtruth_is_difficult_list: A boolean numpy array of length M denoting + whether a ground truth box is a difficult instance or not. If a + groundtruth box is difficult, every detection matching this box + is ignored. + groundtruth_is_group_of_list: A boolean numpy array of length M denoting + whether a ground truth box has group-of tag. If a groundtruth box + is group-of box, every detection matching this box is ignored. + detected_masks: (optional) A uint8 numpy array of shape + [N, height, width]. If not None, the scores will be computed based + on masks. + groundtruth_masks: (optional) A uint8 numpy array of shape + [M, height, width]. + + Returns: + Two arrays of the same size, containing all boxes that were evaluated as + being true positives or false positives; if a box matched to a difficult + box or to a group-of box, it is ignored. + + scores: A numpy array representing the detection scores. + tp_fp_labels: a boolean numpy array indicating whether a detection is a + true positive. + """ + if detected_boxes.size == 0: + return np.array([], dtype=float), np.array([], dtype=bool) + + mask_mode = False + if detected_masks is not None and groundtruth_masks is not None: + mask_mode = True + + if mask_mode: + (iou, ioa, scores, + num_detected_boxes) = self._get_overlaps_and_scores_mask_mode( + detected_boxes=detected_boxes, + detected_scores=detected_scores, + detected_masks=detected_masks, + groundtruth_boxes=groundtruth_boxes, + groundtruth_masks=groundtruth_masks, + groundtruth_is_group_of_list=groundtruth_is_group_of_list) + else: + (iou, ioa, scores, + num_detected_boxes) = self._get_overlaps_and_scores_box_mode( + detected_boxes=detected_boxes, + detected_scores=detected_scores, + groundtruth_boxes=groundtruth_boxes, + groundtruth_is_group_of_list=groundtruth_is_group_of_list) + + if groundtruth_boxes.size == 0: + return scores, np.zeros(num_detected_boxes, dtype=bool) + + tp_fp_labels = np.zeros(num_detected_boxes, dtype=bool) + is_matched_to_difficult_box = np.zeros(num_detected_boxes, dtype=bool) + is_matched_to_group_of_box = np.zeros(num_detected_boxes, dtype=bool) + + # The evaluation is done in two stages: + # 1. All detections are matched to non group-of boxes; true positives are + # determined and detections matched to difficult boxes are ignored. + # 2. Detections that are determined as false positives are matched against + # group-of boxes and ignored if matched. + + # Tp-fp evaluation for non-group of boxes (if any). + if iou.shape[1] > 0: + groundtruth_nongroup_of_is_difficult_list = groundtruth_is_difficult_list[ + ~groundtruth_is_group_of_list] + max_overlap_gt_ids = np.argmax(iou, axis=1) + is_gt_box_detected = np.zeros(iou.shape[1], dtype=bool) + for i in range(num_detected_boxes): + gt_id = max_overlap_gt_ids[i] + if iou[i, gt_id] >= self.matching_iou_threshold: + if not groundtruth_nongroup_of_is_difficult_list[gt_id]: + if not is_gt_box_detected[gt_id]: + tp_fp_labels[i] = True + is_gt_box_detected[gt_id] = True + else: + is_matched_to_difficult_box[i] = True + + # Tp-fp evaluation for group of boxes. 
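+    # Note: `ioa` stores the intersection-over-area of every group-of
+    # groundtruth box (rows) against every detection (columns), so the column
+    # maximum taken below is each detection's best overlap with any group-of
+    # box. Detections that are neither true positives nor matched to difficult
+    # boxes, but whose best IOA reaches the matching threshold, are flagged
+    # here and excluded from the returned scores/tp_fp_labels, i.e. ignored
+    # rather than counted as false positives.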
+ if ioa.shape[0] > 0: + max_overlap_group_of_gt = np.max(ioa, axis=0) + for i in range(num_detected_boxes): + if (not tp_fp_labels[i] and not is_matched_to_difficult_box[i] and + max_overlap_group_of_gt[i] >= self.matching_iou_threshold): + is_matched_to_group_of_box[i] = True + + return scores[~is_matched_to_difficult_box + & ~is_matched_to_group_of_box], tp_fp_labels[ + ~is_matched_to_difficult_box + & ~is_matched_to_group_of_box] + + def _get_ith_class_arrays(self, detected_boxes, detected_scores, + detected_masks, detected_class_labels, + groundtruth_boxes, groundtruth_masks, + groundtruth_class_labels, class_index): + """Returns numpy arrays belonging to class with index `class_index`. + + Args: + detected_boxes: A numpy array containing detected boxes. + detected_scores: A numpy array containing detected scores. + detected_masks: A numpy array containing detected masks. + detected_class_labels: A numpy array containing detected class labels. + groundtruth_boxes: A numpy array containing groundtruth boxes. + groundtruth_masks: A numpy array containing groundtruth masks. + groundtruth_class_labels: A numpy array containing groundtruth class + labels. + class_index: An integer index. + + Returns: + gt_boxes_at_ith_class: A numpy array containing groundtruth boxes labeled + as ith class. + gt_masks_at_ith_class: A numpy array containing groundtruth masks labeled + as ith class. + detected_boxes_at_ith_class: A numpy array containing detected boxes + corresponding to the ith class. + detected_scores_at_ith_class: A numpy array containing detected scores + corresponding to the ith class. + detected_masks_at_ith_class: A numpy array containing detected masks + corresponding to the ith class. + """ + selected_groundtruth = (groundtruth_class_labels == class_index) + gt_boxes_at_ith_class = groundtruth_boxes[selected_groundtruth] + if groundtruth_masks is not None: + gt_masks_at_ith_class = groundtruth_masks[selected_groundtruth] + else: + gt_masks_at_ith_class = None + selected_detections = (detected_class_labels == class_index) + detected_boxes_at_ith_class = detected_boxes[selected_detections] + detected_scores_at_ith_class = detected_scores[selected_detections] + if detected_masks is not None: + detected_masks_at_ith_class = detected_masks[selected_detections] + else: + detected_masks_at_ith_class = None + return (gt_boxes_at_ith_class, gt_masks_at_ith_class, + detected_boxes_at_ith_class, detected_scores_at_ith_class, + detected_masks_at_ith_class) + + def _remove_invalid_boxes(self, detected_boxes, detected_scores, + detected_class_labels, detected_masks=None): + """Removes entries with invalid boxes. + + A box is invalid if either its xmax is smaller than its xmin, or its ymax + is smaller than its ymin. + + Args: + detected_boxes: A float numpy array of size [num_boxes, 4] containing box + coordinates in [ymin, xmin, ymax, xmax] format. + detected_scores: A float numpy array of size [num_boxes]. + detected_class_labels: A int32 numpy array of size [num_boxes]. + detected_masks: A uint8 numpy array of size [num_boxes, height, width]. + + Returns: + valid_detected_boxes: A float numpy array of size [num_valid_boxes, 4] + containing box coordinates in [ymin, xmin, ymax, xmax] format. + valid_detected_scores: A float numpy array of size [num_valid_boxes]. + valid_detected_class_labels: A int32 numpy array of size + [num_valid_boxes]. + valid_detected_masks: A uint8 numpy array of size + [num_valid_boxes, height, width]. 
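+
+    Example (illustrative): a box [0.5, 0.2, 0.3, 0.6] would be removed here,
+      because its ymax (0.3) is smaller than its ymin (0.5).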
+ """ + valid_indices = np.logical_and(detected_boxes[:, 0] < detected_boxes[:, 2], + detected_boxes[:, 1] < detected_boxes[:, 3]) + detected_boxes = detected_boxes[valid_indices] + detected_scores = detected_scores[valid_indices] + detected_class_labels = detected_class_labels[valid_indices] + if detected_masks is not None: + detected_masks = detected_masks[valid_indices] + return [ + detected_boxes, detected_scores, detected_class_labels, detected_masks + ] diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/standard_fields.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/standard_fields.py new file mode 100644 index 0000000..3420bd2 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/datasets/evaluation/ava/pascal_evaluation/standard_fields.py @@ -0,0 +1,214 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Contains classes specifying naming conventions used for object detection. + + +Specifies: + InputDataFields: standard fields used by reader/preprocessor/batcher. + DetectionResultFields: standard fields returned by object detector. + BoxListFields: standard field used by BoxList + TfExampleFields: standard fields for tf-example dataset format (go/tf-example). +""" + + +class InputDataFields(object): + """Names for the input tensors. + + Holds the standard dataset field names to use for identifying input tensors. This + should be used by the decoder to identify keys for the returned tensor_dict + containing input tensors. And it should be used by the model to identify the + tensors it needs. + + Attributes: + image: image. + original_image: image in the original input size. + key: unique key corresponding to image. + source_id: source of the original image. + filename: original filename of the dataset (without common path). + groundtruth_image_classes: image-level class labels. + groundtruth_boxes: coordinates of the ground truth boxes in the image. + groundtruth_classes: box-level class labels. + groundtruth_label_types: box-level label types (e.g. explicit negative). + groundtruth_is_crowd: [DEPRECATED, use groundtruth_group_of instead] + is the groundtruth a single object or a crowd. + groundtruth_area: area of a groundtruth segment. + groundtruth_difficult: is a `difficult` object + groundtruth_group_of: is a `group_of` objects, e.g. multiple objects of the + same class, forming a connected group, where instances are heavily + occluding each other. + proposal_boxes: coordinates of object proposal boxes. + proposal_objectness: objectness score of each proposal. + groundtruth_instance_masks: ground truth instance masks. + groundtruth_instance_boundaries: ground truth instance boundaries. + groundtruth_instance_classes: instance mask-level class labels. 
+ groundtruth_keypoints: ground truth keypoints. + groundtruth_keypoint_visibilities: ground truth keypoint visibilities. + groundtruth_label_scores: groundtruth label scores. + groundtruth_weights: groundtruth weight factor for bounding boxes. + num_groundtruth_boxes: number of groundtruth boxes. + true_image_shapes: true shapes of images in the resized images, as resized + images can be padded with zeros. + """ + image = 'image' + original_image = 'original_image' + key = 'key' + source_id = 'source_id' + filename = 'filename' + groundtruth_image_classes = 'groundtruth_image_classes' + groundtruth_boxes = 'groundtruth_boxes' + groundtruth_classes = 'groundtruth_classes' + groundtruth_label_types = 'groundtruth_label_types' + groundtruth_is_crowd = 'groundtruth_is_crowd' + groundtruth_area = 'groundtruth_area' + groundtruth_difficult = 'groundtruth_difficult' + groundtruth_group_of = 'groundtruth_group_of' + proposal_boxes = 'proposal_boxes' + proposal_objectness = 'proposal_objectness' + groundtruth_instance_masks = 'groundtruth_instance_masks' + groundtruth_instance_boundaries = 'groundtruth_instance_boundaries' + groundtruth_instance_classes = 'groundtruth_instance_classes' + groundtruth_keypoints = 'groundtruth_keypoints' + groundtruth_keypoint_visibilities = 'groundtruth_keypoint_visibilities' + groundtruth_label_scores = 'groundtruth_label_scores' + groundtruth_weights = 'groundtruth_weights' + num_groundtruth_boxes = 'num_groundtruth_boxes' + true_image_shape = 'true_image_shape' + + +class DetectionResultFields(object): + """Naming conventions for storing the output of the detector. + + Attributes: + source_id: source of the original image. + key: unique key corresponding to image. + detection_boxes: coordinates of the detection boxes in the image. + detection_scores: detection scores for the detection boxes in the image. + detection_classes: detection-level class labels. + detection_masks: contains a segmentation mask for each detection box. + detection_boundaries: contains an object boundary for each detection box. + detection_keypoints: contains detection keypoints for each detection box. + num_detections: number of detections in the batch. + """ + + source_id = 'source_id' + key = 'key' + detection_boxes = 'detection_boxes' + detection_scores = 'detection_scores' + detection_classes = 'detection_classes' + detection_masks = 'detection_masks' + detection_boundaries = 'detection_boundaries' + detection_keypoints = 'detection_keypoints' + num_detections = 'num_detections' + + +class BoxListFields(object): + """Naming conventions for BoxLists. + + Attributes: + boxes: bounding box coordinates. + classes: classes per bounding box. + scores: scores per bounding box. + weights: sample weights per bounding box. + objectness: objectness score per bounding box. + masks: masks per bounding box. + boundaries: boundaries per bounding box. + keypoints: keypoints per bounding box. + keypoint_heatmaps: keypoint heatmaps per bounding box. + """ + boxes = 'boxes' + classes = 'classes' + scores = 'scores' + weights = 'weights' + objectness = 'objectness' + masks = 'masks' + boundaries = 'boundaries' + keypoints = 'keypoints' + keypoint_heatmaps = 'keypoint_heatmaps' + + +class TfExampleFields(object): + """TF-example proto feature names for object detection. + + Holds the standard feature names to load from an Example proto for object + detection. + + Attributes: + image_encoded: JPEG encoded string + image_format: image format, e.g. 
"JPEG" + filename: filename + channels: number of channels of image + colorspace: colorspace, e.g. "RGB" + height: height of image in pixels, e.g. 462 + width: width of image in pixels, e.g. 581 + source_id: original source of the image + object_class_text: labels in text format, e.g. ["person", "cat"] + object_class_label: labels in numbers, e.g. [16, 8] + object_bbox_xmin: xmin coordinates of groundtruth box, e.g. 10, 30 + object_bbox_xmax: xmax coordinates of groundtruth box, e.g. 50, 40 + object_bbox_ymin: ymin coordinates of groundtruth box, e.g. 40, 50 + object_bbox_ymax: ymax coordinates of groundtruth box, e.g. 80, 70 + object_view: viewpoint of object, e.g. ["frontal", "left"] + object_truncated: is object truncated, e.g. [true, false] + object_occluded: is object occluded, e.g. [true, false] + object_difficult: is object difficult, e.g. [true, false] + object_group_of: is object a single object or a group of objects + object_depiction: is object a depiction + object_is_crowd: [DEPRECATED, use object_group_of instead] + is the object a single object or a crowd + object_segment_area: the area of the segment. + object_weight: a weight factor for the object's bounding box. + instance_masks: instance segmentation masks. + instance_boundaries: instance boundaries. + instance_classes: Classes for each instance segmentation mask. + detection_class_label: class label in numbers. + detection_bbox_ymin: ymin coordinates of a detection box. + detection_bbox_xmin: xmin coordinates of a detection box. + detection_bbox_ymax: ymax coordinates of a detection box. + detection_bbox_xmax: xmax coordinates of a detection box. + detection_score: detection score for the class label and box. + """ + image_encoded = 'image/encoded' + image_format = 'image/format' # format is reserved keyword + filename = 'image/filename' + channels = 'image/channels' + colorspace = 'image/colorspace' + height = 'image/height' + width = 'image/width' + source_id = 'image/source_id' + object_class_text = 'image/object/class/text' + object_class_label = 'image/object/class/label' + object_bbox_ymin = 'image/object/bbox/ymin' + object_bbox_xmin = 'image/object/bbox/xmin' + object_bbox_ymax = 'image/object/bbox/ymax' + object_bbox_xmax = 'image/object/bbox/xmax' + object_view = 'image/object/view' + object_truncated = 'image/object/truncated' + object_occluded = 'image/object/occluded' + object_difficult = 'image/object/difficult' + object_group_of = 'image/object/group_of' + object_depiction = 'image/object/depiction' + object_is_crowd = 'image/object/is_crowd' + object_segment_area = 'image/object/segment/area' + object_weight = 'image/object/weight' + instance_masks = 'image/segmentation/object' + instance_boundaries = 'image/boundaries/object' + instance_classes = 'image/segmentation/object/class' + detection_class_label = 'image/detection/label' + detection_bbox_ymin = 'image/detection/bbox/ymin' + detection_bbox_xmin = 'image/detection/bbox/xmin' + detection_bbox_ymax = 'image/detection/bbox/ymax' + detection_bbox_xmax = 'image/detection/bbox/xmax' + detection_score = 'image/detection/score' diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__init__.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__init__.py new file mode 100644 index 0000000..bf4c822 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__init__.py @@ -0,0 +1,5 @@ +from .distributed import DistributedSampler +from .grouped_batch_sampler 
import GroupedBatchSampler +from .iteration_based_batch_sampler import IterationBasedBatchSampler + +__all__ = ["DistributedSampler", "GroupedBatchSampler", "IterationBasedBatchSampler"] diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__pycache__/__init__.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..713ff06 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__pycache__/__init__.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__pycache__/__init__.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000..365d831 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__pycache__/__init__.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__pycache__/distributed.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__pycache__/distributed.cpython-37.pyc new file mode 100644 index 0000000..25e95b6 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__pycache__/distributed.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__pycache__/distributed.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__pycache__/distributed.cpython-39.pyc new file mode 100644 index 0000000..7ba0877 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__pycache__/distributed.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__pycache__/grouped_batch_sampler.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__pycache__/grouped_batch_sampler.cpython-37.pyc new file mode 100644 index 0000000..adfde35 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__pycache__/grouped_batch_sampler.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__pycache__/grouped_batch_sampler.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__pycache__/grouped_batch_sampler.cpython-39.pyc new file mode 100644 index 0000000..73edeb8 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__pycache__/grouped_batch_sampler.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__pycache__/iteration_based_batch_sampler.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__pycache__/iteration_based_batch_sampler.cpython-37.pyc new file mode 100644 index 0000000..1b851b8 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__pycache__/iteration_based_batch_sampler.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__pycache__/iteration_based_batch_sampler.cpython-39.pyc 
b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__pycache__/iteration_based_batch_sampler.cpython-39.pyc new file mode 100644 index 0000000..4b4a018 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/__pycache__/iteration_based_batch_sampler.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/distributed.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/distributed.py new file mode 100644 index 0000000..3b127d9 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/distributed.py @@ -0,0 +1,65 @@ +# Code is copy-pasted exactly as in torch.utils.dataset.distributed. +# FIXME remove this once c10d fixes the bug it has +import math +import torch +import torch.distributed as dist +from torch.utils.data.sampler import Sampler + + +class DistributedSampler(Sampler): + """Sampler that restricts dataset loading to a subset of the dataset. + It is especially useful in conjunction with + :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each + process can pass a DistributedSampler instance as a DataLoader sampler, + and load a subset of the original dataset that is exclusive to it. + .. note:: + Dataset is assumed to be of constant size. + Arguments: + dataset: Dataset used for sampling. + num_replicas (optional): Number of processes participating in + distributed training. + rank (optional): Rank of the current process within num_replicas. + """ + + def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True): + if num_replicas is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + num_replicas = dist.get_world_size() + if rank is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + rank = dist.get_rank() + self.dataset = dataset + self.num_replicas = num_replicas + self.rank = rank + self.epoch = 0 + self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas)) + self.total_size = self.num_samples * self.num_replicas + self.shuffle = shuffle + + def __iter__(self): + if self.shuffle: + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch) + indices = torch.randperm(len(self.dataset), generator=g).tolist() + else: + indices = torch.arange(len(self.dataset)).tolist() + + # add extra samples to make it evenly divisible + indices += indices[: (self.total_size - len(indices))] + assert len(indices) == self.total_size + + # subsample + offset = self.num_samples * self.rank + indices = indices[offset : offset + self.num_samples] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self): + return self.num_samples + + def set_epoch(self, epoch): + self.epoch = epoch diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/grouped_batch_sampler.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/grouped_batch_sampler.py new file mode 100644 index 0000000..dff001f --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/grouped_batch_sampler.py @@ -0,0 +1,108 @@ +# Modified based on https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/data/samplers/grouped_batch_sampler.py +import itertools + +import torch +from torch.utils.data.sampler import 
BatchSampler +from torch.utils.data.sampler import Sampler + + +class GroupedBatchSampler(BatchSampler): + """ + Wraps another sampler to yield a mini-batch of indices. + It enforces that elements from the same group should appear in groups of batch_size. + It also tries to provide mini-batches which follows an ordering which is + as close as possible to the ordering from the original sampler. + + Arguments: + sampler (Sampler): Base sampler. + batch_size (int): Size of mini-batch. + drop_uneven (bool): If ``True``, the sampler will drop the batches whose + size is less than ``batch_size`` + + """ + + def __init__(self, sampler, group_ids, batch_size, drop_uneven=False): + if not isinstance(sampler, Sampler): + raise ValueError( + "sampler should be an instance of " + "torch.utils.dataset.Sampler, but got sampler={}".format(sampler) + ) + self.sampler = sampler + self.group_ids = torch.as_tensor(group_ids) + assert self.group_ids.dim() == 1 + self.batch_size = batch_size + self.drop_uneven = drop_uneven + + self.groups = torch.unique(self.group_ids).sort(0)[0] + + def _prepare_batches(self): + dataset_size = len(self.group_ids) + # get the sampled indices from the sampler + sampled_ids = torch.as_tensor(list(self.sampler)) + # potentially not all elements of the dataset were sampled + # by the sampler (e.g., DistributedSampler). + # construct a tensor which contains -1 if the element was + # not sampled, and a non-negative number indicating the + # order where the element was sampled. + # for example. if sampled_ids = [3, 1] and dataset_size = 5, + # the order is [-1, 1, -1, 0, -1] + order = torch.full((dataset_size,), -1, dtype=torch.int64) + order[sampled_ids] = torch.arange(len(sampled_ids)) + + # get a mask with the elements that were sampled + mask = order >= 0 + + # find the elements that belong to each individual cluster + clusters = [(self.group_ids == i) & mask for i in self.groups] + # get relative order of the elements inside each cluster + # that follows the order from the sampler + relative_order = [order[cluster] for cluster in clusters] + # with the relative order, find the absolute order in the + # sampled space + permutation_ids = [s[s.sort()[1]] for s in relative_order] + # permute each cluster so that they follow the order from + # the sampler + permuted_clusters = [sampled_ids[idx] for idx in permutation_ids] + + # splits each cluster in batch_size, and merge as a list of tensors + splits = [c.split(self.batch_size) for c in permuted_clusters] + merged = tuple(itertools.chain.from_iterable(splits)) + + # now each batch internally has the right order, but + # they are grouped by clusters. Find the permutation between + # different batches that brings them as close as possible to + # the order that we have in the sampler. 
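+ # Concrete example: with group_ids = [0, 0, 1, 1, 0, 1], a sampler order of
+ # [5, 3, 0, 1, 4, 2] and batch_size = 2, the per-group splits above are
+ # [[0, 1], [4]] and [[5, 3], [2]], and the reordering below yields
+ # [[5, 3], [0, 1], [4], [2]] (with drop_uneven=True the two singleton batches are dropped).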
For that, we will consider the + # ordering as coming from the first element of each batch, and sort + # correspondingly + first_element_of_batch = [t[0].item() for t in merged] + # get and inverse mapping from sampled indices and the position where + # they occur (as returned by the sampler) + inv_sampled_ids_map = {v: k for k, v in enumerate(sampled_ids.tolist())} + # from the first element in each batch, get a relative ordering + first_index_of_batch = torch.as_tensor( + [inv_sampled_ids_map[s] for s in first_element_of_batch] + ) + + # permute the batches so that they approximately follow the order + # from the sampler + permutation_order = first_index_of_batch.sort(0)[1].tolist() + # finally, permute the batches + batches = [merged[i].tolist() for i in permutation_order] + + if self.drop_uneven: + kept = [] + for batch in batches: + if len(batch) == self.batch_size: + kept.append(batch) + batches = kept + return batches + + def __iter__(self): + batches = self._prepare_batches() + self._batches = batches + return iter(batches) + + def __len__(self): + if not hasattr(self, "_batches"): + self._batches = self._prepare_batches() + return len(self._batches) diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/iteration_based_batch_sampler.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/iteration_based_batch_sampler.py new file mode 100644 index 0000000..7d3f5a5 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/samplers/iteration_based_batch_sampler.py @@ -0,0 +1,31 @@ +# Adopted from https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/data/samplers/iteration_based_batch_sampler.py +from torch.utils.data.sampler import BatchSampler + + +class IterationBasedBatchSampler(BatchSampler): + """ + Wraps a BatchSampler, resampling from it until + a specified number of iterations have been sampled + """ + + def __init__(self, batch_sampler, num_iterations, start_iter=0): + self.batch_sampler = batch_sampler + self.num_iterations = num_iterations + self.start_iter = start_iter + + def __iter__(self): + iteration = self.start_iter + while iteration <= self.num_iterations: + # if the underlying sampler has a set_epoch method, like + # DistributedSampler, used for making each process see + # a different split of the dataset, then set it + if hasattr(self.batch_sampler.sampler, "set_epoch"): + self.batch_sampler.sampler.set_epoch(iteration) + for batch in self.batch_sampler: + iteration += 1 + if iteration > self.num_iterations: + break + yield batch + + def __len__(self): + return self.num_iterations diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__init__.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__init__.py new file mode 100644 index 0000000..1f06f9b --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__init__.py @@ -0,0 +1 @@ +from .build import build_transforms, build_object_transforms \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__pycache__/__init__.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..5d31357 Binary files /dev/null and 
b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__pycache__/__init__.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__pycache__/__init__.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000..c7c2249 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__pycache__/__init__.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__pycache__/build.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__pycache__/build.cpython-37.pyc new file mode 100644 index 0000000..d1e9672 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__pycache__/build.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__pycache__/build.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__pycache__/build.cpython-39.pyc new file mode 100644 index 0000000..7829900 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__pycache__/build.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__pycache__/object_transforms.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__pycache__/object_transforms.cpython-37.pyc new file mode 100644 index 0000000..b83d36e Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__pycache__/object_transforms.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__pycache__/object_transforms.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__pycache__/object_transforms.cpython-39.pyc new file mode 100644 index 0000000..4771949 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__pycache__/object_transforms.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__pycache__/video_transforms.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__pycache__/video_transforms.cpython-37.pyc new file mode 100644 index 0000000..1d819d3 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__pycache__/video_transforms.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__pycache__/video_transforms.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__pycache__/video_transforms.cpython-39.pyc new file mode 100644 index 0000000..ec495a2 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/__pycache__/video_transforms.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/build.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/build.py new file mode 100644 index 0000000..ac2848f --- /dev/null +++ 
b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/build.py @@ -0,0 +1,65 @@ +from . import video_transforms as T +from . import object_transforms as OT + + +def build_transforms(cfg, is_train=True): + # build transforms for training or testing + if is_train: + min_size = cfg.INPUT.MIN_SIZE_TRAIN + max_size = cfg.INPUT.MAX_SIZE_TRAIN + color_jitter = cfg.INPUT.COLOR_JITTER + flip_prob = 0.5 + slow_jitter = cfg.INPUT.SLOW_JITTER + else: + min_size = cfg.INPUT.MIN_SIZE_TEST + max_size = cfg.INPUT.MAX_SIZE_TEST + color_jitter = False + flip_prob = 0 + slow_jitter = False + + frame_num = cfg.INPUT.FRAME_NUM + sample_rate = cfg.INPUT.FRAME_SAMPLE_RATE + + if color_jitter: + color_transform = T.ColorJitter( + cfg.INPUT.HUE_JITTER, cfg.INPUT.SAT_JITTER, cfg.INPUT.VAL_JITTER + ) + else: + color_transform = T.Identity() + + to_bgr = cfg.INPUT.TO_BGR + normalize_transform = T.Normalize( + mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD, to_bgr=to_bgr + ) + + tau = cfg.INPUT.TAU + alpha = cfg.INPUT.ALPHA + + transform = T.Compose( + [ + T.TemporalCrop(frame_num, sample_rate), + T.Resize(min_size, max_size), + color_transform, + T.RandomHorizontalFlip(flip_prob), + T.ToTensor(), + normalize_transform, + T.SlowFastCrop(tau, alpha, slow_jitter), + ] + ) + + return transform + + +def build_object_transforms(cfg, is_train=True): + # build transforms for object boxes; must be kept consistent with the video transforms. + if is_train: + flip_prob = 0.5 + else: + flip_prob = 0 + + transform = OT.Compose([ + OT.PickTop(cfg.MODEL.IA_STRUCTURE.MAX_OBJECT), + OT.Resize(), + OT.RandomHorizontalFlip(flip_prob) + ]) + return transform \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/object_transforms.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/object_transforms.py new file mode 100644 index 0000000..e298779 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/object_transforms.py @@ -0,0 +1,42 @@ + +class Compose(object): + # Class used to compose different kinds of object transforms + def __init__(self, transforms): + self.transforms = transforms + + def __call__(self, object_boxes, transform_randoms): + # should reuse the random variables recorded by the video transforms + for t in self.transforms: + object_boxes = t(object_boxes, transform_randoms) + return object_boxes + + +class PickTop(object): + # pick the top-scored object boxes.
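A minimal, self-contained sketch of how the object-box pipeline built by build_object_transforms above replays the random decisions recorded by the video pipeline; `BoxStub` and the sample values are hypothetical stand-ins for the BoxList-style container and the config-driven sizes these transforms actually receive:

```python
import random

class BoxStub:
    """Hypothetical stand-in for the BoxList container of detected object boxes."""
    def top_k(self, k):         # keep the k highest-scored boxes (what PickTop calls)
        return self
    def resize(self, size):     # rescale box coordinates to the resized clip (Resize)
        return self
    def transpose(self, axis):  # mirror box coordinates horizontally (RandomHorizontalFlip)
        return self

# Values the video transforms would have stored while processing the clip.
transform_randoms = {"Resize": (455, 256), "Flip": random.random()}

boxes = BoxStub().top_k(5)
boxes = boxes.resize(transform_randoms["Resize"])  # reuse the clip's resize target
if transform_randoms["Flip"] < 0.5:                # mirror the clip's flip decision
    boxes = boxes.transpose(0)
```

The key point is that `transform_randoms` is written once by the video transforms (video_transforms.py below) and only read here, so boxes and frames stay geometrically consistent.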
+ def __init__(self, top_k): + self.top_k = top_k + + def __call__(self, objects, _): + objects = objects.top_k(self.top_k) + return objects + + +class Resize(object): + def __call__(self, object_boxes, transform_randoms): + # resize according to video transforms + size = transform_randoms["Resize"] + if object_boxes is not None: + object_boxes = object_boxes.resize(size) + return object_boxes + + +class RandomHorizontalFlip(object): + def __init__(self, prob=0.5): + self.prob = prob + + def __call__(self, object_boxes, transform_randoms): + # flip according to video transforms + flip_random = transform_randoms["Flip"] + if flip_random < self.prob: + object_boxes.transpose(0) + return object_boxes diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/video_transforms.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/video_transforms.py new file mode 100644 index 0000000..f856167 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/dataset/transforms/video_transforms.py @@ -0,0 +1,245 @@ +import torch +import random +import numpy as np +import cv2 + +cv2.setNumThreads(0) + + +class Compose(object): + # Compose different kinds of video transforms into one. + def __init__(self, transforms): + self.transforms = transforms + + def __call__(self, videos, target): + transform_randoms = {} + for t in self.transforms: + videos, target, transform_randoms = t((videos, target, transform_randoms)) + return videos, target, transform_randoms + + def __repr__(self): + format_string = self.__class__.__name__ + "(" + for t in self.transforms: + format_string += "\n" + format_string += " {0}".format(t) + format_string += "\n)" + return format_string + + +class TemporalCrop(object): + def __init__(self, frame_num, sample_rate, temporal_jitter=0): + self.frame_num = frame_num + self.sample_rate = sample_rate + self.temporal_jitter = temporal_jitter + + def __call__(self, results): + clip, target, transform_randoms = results + # crop the input frames from raw clip + raw_frame_num = clip.shape[0] + + # determine frame start based on frame_num, sample_rate and the jitter shift. + frame_start = (raw_frame_num - self.frame_num * self.sample_rate) // 2 + ( + self.sample_rate - 1) // 2 + self.temporal_jitter + idx = np.arange(frame_start, frame_start + self.frame_num * self.sample_rate, self.sample_rate) + idx = np.clip(idx, 0, raw_frame_num - 1) + + clip = clip[idx] + return clip, target, transform_randoms + + +class Resize(object): + def __init__(self, min_size, max_size): + self.min_size = min_size + self.max_size = max_size + + def get_size(self, image_size): + # Calculate output size according to min_size, max_size. + h, w = image_size + size = self.min_size + + # # max_size###### + # max_size = self.max_size + # if max_size is not None: + # min_original_size = float(min((w, h))) + # max_original_size = float(max((w, h))) + # if max_original_size / min_original_size * size > max_size: + # size = int(round(max_size * min_original_size / max_original_size)) + # # max_size###### + + if (w <= h and w == size) or (h <= w and h == size): + return (h, w) + + if w < h: + ow = size + oh = int(size * h / w) + else: + oh = size + ow = int(size * w / h) + + return (ow, oh) + + def __call__(self, results): + # Input clip should be [TxHxWxC](uint8) ndarray. 
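+ # Worked example: with min_size = 256 and a 720x1280 (HxW) input frame, get_size (defined above)
+ # returns (455, 256); the short side is scaled to 256, the long side keeps the aspect ratio,
+ # and the (w, h) ordering is what cv2.resize expects.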
+ clip, target, transform_randoms = results + if isinstance(clip, list): + clip = np.array(clip) + size = self.get_size(clip.shape[1:3]) + clip_new = np.zeros((clip.shape[0], size[1], size[0], clip.shape[3]), dtype=np.uint8) + for i in range(clip.shape[0]): + cv2.resize(clip[i], size, clip_new[i]) + if target is not None: + target = target.resize(size) + # Store the size for object box transforms. + transform_randoms["Resize"] = size + return clip_new, target, transform_randoms + + +class RandomClip(object): + def __init__(self, is_train): + self.is_train = is_train + self.size = 224 # (w, h) + + def __call__(self, results): + # Input clip should be [TxHxWxC](uint8) ndarray. + # size = self.get_size(clip.shape[1:3]) + clip, target, transform_randoms = results + if self.is_train: + size = self.size + clip_new = np.zeros((clip.shape[0], size, size, clip.shape[3]), dtype=np.uint8) + + image_height, image_width = clip.shape[1], clip.shape[2] + self.tl_x = random.random() + self.tl_y = random.random() + x1 = int(self.tl_x * (image_width - size)) + y1 = int(self.tl_y * (image_height - size)) + x2 = x1 + size + y2 = y1 + size + + for i in range(clip.shape[0]): + # cv2.resize(clip[i], size, clip_new[i]) + assert clip_new[i].shape == clip[i, y1:y2, x1:x2, :].shape, \ + print('x1={}, y1={}, x2={}, y2={}, ori_size={}'.format(x1, y1, x2, y2, clip.shape)) + clip_new[i] = clip[i, y1:y2, x1:x2, :] + if target is not None: + # target = target.resize(size) + target = target.crop([x1, y1, x2, y2]) + else: + clip_new = clip + # Store the size for object box transforms. + # transform_randoms["Resize"] = size + return clip_new, target, transform_randoms + + +class ColorJitter(object): + def __init__(self, hue_shift, sat_shift, val_shift): + # color jitter in hsv space. H: 0~360, circular S: 0.0~1.0 V: 0.0~1.0 + self.hue_bound = int(round(hue_shift / 2)) + self.sat_bound = int(round(sat_shift * 255)) + self.val_bound = int(round(val_shift * 255)) + + def __call__(self, results): + # Convert: RGB->HSV + clip, target, transform_randoms = results + clip_hsv = np.zeros_like(clip) + for i in range(clip.shape[0]): + cv2.cvtColor(clip[i], cv2.COLOR_RGB2HSV, clip_hsv[i]) + clip_hsv = clip_hsv.astype(np.int32) + + # Jittering. + hue_s = random.randint(-self.hue_bound, self.hue_bound) + clip_hsv[..., 0] = (clip_hsv[..., 0] + hue_s + 180) % 180 + + sat_s = random.randint(-self.sat_bound, self.sat_bound) + clip_hsv[..., 1] = np.clip(clip_hsv[..., 1] + sat_s, 0, 255) + + val_s = random.randint(-self.val_bound, self.val_bound) + clip_hsv[..., 2] = np.clip(clip_hsv[..., 2] + val_s, 0, 255) + + clip_hsv = clip_hsv.astype(np.uint8) + + # Convert: HSV->RGB + clip = np.zeros_like(clip) + for i in range(clip.shape[0]): + cv2.cvtColor(clip_hsv[i], cv2.COLOR_HSV2RGB, clip[i]) + + return clip, target, transform_randoms + + +class RandomHorizontalFlip(object): + def __init__(self, prob=0.5): + self.prob = prob + + def __call__(self, results): + # Input clip should be [TxHxWxC] ndarray.(uint8) + clip, target, transform_randoms = results + flip_random = random.random() + if flip_random < self.prob: + clip = np.flip(clip, 2) + if target is not None: + target = target.transpose(0) + + # Store the random variable for object box transforms + transform_randoms["Flip"] = flip_random + return clip, target, transform_randoms + + +class ToTensor(object): + def __call__(self, results): + # Input clip should be [TxHxWxC] ndarray. + # Convert to [CxTxHxW] tensor. 
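A self-contained sketch of the two steps that follow: the [T, H, W, C] to [C, T, H, W] layout change done here, and the per-channel standardization applied by Normalize further below. The mean/std values are placeholders rather than the project's actual PIXEL_MEAN/PIXEL_STD:

```python
import numpy as np
import torch

clip = np.zeros((32, 256, 455, 3), dtype=np.uint8)               # [T, H, W, C] uint8 clip
tensor = torch.from_numpy(clip.transpose((3, 0, 1, 2)).astype(np.float32))
assert tensor.shape == (3, 32, 256, 455)                          # [C, T, H, W] float tensor

mean, std = [110.0, 103.0, 96.0], [39.0, 38.0, 40.0]              # placeholder per-channel stats
for t, m, s in zip(tensor, mean, std):                            # same loop as Normalize below
    t.sub_(m).div_(s)                                             # (x - mean) / std, in place
```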
+ clip, target, transform_randoms = results + return torch.from_numpy(clip.transpose((3, 0, 1, 2)).astype(np.float32)), target, transform_randoms + + +class Normalize(object): + def __init__(self, mean, std, to_bgr=False): + self.mean = mean + self.std = std + self.to_bgr = to_bgr + + def video_normalize(self, tensor, mean, std): + # Copied from torchvision.transforms.functional.normalize but remove the type check of tensor. + for t, m, s in zip(tensor, mean, std): + t.sub_(m).div_(s) + return tensor + + def __call__(self, results): + clip, target, transform_randoms = results + if self.to_bgr: + clip = clip[[2, 1, 0]] + # normalize: (x-mean)/std + clip = self.video_normalize(clip, self.mean, self.std) + return clip, target, transform_randoms + + +class SlowFastCrop(object): + # Class used to split frames for slow pathway and fast pathway. + def __init__(self, tau, alpha, slow_jitter=False): + self.tau = tau + self.alpha = alpha + self.slow_jitter = slow_jitter + + def __call__(self, results): + clip, target, transform_randoms = results + if self.slow_jitter: + # if jitter, random choose a start + slow_start = random.randint(0, self.tau - 1) + else: + # if no jitter, select the middle + slow_start = (self.tau - 1) // 2 + slow_clip = clip[:, slow_start::self.tau, :, :] + + fast_stride = self.tau // self.alpha + fast_start = (fast_stride - 1) // 2 + fast_clip = clip[:, fast_start::fast_stride, :, :] + + return [slow_clip, fast_clip], target, transform_randoms + + +class Identity(object): + # Return what is received. Do nothing. + def __init__(self): + pass + + def __call__(self, results): + return results diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/engine/__init__.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/engine/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/engine/inference.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/engine/inference.py new file mode 100644 index 0000000..53d8156 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/engine/inference.py @@ -0,0 +1,192 @@ +# modified from https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/engine/inference.py +import logging +import os + +import torch +from tqdm import tqdm +import time +import datetime + +from alphaction.dataset.datasets.evaluation import evaluate +from alphaction.utils.comm import get_rank, is_main_process, all_gather, gather, synchronize, get_world_size +from alphaction.structures.memory_pool import MemoryPool + + +def compute_on_dataset_1stage(model, data_loader, device): + # single stage inference, for model without memory features + cpu_device = torch.device("cpu") + results_dict = {} + if get_world_size() == 1: + extra_args = {} + else: + rank = get_rank() + extra_args = dict(desc="rank {}".format(rank)) + for batch in tqdm(data_loader, **extra_args): + slow_clips, fast_clips, boxes, objects, extras, video_ids = batch + slow_clips = slow_clips.to(device) + fast_clips = fast_clips.to(device) + boxes = [box.to(device) for box in boxes] + objects = [None if (box is None) else box.to(device) for box in objects] + + with torch.no_grad(): + output = model(slow_clips, fast_clips, boxes, objects, extras) + output = [o.to(cpu_device) for o in output] + results_dict.update( + {video_id: result for video_id, result in zip(video_ids, output)} + ) + + return results_dict + + +def compute_on_dataset_2stage(model, data_loader, 
device, logger): + # two stage inference, for model with memory features. + # first extract features and then do the inference + cpu_device = torch.device("cpu") + num_devices = get_world_size() + dataset = data_loader.dataset + if num_devices == 1: + extra_args = {} + else: + rank = get_rank() + extra_args = dict(desc="rank {}".format(rank)) + + loader_len = len(data_loader) + person_feature_pool = MemoryPool() + batch_info_list = [None]*loader_len + logger.info("Stage 1: extracting clip features.") + start_time = time.time() + + for i, batch in enumerate(tqdm(data_loader, **extra_args)): + slow_clips, fast_clips, boxes, objects, extras, video_ids = batch + slow_clips = slow_clips.to(device) + fast_clips = fast_clips.to(device) + boxes = [box.to(device) for box in boxes] + objects = [None if (box is None) else box.to(device) for box in objects] + movie_ids = [e["movie_id"] for e in extras] + timestamps = [e["timestamp"] for e in extras] + with torch.no_grad(): + feature = model(slow_clips, fast_clips, boxes, objects, part_forward=0) + person_feature = [ft.to(cpu_device) for ft in feature[0]] + object_feature = [ft.to(cpu_device) for ft in feature[1]] + # store person features into memory pool + for movie_id, timestamp, p_ft, o_ft in zip(movie_ids, timestamps, person_feature, object_feature): + person_feature_pool[movie_id, timestamp] = p_ft + # store other information in list, for further inference + batch_info_list[i] = (movie_ids, timestamps, video_ids, object_feature) + + # gather feature pools from different ranks + synchronize() + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=total_time)) + logger.info( + "Stage 1 time: {} ({} s / video per device, on {} devices)".format( + total_time_str, total_time * num_devices / len(dataset), num_devices + ) + ) + feature_pool = all_gather(person_feature_pool) + all_feature_pool_p = MemoryPool() + all_feature_pool_p.update_list(feature_pool) + del feature_pool, person_feature_pool + + # do the inference + results_dict = {} + logger.info("Stage 2: predicting with extracted feature.") + start_time = time.time() + for movie_ids, timestamps, video_ids, object_feature in tqdm(batch_info_list, **extra_args): + current_feat_p = [all_feature_pool_p[movie_id, timestamp].to(device) + for movie_id, timestamp in zip(movie_ids, timestamps)] + current_feat_o = [ft_o.to(device) for ft_o in object_feature] + extras = dict( + person_pool=all_feature_pool_p, + movie_ids=movie_ids, + timestamps=timestamps, + current_feat_p=current_feat_p, + current_feat_o=current_feat_o, + ) + with torch.no_grad(): + output = model(None, None, None, None, extras=extras, part_forward=1) + output = [o.to(cpu_device) for o in output] + results_dict.update( + {video_id: result for video_id, result in zip(video_ids, output)} + ) + synchronize() + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=total_time)) + logger.info( + "Stage 2 time: {} ({} s / video per device, on {} devices)".format( + total_time_str, total_time * num_devices / len(dataset), num_devices + ) + ) + + return results_dict + + +def compute_on_dataset(model, data_loader, device, logger, mem_active): + model.eval() + if mem_active: + results_dict = compute_on_dataset_2stage(model, data_loader, device, logger) + else: + results_dict = compute_on_dataset_1stage(model, data_loader, device) + + return results_dict + + +def _accumulate_predictions_from_multiple_gpus(predictions_per_gpu): + all_predictions = gather(predictions_per_gpu) + if not 
is_main_process(): + return + # merge the list of dicts + predictions = {} + for p in all_predictions: + predictions.update(p) + # convert a dict where the key is the index in a list + video_ids = list(sorted(predictions.keys())) + if len(video_ids) != video_ids[-1] + 1: + logger = logging.getLogger("alphaction.inference") + logger.warning( + "Number of videos that were gathered from multiple processes is not " + "a contiguous set. Some images might be missing from the evaluation" + ) + + # convert to a list + predictions = [predictions[i] for i in video_ids] + return predictions + + +def inference( + model, + data_loader, + dataset_name, + mem_active=False, + output_folder=None, +): + # convert to a torch.device for efficiency + device = torch.device("cuda") + num_devices = get_world_size() + logger = logging.getLogger("alphaction.inference") + dataset = data_loader.dataset + logger.info("Start evaluation on {} dataset({} videos).".format(dataset_name, len(dataset))) + start_time = time.time() + predictions = compute_on_dataset(model, data_loader, device, logger, mem_active) + # wait for all processes to complete before measuring the time + synchronize() + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=total_time)) + logger.info( + "Total inference time: {} ({} s / video per device, on {} devices)".format( + total_time_str, total_time * num_devices / len(dataset), num_devices + ) + ) + + predictions = _accumulate_predictions_from_multiple_gpus(predictions) + if not is_main_process(): + return + + if output_folder: + torch.save(predictions, os.path.join(output_folder, "predictions.pth")) + + return evaluate( + dataset=dataset, + predictions=predictions, + output_folder=output_folder, + ) \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/engine/trainer.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/engine/trainer.py new file mode 100644 index 0000000..c6192b3 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/engine/trainer.py @@ -0,0 +1,171 @@ +import datetime +import logging +import time + +import torch + +from alphaction.utils.metric_logger import MetricLogger +from alphaction.engine.inference import inference +from alphaction.utils.comm import synchronize, reduce_dict, all_gather +from alphaction.structures.memory_pool import MemoryPool + + +def do_train( + model, + data_loader, + optimizer, + scheduler, + checkpointer, + device, + checkpoint_period, + arguments, + tblogger, + val_period, + dataset_names_val, + data_loaders_val, + distributed, + mem_active, +): + logger = logging.getLogger("alphaction.trainer") + logger.info("Start training") + meters = MetricLogger(delimiter=" ") + max_iter = len(data_loader) + start_iter = arguments["iteration"] + person_pool = arguments["person_pool"] + model.train() + start_training_time = time.time() + end = time.time() + losses_reduced = torch.tensor(0.0) + + for iteration, (slow_video, fast_video, boxes, objects, extras, _) in enumerate(data_loader, start_iter): + data_time = time.time() - end + iteration = iteration + 1 + arguments["iteration"] = iteration + + slow_video = slow_video.to(device) + fast_video = fast_video.to(device) + boxes = [box.to(device) for box in boxes] + objects = [None if (box is None) else box.to(device) for box in objects] + mem_extras = {} + if mem_active: + movie_ids = [e["movie_id"] for e in extras] + timestamps = [e["timestamp"] for e in extras] + mem_extras["person_pool"] = 
person_pool + mem_extras["movie_ids"] = movie_ids + mem_extras["timestamps"] = timestamps + mem_extras["cur_loss"] = losses_reduced.item() + + loss_dict, weight_dict, metric_dict, pooled_feature = model(slow_video, fast_video, boxes, objects, mem_extras) + + losses = sum([loss_dict[k]*weight_dict[k] for k in loss_dict]) + + # reduce losses over all GPUs for logging purposes + loss_dict["total_loss"] = losses.detach().clone() + loss_dict_reduced = reduce_dict(loss_dict) + metric_dict_reduced = reduce_dict(metric_dict) + meters.update(**loss_dict_reduced, **metric_dict_reduced) + losses_reduced = loss_dict_reduced.pop("total_loss") + + optimizer.zero_grad() + losses.backward() + optimizer.step() + + # update mem pool + if mem_active: + person_feature = [ft.to("cpu") for ft in pooled_feature[0]] + + feature_update = MemoryPool() + for movie_id, timestamp, new_feature in zip(movie_ids, timestamps, person_feature): + new_feature.add_field("loss_tag", losses_reduced.item()) + feature_update[movie_id, timestamp] = new_feature + + feature_update_list = all_gather(feature_update) + person_pool.update_list(feature_update_list) + + batch_time = time.time() - end + end = time.time() + meters.update(time=batch_time, data=data_time) + + eta_seconds = meters.time.global_avg * (max_iter - iteration) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + + if iteration % 20 == 0 or iteration == max_iter: + logger.info( + meters.delimiter.join( + [ + "eta: {eta}", + "iter: {iter}", + "{meters}", + "lr: {lr:.6f}", + "max mem: {memory:.0f}", + ] + ).format( + eta=eta_string, + iter=iteration, + meters=str(meters), + lr=optimizer.param_groups[0]["lr"], + memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0, + ) + ) + if tblogger is not None: + for name, meter in meters.meters.items(): + tblogger.add_scalar(name, meter.median, iteration) + tblogger.add_scalar("lr", optimizer.param_groups[0]["lr"], iteration) + + scheduler.step() + + if iteration % checkpoint_period == 0: + checkpointer.save("model_{:07d}".format(iteration), **arguments) + + if iteration == max_iter: + arguments.pop("person_pool", None) + checkpointer.save("model_final", **arguments) + + if dataset_names_val and iteration % val_period == 0: + # do validation + val_in_train( + model, + dataset_names_val, + data_loaders_val, + tblogger, + iteration, + distributed, + mem_active + ) + end = time.time() + + total_training_time = time.time() - start_training_time + total_time_str = str(datetime.timedelta(seconds=total_training_time)) + logger.info( + "Total training time: {} ({:.4f} s / it)".format( + total_time_str, total_training_time / (max_iter) + ) + ) + + +def val_in_train( + model, + dataset_names_val, + data_loaders_val, + tblogger, + iteration, + distributed, + mem_active, +): + if distributed: + model_val = model.module + else: + model_val = model + for dataset_name, data_loader_val in zip(dataset_names_val, data_loaders_val): + eval_res = inference( + model_val, + data_loader_val, + dataset_name, + mem_active, + ) + synchronize() + if tblogger is not None: + eval_res, _ = eval_res + total_mAP = eval_res['PascalBoxes_Precision/mAP@0.5IOU'] + tblogger.add_scalar(dataset_name + '_mAP_0.5IOU', total_mAP, iteration) + model.train() \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__init__.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__init__.py new file mode 100644 index 0000000..e8ff8c7 --- /dev/null +++ 
b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__init__.py @@ -0,0 +1,19 @@ +import torch + +from .roi_align_3d import ROIAlign3d +# from .roi_align_3d import roi_align_3d +from .roi_pool_3d import ROIPool3d +from .roi_pool_3d import roi_pool_3d +from .batch_norm import FrozenBatchNorm1d, FrozenBatchNorm2d, FrozenBatchNorm3d +from .sigmoid_focal_loss import SigmoidFocalLoss +from .softmax_focal_loss import SoftmaxFocalLoss + +# __all__ = ["roi_align_3d", "ROIAlign3d", "roi_pool_3d", "ROIPool3d", +# "SigmoidFocalLoss", "SoftmaxFocalLoss", "FrozenBatchNorm1d", +# "FrozenBatchNorm2d", "FrozenBatchNorm3d", +# ] +__all__ = ["ROIAlign3d", "roi_pool_3d", "ROIPool3d", + "SigmoidFocalLoss", "SoftmaxFocalLoss", "FrozenBatchNorm1d", + "FrozenBatchNorm2d", "FrozenBatchNorm3d", + ] + diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/__init__.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..9a59e3e Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/__init__.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/__init__.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000..8245475 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/__init__.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/batch_norm.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/batch_norm.cpython-37.pyc new file mode 100644 index 0000000..1d56f93 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/batch_norm.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/batch_norm.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/batch_norm.cpython-39.pyc new file mode 100644 index 0000000..4f60db2 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/batch_norm.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/roi_align_3d.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/roi_align_3d.cpython-37.pyc new file mode 100644 index 0000000..d2a16b1 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/roi_align_3d.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/roi_align_3d.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/roi_align_3d.cpython-39.pyc new file mode 100644 index 0000000..1a454e9 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/roi_align_3d.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/roi_pool_3d.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/roi_pool_3d.cpython-37.pyc new file mode 100644 index 0000000..23b9408 Binary files /dev/null and 
b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/roi_pool_3d.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/roi_pool_3d.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/roi_pool_3d.cpython-39.pyc new file mode 100644 index 0000000..3bd0ba9 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/roi_pool_3d.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/sigmoid_focal_loss.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/sigmoid_focal_loss.cpython-37.pyc new file mode 100644 index 0000000..93744a8 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/sigmoid_focal_loss.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/sigmoid_focal_loss.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/sigmoid_focal_loss.cpython-39.pyc new file mode 100644 index 0000000..b01447a Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/sigmoid_focal_loss.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/softmax_focal_loss.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/softmax_focal_loss.cpython-37.pyc new file mode 100644 index 0000000..d962804 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/softmax_focal_loss.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/softmax_focal_loss.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/softmax_focal_loss.cpython-39.pyc new file mode 100644 index 0000000..ff2b11c Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/__pycache__/softmax_focal_loss.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/batch_norm.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/batch_norm.py new file mode 100644 index 0000000..d8a720c --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/batch_norm.py @@ -0,0 +1,85 @@ +import torch +from torch import nn + + +class _FrozenBatchNorm(nn.Module): + def __init__(self, num_features, eps=1e-5, affine=True, track_running_stats=True): + super(_FrozenBatchNorm, self).__init__() + self.num_features = num_features + self.eps = eps + self.affine = affine + self.track_running_stats = track_running_stats + if self.affine: + self.register_buffer("weight", torch.Tensor(num_features)) + self.register_buffer("bias", torch.Tensor(num_features)) + else: + self.register_buffer("weight", None) + self.register_buffer("bias", None) + if self.track_running_stats: + self.register_buffer('running_mean', torch.zeros(num_features)) + self.register_buffer('running_var', torch.ones(num_features)) + else: + self.register_parameter('running_mean', None) + self.register_parameter('running_var', None) + self.reset_parameters() + + def reset_running_stats(self): + if self.track_running_stats: + self.running_mean.zero_() + self.running_var.fill_(1) + + def 
reset_parameters(self): + self.reset_running_stats() + if self.affine: + self.weight.data.uniform_() + self.bias.data.zero_() + + def _check_input_dim(self, input): + raise NotImplementedError + + def forward(self, input): + self._check_input_dim(input) + view_shape = (1, self.num_features) + (1,) * (input.dim() - 2) + + if self.track_running_stats: + scale = self.weight / (self.running_var + self.eps).sqrt() + bias = self.bias - self.running_mean * scale + else: + scale = self.weight + bias = self.bias + + return scale.view(*view_shape) * input + bias.view(*view_shape) + + def extra_repr(self): + return '{num_features}, eps={eps}, affine={affine}, ' \ + 'track_running_stats={track_running_stats}'.format(**self.__dict__) + + def _load_from_state_dict(self, state_dict, prefix, metadata, strict, + missing_keys, unexpected_keys, error_msgs): + num_batches_tracked_key = prefix + 'num_batches_tracked' + if num_batches_tracked_key in state_dict: + del state_dict[num_batches_tracked_key] + super(_FrozenBatchNorm, self)._load_from_state_dict( + state_dict, prefix, metadata, strict, + missing_keys, unexpected_keys, error_msgs) + + +class FrozenBatchNorm1d(_FrozenBatchNorm): + def _check_input_dim(self, input): + if input.dim() != 2 and input.dim() != 3: + raise ValueError('expected 2D or 3D input (got {}D input)' + .format(input.dim())) + + +class FrozenBatchNorm2d(_FrozenBatchNorm): + def _check_input_dim(self, input): + if input.dim() != 4: + raise ValueError('expected 4D input (got {}D input)' + .format(input.dim())) + + +class FrozenBatchNorm3d(_FrozenBatchNorm): + def _check_input_dim(self, input): + if input.dim() != 5: + raise ValueError('expected 5D input (got {}D input)' + .format(input.dim())) diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/roi_align_3d.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/roi_align_3d.py new file mode 100644 index 0000000..89127da --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/roi_align_3d.py @@ -0,0 +1,71 @@ +import torch +from torch import nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.nn.modules.utils import _pair + +# import alphaction._custom_cuda_ext as _C +from torchvision.ops import roi_align + + +# class _ROIAlign3d(Function): +# @staticmethod +# def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio): +# ctx.save_for_backward(roi) +# ctx.output_size = _pair(output_size) +# ctx.spatial_scale = spatial_scale +# ctx.sampling_ratio = sampling_ratio +# ctx.input_shape = input.size() +# output = _C.roi_align_3d_forward( +# input, roi, spatial_scale, output_size[0], output_size[1], sampling_ratio +# ) +# return output +# +# @staticmethod +# @once_differentiable +# def backward(ctx, grad_output): +# rois, = ctx.saved_tensors +# output_size = ctx.output_size +# spatial_scale = ctx.spatial_scale +# sampling_ratio = ctx.sampling_ratio +# bs, ch, l, h, w = ctx.input_shape +# grad_input = _C.roi_align_3d_backward( +# grad_output, +# rois, +# spatial_scale, +# output_size[0], +# output_size[1], +# bs, +# ch, +# l, +# h, +# w, +# sampling_ratio, +# ) +# return grad_input, None, None, None, None +# +# +# roi_align_3d = _ROIAlign3d.apply + + +class ROIAlign3d(nn.Module): + def __init__(self, output_size, spatial_scale, sampling_ratio): + super(ROIAlign3d, self).__init__() + self.output_size = output_size + self.spatial_scale = spatial_scale + self.sampling_ratio = sampling_ratio + + def 
forward(self, input, rois): + # return roi_align_3d( + # input, rois, self.output_size, self.spatial_scale, self.sampling_ratio + # ) + # The ROIAlign3d op provided by AIA does not support half precision, so fall back to torchvision's roi_align for now + return roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio) + + def __repr__(self): + tmpstr = self.__class__.__name__ + "(" + tmpstr += "output_size=" + str(self.output_size) + tmpstr += ", spatial_scale=" + str(self.spatial_scale) + tmpstr += ", sampling_ratio=" + str(self.sampling_ratio) + tmpstr += ")" + return tmpstr diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/roi_pool_3d.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/roi_pool_3d.py new file mode 100644 index 0000000..cea515a --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/roi_pool_3d.py @@ -0,0 +1,62 @@ +from torch import nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.nn.modules.utils import _pair + +# import alphaction._custom_cuda_ext as _C + + +class _ROIPool3d(Function): + @staticmethod + def forward(ctx, input, roi, output_size, spatial_scale): + ctx.output_size = _pair(output_size) + ctx.spatial_scale = spatial_scale + ctx.input_shape = input.size() + output, argmax = _C.roi_pool_3d_forward( + input, roi, spatial_scale, output_size[0], output_size[1] + ) + ctx.save_for_backward(input, roi, argmax) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + input, rois, argmax = ctx.saved_tensors + output_size = ctx.output_size + spatial_scale = ctx.spatial_scale + bs, ch, l, h, w = ctx.input_shape + grad_input = _C.roi_pool_3d_backward( + grad_output, + input, + rois, + argmax, + spatial_scale, + output_size[0], + output_size[1], + bs, + ch, + l, + h, + w, + ) + return grad_input, None, None, None + + +roi_pool_3d = _ROIPool3d.apply + + +class ROIPool3d(nn.Module): + def __init__(self, output_size, spatial_scale): + super(ROIPool3d, self).__init__() + self.output_size = output_size + self.spatial_scale = spatial_scale + + def forward(self, input, rois): + return roi_pool_3d(input, rois, self.output_size, self.spatial_scale) + + def __repr__(self): + tmpstr = self.__class__.__name__ + "(" + tmpstr += "output_size=" + str(self.output_size) + tmpstr += ", spatial_scale=" + str(self.spatial_scale) + tmpstr += ")" + return tmpstr \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/sigmoid_focal_loss.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/sigmoid_focal_loss.py new file mode 100644 index 0000000..fa55477 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/sigmoid_focal_loss.py @@ -0,0 +1,61 @@ +import torch +from torch import nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable + +# import alphaction._custom_cuda_ext as _C + + +class _SigmoidFocalLoss(Function): + @staticmethod + def forward(ctx, logits, targets, gamma, alpha): + ctx.save_for_backward(logits, targets) + ctx.gamma = gamma + ctx.alpha = alpha + + losses = _C.sigmoid_focalloss_forward( + logits, targets, gamma, alpha + ) + return losses + + @staticmethod + @once_differentiable + def backward(ctx, d_loss): + logits, targets = ctx.saved_tensors + gamma = ctx.gamma + alpha = ctx.alpha + d_logits = _C.sigmoid_focalloss_backward( + logits, targets, d_loss, gamma, alpha + ) + return d_logits, None, None, None + + +def 
sigmoid_focal_loss(logits, targets, gamma, alpha, reduction='mean'): + assert reduction in ["none", "mean", "sum"], "Unsupported reduction type \"{}\"".format(reduction) + logits = logits.float() + targets = targets.float() + + ret = _SigmoidFocalLoss.apply(logits, targets, gamma, alpha) + if reduction != "none": + ret = torch.mean(ret) if reduction == "mean" else torch.sum(ret) + + return ret + + +class SigmoidFocalLoss(nn.Module): + def __init__(self, gamma, alpha, reduction="mean"): + super(SigmoidFocalLoss, self).__init__() + self.gamma = gamma + self.alpha = alpha + self.reduction = reduction + + def forward(self, logits, targets): + loss = sigmoid_focal_loss(logits, targets, self.gamma, self.alpha, self.reduction) + return loss + + def __repr__(self): + tmpstr = self.__class__.__name__ + "(" + tmpstr += "gamma=" + str(self.gamma) + tmpstr += ", alpha=" + str(self.alpha) + tmpstr += ")" + return tmpstr diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/softmax_focal_loss.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/softmax_focal_loss.py new file mode 100644 index 0000000..384df45 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/softmax_focal_loss.py @@ -0,0 +1,61 @@ +import torch +from torch import nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable + +# import alphaction._custom_cuda_ext as _C + + +class _SoftmaxFocalLoss(Function): + @staticmethod + def forward(ctx, logits, targets, gamma, alpha): + ctx.gamma = gamma + ctx.alpha = alpha + + losses, P = _C.softmax_focalloss_forward( + logits, targets, gamma, alpha + ) + ctx.save_for_backward(logits, targets, P) + return losses + + @staticmethod + @once_differentiable + def backward(ctx, d_loss): + logits, targets, P = ctx.saved_tensors + gamma = ctx.gamma + alpha = ctx.alpha + d_logits = _C.softmax_focalloss_backward( + logits, targets, P, d_loss, gamma, alpha + ) + return d_logits, None, None, None + + +def softmax_focal_loss(logits, targets, gamma, alpha, reduction='mean'): + assert reduction in ["none", "mean", "sum"], "Unsupported reduction type \"{}\"".format(reduction) + logits = logits.float() + targets = targets.int() + + ret = _SoftmaxFocalLoss.apply(logits, targets, gamma, alpha) + if reduction != "none": + ret = torch.mean(ret) if reduction == "mean" else torch.sum(ret) + + return ret + + +class SoftmaxFocalLoss(nn.Module): + def __init__(self, gamma, alpha, reduction="mean"): + super(SoftmaxFocalLoss, self).__init__() + self.gamma = gamma + self.alpha = alpha + self.reduction = reduction + + def forward(self, logits, targets): + loss = softmax_focal_loss(logits, targets, self.gamma, self.alpha, self.reduction) + return loss + + def __repr__(self): + tmpstr = self.__class__.__name__ + "(" + tmpstr += "gamma=" + str(self.gamma) + tmpstr += ", alpha=" + str(self.alpha) + tmpstr += ")" + return tmpstr diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/__init__.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/__pycache__/__init__.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..3c9b0e2 Binary files /dev/null and 
b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/__pycache__/__init__.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/__pycache__/__init__.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000..d4432f5 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/__pycache__/__init__.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/__pycache__/poolers.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/__pycache__/poolers.cpython-37.pyc new file mode 100644 index 0000000..7400b7c Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/__pycache__/poolers.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/__pycache__/poolers.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/__pycache__/poolers.cpython-39.pyc new file mode 100644 index 0000000..b2c654d Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/__pycache__/poolers.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/__pycache__/utils.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/__pycache__/utils.cpython-37.pyc new file mode 100644 index 0000000..713a53d Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/__pycache__/utils.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/__pycache__/utils.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000..4796cf6 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/__pycache__/utils.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/backbone/__init__.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/backbone/__init__.py new file mode 100644 index 0000000..1becdc5 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/backbone/__init__.py @@ -0,0 +1 @@ +from .backbone import build_backbone \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/backbone/backbone.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/backbone/backbone.py new file mode 100644 index 0000000..1c08afc --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/backbone/backbone.py @@ -0,0 +1,23 @@ +from alphaction.modeling import registry +from . 
import slowfast, i3d + +@registry.BACKBONES.register("Slowfast-Resnet50") +@registry.BACKBONES.register("Slowfast-Resnet101") +def build_slowfast_resnet_backbone(cfg): + model = slowfast.SlowFast(cfg) + return model + +@registry.BACKBONES.register("I3D-Resnet50") +@registry.BACKBONES.register("I3D-Resnet101") +@registry.BACKBONES.register("I3D-Resnet50-Sparse") +@registry.BACKBONES.register("I3D-Resnet101-Sparse") +def build_i3d_resnet_backbone(cfg): + model = i3d.I3D(cfg) + return model + +def build_backbone(cfg): + assert cfg.MODEL.BACKBONE.CONV_BODY in registry.BACKBONES, \ + "cfg.MODEL.BACKBONE.CONV_BODY: {} are not registered in registry".format( + cfg.MODEL.BACKBONE.CONV_BODY + ) + return registry.BACKBONES[cfg.MODEL.BACKBONE.CONV_BODY](cfg) \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/backbone/i3d.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/backbone/i3d.py new file mode 100644 index 0000000..561cb5d --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/backbone/i3d.py @@ -0,0 +1,148 @@ +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import torch.nn as nn +from alphaction.layers import FrozenBatchNorm3d +from alphaction.modeling.common_blocks import ResNLBlock + + +def get_model_cfg(cfg): + backbone_strs = cfg.MODEL.BACKBONE.CONV_BODY.split('-')[1:] + error_msg = 'Model backbone {} is not supported.'.format(cfg.MODEL.BACKBONE.CONV_BODY) + + use_temp_convs_1 = [2] + temp_strides_1 = [2] + max_pool_stride_1 = 2 + + use_temp_convs_2 = [1, 1, 1] + temp_strides_2 = [1, 1, 1] + max_pool_stride_2 = 2 + + use_temp_convs_3 = [1, 0, 1, 0] + temp_strides_3 = [1, 1, 1, 1] + + use_temp_convs_5 = [0, 1, 0] + temp_strides_5 = [1, 1, 1] + + avg_pool_stride = int(cfg.INPUT.FRAME_NUM / 8) + if backbone_strs[0] == 'Resnet50': + block_config = (3, 4, 6, 3) + + use_temp_convs_4 = [1, 0, 1, 0, 1, 0] + temp_strides_4 = [1, 1, 1, 1, 1, 1] + elif backbone_strs[0] == 'Resnet101': + block_config = (3, 4, 23, 3) + + use_temp_convs_4 = [] + for i in range(23): + if i % 2 == 0: + use_temp_convs_4.append(1) + else: + use_temp_convs_4.append(0) + temp_strides_4 = [1, ] * 23 + else: + raise KeyError(error_msg) + + if len(backbone_strs) > 1: + if len(backbone_strs) == 2 and backbone_strs[1] == 'Sparse': + temp_strides_1 = [1] + max_pool_stride_1 = 1 + avg_pool_stride = int(cfg.INPUT.FRAME_NUM / 2) + else: + raise KeyError(error_msg) + + use_temp_convs_set = [use_temp_convs_1, use_temp_convs_2, use_temp_convs_3, use_temp_convs_4, use_temp_convs_5] + temp_strides_set = [temp_strides_1, temp_strides_2, temp_strides_3, temp_strides_4, temp_strides_5] + pool_strides_set = [max_pool_stride_1, max_pool_stride_2, avg_pool_stride] + return block_config, use_temp_convs_set, temp_strides_set, pool_strides_set + + +class I3D(nn.Module): + def __init__(self, cfg): + super(I3D, self).__init__() + + self.cfg = cfg.clone() + + block_config, use_temp_convs_set, temp_strides_set, pool_strides_set = get_model_cfg(cfg) + conv3_nonlocal = cfg.MODEL.BACKBONE.I3D.CONV3_NONLOCAL + conv4_nonlocal = cfg.MODEL.BACKBONE.I3D.CONV4_NONLOCAL + + dim_inner = 64 + conv_dims = [64, 256, 512, 1024, 2048] + self.dim_out = conv_dims[-1] + n1, n2, n3, n4 = block_config + layer_mod = 2 + conv3_nl_mod = layer_mod + conv4_nl_mod = layer_mod + if not conv3_nonlocal: + conv3_nl_mod = 1000 + if not conv4_nonlocal: + conv4_nl_mod = 1000 + self.c2_mapping = None + + data_dim = 3 + self.conv1 = 
nn.Conv3d(data_dim, conv_dims[0], (1 + use_temp_convs_set[0][0] * 2, 7, 7), + stride=(temp_strides_set[0][0], 2, 2), + padding=(use_temp_convs_set[0][0], 3, 3), bias=False) + nn.init.kaiming_normal_(self.conv1.weight) + + if cfg.MODEL.BACKBONE.FROZEN_BN: + self.bn1 = FrozenBatchNorm3d(conv_dims[0], eps=cfg.MODEL.BACKBONE.BN_EPSILON) + nn.init.constant_(self.bn1.weight, 1.0) + nn.init.constant_(self.bn1.bias, 0.0) + else: + self.bn1 = nn.BatchNorm3d(conv_dims[0], eps=cfg.MODEL.BACKBONE.BN_EPSILON, momentum=cfg.MODEL.BACKBONE.BN_MOMENTUM) + + self.relu = nn.ReLU(inplace=True) + self.maxpool1 = nn.MaxPool3d((pool_strides_set[0], 3, 3), stride=(pool_strides_set[0], 2, 2)) + + self.res_nl1 = ResNLBlock(cfg, conv_dims[0], conv_dims[1], stride=1, num_blocks=n1, dim_inner=dim_inner, + use_temp_convs=use_temp_convs_set[1], temp_strides=temp_strides_set[1]) + self.maxpool2 = nn.MaxPool3d((pool_strides_set[1], 1, 1), stride=(pool_strides_set[1], 1, 1)) + + self.res_nl2 = ResNLBlock(cfg, conv_dims[1], conv_dims[2], stride=2, num_blocks=n2, + dim_inner=dim_inner * 2, use_temp_convs=use_temp_convs_set[2], + temp_strides=temp_strides_set[2], nonlocal_mod=conv3_nl_mod, + group_nonlocal=cfg.MODEL.BACKBONE.I3D.CONV3_GROUP_NL) + + self.res_nl3 = ResNLBlock(cfg, conv_dims[2], conv_dims[3], stride=2, num_blocks=n3, + dim_inner=dim_inner * 4, use_temp_convs=use_temp_convs_set[3], + temp_strides=temp_strides_set[3], nonlocal_mod=conv4_nl_mod) + + self.res_nl4 = ResNLBlock(cfg, conv_dims[3], conv_dims[4], stride=1, num_blocks=n4, + dim_inner=dim_inner * 8, use_temp_convs=use_temp_convs_set[4], + temp_strides=temp_strides_set[4], + dilation=2) + + def forward(self, _, x): + # We only use fast videos, which is the second input. + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + out = self.maxpool1(out) + + out = self.res_nl1(out) + out = self.maxpool2(out) + + out = self.res_nl2(out) + + out = self.res_nl3(out) + + out = self.res_nl4(out) + return None, out + + def c2_weight_mapping(self): + if self.c2_mapping is None: + weight_map = {'conv1.weight': 'conv1_w', + 'bn1.weight': 'res_conv1_bn_s', + 'bn1.bias': 'res_conv1_bn_b', + 'bn1.running_mean': 'res_conv1_bn_rm', + 'bn1.running_var': 'res_conv1_bn_riv'} + for i in range(1, 5): + name = 'res_nl{}'.format(i) + child_map = getattr(self, name).c2_weight_mapping() + for key, val in child_map.items(): + new_key = name + '.' 
+ key + weight_map[new_key] = val.format(i + 1) + self.c2_mapping = weight_map + return self.c2_mapping diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/backbone/slowfast.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/backbone/slowfast.py new file mode 100644 index 0000000..153758b --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/backbone/slowfast.py @@ -0,0 +1,298 @@ +from __future__ import (absolute_import, division, print_function, + unicode_literals) +import torch +import torch.nn as nn + +from alphaction.modeling.common_blocks import ResNLBlock +from alphaction.layers import FrozenBatchNorm3d + + +def get_slow_model_cfg(cfg): + backbone_strs = cfg.MODEL.BACKBONE.CONV_BODY.split('-')[1:] + error_msg = 'Model backbone {} is not supported.'.format(cfg.MODEL.BACKBONE.CONV_BODY) + + use_temp_convs_1 = [0] + temp_strides_1 = [1] + max_pool_stride_1 = 1 + + use_temp_convs_2 = [0, 0, 0] + temp_strides_2 = [1, 1, 1] + + use_temp_convs_3 = [0, 0, 0, 0] + temp_strides_3 = [1, 1, 1, 1] + + use_temp_convs_5 = [1, 1, 1] + temp_strides_5 = [1, 1, 1] + + slow_stride = cfg.INPUT.TAU + avg_pool_stride = int(cfg.INPUT.FRAME_NUM / slow_stride) + if backbone_strs[0] == 'Resnet50': + block_config = (3, 4, 6, 3) + + use_temp_convs_4 = [1, 1, 1, 1, 1, 1] + temp_strides_4 = [1, 1, 1, 1, 1, 1] + elif backbone_strs[0] == 'Resnet101': + block_config = (3, 4, 23, 3) + + use_temp_convs_4 = [1, ] * 23 + temp_strides_4 = [1, ] * 23 + else: + raise KeyError(error_msg) + + if len(backbone_strs) > 1: + raise KeyError(error_msg) + + use_temp_convs_set = [use_temp_convs_1, use_temp_convs_2, use_temp_convs_3, use_temp_convs_4, use_temp_convs_5] + temp_strides_set = [temp_strides_1, temp_strides_2, temp_strides_3, temp_strides_4, temp_strides_5] + pool_strides_set = [max_pool_stride_1, avg_pool_stride] + return block_config, use_temp_convs_set, temp_strides_set, pool_strides_set + + +def get_fast_model_cfg(cfg): + backbone_strs = cfg.MODEL.BACKBONE.CONV_BODY.split('-')[1:] + error_msg = 'Model backbone {} is not supported.'.format(cfg.MODEL.BACKBONE.CONV_BODY) + + use_temp_convs_1 = [2] + temp_strides_1 = [1] + max_pool_stride_1 = 1 + + use_temp_convs_2 = [1, 1, 1] + temp_strides_2 = [1, 1, 1] + + use_temp_convs_3 = [1, 1, 1, 1] + temp_strides_3 = [1, 1, 1, 1] + + use_temp_convs_5 = [1, 1, 1] + temp_strides_5 = [1, 1, 1] + + fast_stride = cfg.INPUT.TAU // cfg.INPUT.ALPHA + avg_pool_stride = int(cfg.INPUT.FRAME_NUM / fast_stride) + + if backbone_strs[0] == 'Resnet50': + block_config = (3, 4, 6, 3) + + use_temp_convs_4 = [1, 1, 1, 1, 1, 1] + temp_strides_4 = [1, 1, 1, 1, 1, 1] + elif backbone_strs[0] == 'Resnet101': + block_config = (3, 4, 23, 3) + + use_temp_convs_4 = [1, ] * 23 + temp_strides_4 = [1, ] * 23 + else: + raise KeyError(error_msg) + + if len(backbone_strs) > 1: + raise KeyError(error_msg) + + use_temp_convs_set = [use_temp_convs_1, use_temp_convs_2, use_temp_convs_3, use_temp_convs_4, use_temp_convs_5] + temp_strides_set = [temp_strides_1, temp_strides_2, temp_strides_3, temp_strides_4, temp_strides_5] + pool_strides_set = [max_pool_stride_1, avg_pool_stride] + return block_config, use_temp_convs_set, temp_strides_set, pool_strides_set + +class LateralBlock(nn.Module): + def __init__(self, conv_dim, alpha): + super(LateralBlock, self).__init__() + self.conv = nn.Conv3d(conv_dim, conv_dim * 2, kernel_size=(5, 1, 1), stride=(alpha, 1, 1), + padding=(2, 0, 0), bias=True) + nn.init.kaiming_normal_(self.conv.weight) + 
nn.init.constant_(self.conv.bias, 0.0) + + def forward(self, x): + out = self.conv(x) + return out + + +class FastPath(nn.Module): + def __init__(self, cfg): + super(FastPath, self).__init__() + + self.cfg = cfg.clone() + + block_config, use_temp_convs_set, temp_strides_set, pool_strides_set = get_fast_model_cfg(cfg) + conv3_nonlocal = cfg.MODEL.BACKBONE.SLOWFAST.FAST.CONV3_NONLOCAL + conv4_nonlocal = cfg.MODEL.BACKBONE.SLOWFAST.FAST.CONV4_NONLOCAL + + dim_inner = 8 + conv_dims = [8, 32, 64, 128, 256] + self.dim_out = conv_dims[-1] + n1, n2, n3, n4 = block_config + layer_mod = 2 + conv3_nl_mod = layer_mod + conv4_nl_mod = layer_mod + if not conv3_nonlocal: + conv3_nl_mod = 1000 + if not conv4_nonlocal: + conv4_nl_mod = 1000 + self.c2_mapping = None + + self.conv1 = nn.Conv3d(3, conv_dims[0], (1 + use_temp_convs_set[0][0] * 2, 7, 7), + stride=(temp_strides_set[0][0], 2, 2), + padding=(use_temp_convs_set[0][0], 3, 3), bias=False) + nn.init.kaiming_normal_(self.conv1.weight) + + if cfg.MODEL.BACKBONE.FROZEN_BN: + self.bn1 = FrozenBatchNorm3d(conv_dims[0], eps=cfg.MODEL.BACKBONE.BN_EPSILON) + nn.init.constant_(self.bn1.weight, 1.0) + nn.init.constant_(self.bn1.bias, 0.0) + else: + self.bn1 = nn.BatchNorm3d(conv_dims[0], eps=cfg.MODEL.BACKBONE.BN_EPSILON, + momentum=cfg.MODEL.BACKBONE.BN_MOMENTUM) + + self.relu = nn.ReLU(inplace=True) + self.maxpool1 = nn.MaxPool3d((pool_strides_set[0], 3, 3), stride=(pool_strides_set[0], 2, 2)) + + self.res_nl1 = ResNLBlock(cfg, conv_dims[0], conv_dims[1], stride=1, num_blocks=n1, dim_inner=dim_inner, + use_temp_convs=use_temp_convs_set[1], temp_strides=temp_strides_set[1]) + + self.res_nl2 = ResNLBlock(cfg, conv_dims[1], conv_dims[2], stride=2, num_blocks=n2, + dim_inner=dim_inner * 2, use_temp_convs=use_temp_convs_set[2], + temp_strides=temp_strides_set[2], nonlocal_mod=conv3_nl_mod, + group_nonlocal=cfg.MODEL.BACKBONE.SLOWFAST.FAST.CONV3_GROUP_NL) + + self.res_nl3 = ResNLBlock(cfg, conv_dims[2], conv_dims[3], stride=2, num_blocks=n3, + dim_inner=dim_inner * 4, use_temp_convs=use_temp_convs_set[3], + temp_strides=temp_strides_set[3], nonlocal_mod=conv4_nl_mod) + + self.res_nl4 = ResNLBlock(cfg, conv_dims[3], conv_dims[4], stride=1, num_blocks=n4, + dim_inner=dim_inner * 8, use_temp_convs=use_temp_convs_set[4], + temp_strides=temp_strides_set[4], + dilation=2) + + if cfg.MODEL.BACKBONE.SLOWFAST.LATERAL == 'tconv': + self._tconv(conv_dims) + + def _tconv(self, conv_dims): + alpha = self.cfg.INPUT.ALPHA + self.Tconv1 = LateralBlock(conv_dims[0], alpha) + self.Tconv2 = LateralBlock(conv_dims[1], alpha) + self.Tconv3 = LateralBlock(conv_dims[2], alpha) + self.Tconv4 = LateralBlock(conv_dims[3], alpha) + + def forward(self, x): + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + out = self.maxpool1(out) + tconv1 = self.Tconv1(out) + + out = self.res_nl1(out) + tconv2 = self.Tconv2(out) + + out = self.res_nl2(out) + tconv3 = self.Tconv3(out) + + out = self.res_nl3(out) + tconv4 = self.Tconv4(out) + + out = self.res_nl4(out) + return out, [tconv1, tconv2, tconv3, tconv4] + + +class SlowPath(nn.Module): + def __init__(self, cfg): + super(SlowPath, self).__init__() + + self.cfg = cfg.clone() + + block_config, use_temp_convs_set, temp_strides_set, pool_strides_set = get_slow_model_cfg(cfg) + conv3_nonlocal = cfg.MODEL.BACKBONE.SLOWFAST.SLOW.CONV3_NONLOCAL + conv4_nonlocal = cfg.MODEL.BACKBONE.SLOWFAST.SLOW.CONV4_NONLOCAL + + dim_inner = 64 + conv_dims = [64, 256, 512, 1024, 2048] + self.dim_out = conv_dims[-1] + n1, n2, n3, n4 = block_config + 
layer_mod = 2 + conv3_nl_mod = layer_mod + conv4_nl_mod = layer_mod + if not conv3_nonlocal: + conv3_nl_mod = 1000 + if not conv4_nonlocal: + conv4_nl_mod = 1000 + self.c2_mapping = None + + self.conv1 = nn.Conv3d(3, conv_dims[0], (1 + use_temp_convs_set[0][0] * 2, 7, 7), + stride=(temp_strides_set[0][0], 2, 2), + padding=(use_temp_convs_set[0][0], 3, 3), bias=False) + nn.init.kaiming_normal_(self.conv1.weight) + + if cfg.MODEL.BACKBONE.FROZEN_BN: + self.bn1 = FrozenBatchNorm3d(conv_dims[0], eps=cfg.MODEL.BACKBONE.BN_EPSILON) + nn.init.constant_(self.bn1.weight, 1.0) + nn.init.constant_(self.bn1.bias, 0.0) + else: + self.bn1 = nn.BatchNorm3d(conv_dims[0], eps=cfg.MODEL.BACKBONE.BN_EPSILON, + momentum=cfg.MODEL.BACKBONE.BN_MOMENTUM) + + self.relu = nn.ReLU(inplace=True) + self.maxpool1 = nn.MaxPool3d((pool_strides_set[0], 3, 3), stride=(pool_strides_set[0], 2, 2)) + + self.res_nl1 = ResNLBlock(cfg, conv_dims[0], conv_dims[1], stride=1, num_blocks=n1, dim_inner=dim_inner, + use_temp_convs=use_temp_convs_set[1], temp_strides=temp_strides_set[1], + lateral=cfg.MODEL.BACKBONE.SLOWFAST.FAST.ACTIVE) + + self.res_nl2 = ResNLBlock(cfg, conv_dims[1], conv_dims[2], stride=2, num_blocks=n2, + dim_inner=dim_inner * 2, use_temp_convs=use_temp_convs_set[2], + temp_strides=temp_strides_set[2], nonlocal_mod=conv3_nl_mod, + group_nonlocal=cfg.MODEL.BACKBONE.SLOWFAST.SLOW.CONV3_GROUP_NL, + lateral=cfg.MODEL.BACKBONE.SLOWFAST.FAST.ACTIVE) + + self.res_nl3 = ResNLBlock(cfg, conv_dims[2], conv_dims[3], stride=2, num_blocks=n3, + dim_inner=dim_inner * 4, use_temp_convs=use_temp_convs_set[3], + temp_strides=temp_strides_set[3], nonlocal_mod=conv4_nl_mod, + lateral=cfg.MODEL.BACKBONE.SLOWFAST.FAST.ACTIVE) + + self.res_nl4 = ResNLBlock(cfg, conv_dims[3], conv_dims[4], stride=1, num_blocks=n4, + dim_inner=dim_inner * 8, use_temp_convs=use_temp_convs_set[4], + temp_strides=temp_strides_set[4], lateral=cfg.MODEL.BACKBONE.SLOWFAST.FAST.ACTIVE, + dilation=2) + + def forward(self, x, lateral_connection=None): + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + out = self.maxpool1(out) + if lateral_connection: + out = torch.cat([out, lateral_connection[0]], dim=1) + + out = self.res_nl1(out) + if lateral_connection: + out = torch.cat([out, lateral_connection[1]], dim=1) + + out = self.res_nl2(out) + if lateral_connection: + out = torch.cat([out, lateral_connection[2]], dim=1) + + out = self.res_nl3(out) + if lateral_connection: + out = torch.cat([out, lateral_connection[3]], dim=1) + + out = self.res_nl4(out) + return out + + +class SlowFast(nn.Module): + def __init__(self, cfg): + super(SlowFast, self).__init__() + self.cfg = cfg.clone() + if cfg.MODEL.BACKBONE.SLOWFAST.SLOW.ACTIVE: + self.slow = SlowPath(cfg) + if cfg.MODEL.BACKBONE.SLOWFAST.FAST.ACTIVE: + self.fast = FastPath(cfg) + if cfg.MODEL.BACKBONE.SLOWFAST.SLOW.ACTIVE and cfg.MODEL.BACKBONE.SLOWFAST.FAST.ACTIVE: + self.dim_out = self.slow.dim_out + self.fast.dim_out + elif cfg.MODEL.BACKBONE.SLOWFAST.SLOW.ACTIVE: + self.dim_out = self.slow.dim_out + elif cfg.MODEL.BACKBONE.SLOWFAST.FAST.ACTIVE: + self.dim_out = self.fast.dim_out + + def forward(self, slow_x, fast_x): + tconv = None + cfg = self.cfg + slowout = None + fastout = None + if cfg.MODEL.BACKBONE.SLOWFAST.FAST.ACTIVE: + fastout, tconv = self.fast(fast_x) + if cfg.MODEL.BACKBONE.SLOWFAST.SLOW.ACTIVE: + slowout = self.slow(slow_x, tconv) + return slowout, fastout diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/common_blocks.py 
b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/common_blocks.py new file mode 100644 index 0000000..b5afc19 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/common_blocks.py @@ -0,0 +1,156 @@ +import torch.nn as nn +from alphaction.modeling.nonlocal_block import NLBlock +from alphaction.layers import FrozenBatchNorm3d + + +class Conv3dBN(nn.Module): + def __init__(self, cfg, dim_in, dim_out, kernels, stride, padding, dilation=1, init_weight=None): + super(Conv3dBN, self).__init__() + self.conv = nn.Conv3d(dim_in, dim_out, kernels, stride=stride, + padding=padding, dilation=dilation, bias=False) + nn.init.kaiming_normal_(self.conv.weight) + if cfg.MODEL.BACKBONE.FROZEN_BN: + self.bn = FrozenBatchNorm3d(dim_out, eps=cfg.MODEL.BACKBONE.BN_EPSILON) + nn.init.constant_(self.bn.weight, 1.0) + nn.init.constant_(self.bn.bias, 0.0) + else: + self.bn = nn.BatchNorm3d(dim_out, eps=cfg.MODEL.BACKBONE.BN_EPSILON, momentum=cfg.MODEL.BACKBONE.BN_MOMENTUM) + if init_weight is not None: + nn.init.constant_(self.bn.weight, init_weight) + + def forward(self, x): + out = self.conv(x) + out = self.bn(out) + return out + + def c2_weight_mapping(self): + return { + 'conv.weight': 'w', + 'bn.weight': 'bn_s', + 'bn.bias': 'bn_b', + 'bn.running_mean': 'bn_rm', + 'bn.running_var': 'bn_riv' + } + + +class Bottleneck(nn.Module): + def __init__(self, cfg, dim_in, dim_out, dim_inner, stride, dilation=1, use_temp_conv=1, temp_stride=1): + super(Bottleneck, self).__init__() + self.conv1 = Conv3dBN(cfg, dim_in, dim_inner, (1 + use_temp_conv * 2, 1, 1), + stride=(temp_stride, 1, 1), padding=(use_temp_conv, 0, 0)) + self.conv2 = Conv3dBN(cfg, dim_inner, dim_inner, (1, 3, 3), stride=(1, stride, stride), + dilation=(1, dilation, dilation), + padding=(0, dilation, dilation)) + self.conv3 = Conv3dBN(cfg, dim_inner, dim_out, (1, 1, 1), stride=(1, 1, 1), + padding=0, init_weight=cfg.MODEL.BACKBONE.BN_INIT_GAMMA) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + out = self.conv1(x) + out = self.relu(out) + out = self.conv2(out) + out = self.relu(out) + out = self.conv3(out) + return out + + def c2_weight_mapping(self): + weight_map = {} + for i in range(1, 4): + name = 'conv{}'.format(i) + child_map = getattr(self, name).c2_weight_mapping() + for key, val in child_map.items(): + new_key = name + '.' + key + prefix = 'branch2{}_'.format(chr(ord('a') + i - 1)) + weight_map[new_key] = prefix + val + return weight_map + + +class ResBlock(nn.Module): + def __init__(self, cfg, dim_in, dim_out, dim_inner, stride, dilation=1, use_temp_conv=0, temp_stride=1, need_shortcut=False): + super(ResBlock, self).__init__() + + self.btnk = Bottleneck(cfg, dim_in, dim_out, dim_inner=dim_inner, stride=stride, dilation=dilation, + use_temp_conv=use_temp_conv, temp_stride=temp_stride) + if not need_shortcut: + self.shortcut = None + else: + self.shortcut = Conv3dBN(cfg, dim_in, dim_out, (1, 1, 1), + stride=(temp_stride, stride, stride), padding=0) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + tr = self.btnk(x) + if self.shortcut is None: + sc = x + else: + sc = self.shortcut(x) + return self.relu(tr + sc) + + def c2_weight_mapping(self): + weight_map = {} + for name, m_child in self.named_children(): + if m_child.state_dict(): + child_map = m_child.c2_weight_mapping() + for key, val in child_map.items(): + new_key = name + '.' 
+ key + if isinstance(m_child, Conv3dBN): + prefix = 'branch1_' + else: + prefix = '' + weight_map[new_key] = prefix + val + return weight_map + + +class ResNLBlock(nn.Module): + def __init__(self, cfg, dim_in, dim_out, stride, num_blocks, dim_inner, use_temp_convs, temp_strides, + dilation=1, nonlocal_mod=1000, group_nonlocal=False, lateral=False): + super(ResNLBlock, self).__init__() + self.blocks = [] + for idx in range(num_blocks): + block_name = "res_{}".format(idx) + block_stride = stride if idx == 0 else 1 + block_dilation = dilation + dim_in0 = dim_in + int(dim_in * cfg.MODEL.BACKBONE.SLOWFAST.BETA * 2) if lateral and (idx == 0) and ( + cfg.MODEL.BACKBONE.SLOWFAST.LATERAL != 'ttoc_sum') else dim_in + # To transfer weight from classification model, we change res5_0 from stride 2 to stride 1, + # and all res5_x layers from dilation 1 to dilation 2. In pretrain, res5_0 with stride 2 need a shortcut conv. + # idx==0 and dilation!=1 means that it need a short cut in pretrain stage, + # so we should keep it since we load weight from a pretrained model. + # if idx!=0, block_stride will not be larger than 1 in pretrain stage. + need_shortcut = not (dim_in0==dim_out and temp_strides[idx]==1 and block_stride==1) or \ + (idx==0 and dilation!=1) + res_module = ResBlock(cfg, dim_in0, dim_out, dim_inner=dim_inner, + stride=block_stride, + dilation=block_dilation, + use_temp_conv=use_temp_convs[idx], + temp_stride=temp_strides[idx], + need_shortcut=need_shortcut) + self.add_module(block_name, res_module) + self.blocks.append(block_name) + dim_in = dim_out + if idx % nonlocal_mod == nonlocal_mod - 1: + nl_block_name = "nonlocal_{}".format(idx) + nl_module = NLBlock(dim_in, dim_in, int(dim_in / 2), + cfg.MODEL.NONLOCAL, group=group_nonlocal) + self.add_module(nl_block_name, nl_module) + self.blocks.append(nl_block_name) + + def forward(self, x): + for layer_name in self.blocks: + x = getattr(self, layer_name)(x) + return x + + def c2_weight_mapping(self): + weight_map = {} + for name, m_child in self.named_children(): + idx = name.split('_')[-1] + if m_child.state_dict(): + child_map = m_child.c2_weight_mapping() + for key, val in child_map.items(): + new_key = name + '.' 
+ key + if isinstance(m_child, NLBlock): + prefix = 'nonlocal_conv{}_' + '{}_'.format(idx) + else: + prefix = 'res{}_' + '{}_'.format(idx) + weight_map[new_key] = prefix + val + return weight_map diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/detector/__init__.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/detector/__init__.py new file mode 100644 index 0000000..5e29e2b --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/detector/__init__.py @@ -0,0 +1 @@ +from .action_detector import build_detection_model \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/detector/action_detector.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/detector/action_detector.py new file mode 100644 index 0000000..6b6c773 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/detector/action_detector.py @@ -0,0 +1,45 @@ +from torch import nn + +from ..backbone import build_backbone +from ..roi_heads.roi_heads_3d import build_3d_roi_heads + + +class ActionDetector(nn.Module): + def __init__(self, cfg): + super(ActionDetector, self).__init__() + self.backbone = build_backbone(cfg) + self.roi_heads = build_3d_roi_heads(cfg, self.backbone.dim_out) + + def forward(self, slow_video, fast_video, boxes, objects=None, extras={}, part_forward=-1): + # part_forward is used to split this model into two parts. + # if part_forward<0, just use it as a single model + # if part_forward=0, use this model to extract pooled feature(person and object, no memory features). + # if part_forward=1, use the ia structure to aggregate interactions and give final result. + # implemented in roi_heads + + if part_forward==1: + slow_features = fast_features = None + else: + slow_features, fast_features = self.backbone(slow_video, fast_video) + + result, detector_losses, loss_weight, detector_metrics = self.roi_heads(slow_features, fast_features, boxes, objects, extras, part_forward) + + if self.training: + return detector_losses, loss_weight, detector_metrics, result + + return result + + def c2_weight_mapping(self): + if not hasattr(self, "c2_mapping"): + weight_map = {} + for name, m_child in self.named_children(): + if m_child.state_dict() and hasattr(m_child, "c2_weight_mapping"): + child_map = m_child.c2_weight_mapping() + for key, val in child_map.items(): + new_key = name + '.' 
+ key + weight_map[new_key] = val + self.c2_mapping = weight_map + return self.c2_mapping + +def build_detection_model(cfg): + return ActionDetector(cfg) \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/nonlocal_block.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/nonlocal_block.py new file mode 100644 index 0000000..1a27cd1 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/nonlocal_block.py @@ -0,0 +1,135 @@ +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import torch +import torch.nn as nn +from alphaction.layers import FrozenBatchNorm3d + + +class NLBlock(nn.Module): + def __init__(self, dim_in, dim_out, dim_inner, nl_cfg, group=False): + super(NLBlock, self).__init__() + + self.nl_cfg = nl_cfg.clone() + self.group = group + self.group_size = 4 + + init_std = nl_cfg.CONV_INIT_STD + bias = not nl_cfg.NO_BIAS + pool_stride = 2 + + self.scale_value = dim_inner ** (-0.5) + self.dim_inner = dim_inner + + self.theta = nn.Conv3d(dim_in, dim_inner, 1, bias=bias) + nn.init.normal_(self.theta.weight, std=init_std) + if bias: + nn.init.constant_(self.theta.bias, 0) + + if nl_cfg.USE_MAXPOOL: + self.maxpool = nn.MaxPool3d((1, pool_stride, pool_stride), + stride=(1, pool_stride, pool_stride)) + + self.phi = nn.Conv3d(dim_in, dim_inner, 1, bias=bias) + nn.init.normal_(self.phi.weight, std=init_std) + if bias: + nn.init.constant_(self.phi.bias, 0) + + self.g = nn.Conv3d(dim_in, dim_inner, 1, bias=bias) + nn.init.normal_(self.g.weight, std=init_std) + if bias: + nn.init.constant_(self.g.bias, 0) + + if nl_cfg.USE_SOFTMAX: + self.softmax = nn.Softmax(dim=2) + + self.out = nn.Conv3d(dim_inner, dim_out, 1, bias=bias) + if nl_cfg.USE_ZERO_INIT_CONV: + nn.init.constant_(self.out.weight, 0) + else: + nn.init.normal_(self.out.weight, std=init_std) + if bias: + nn.init.constant_(self.out.bias, 0) + + if nl_cfg.USE_BN: + if nl_cfg.FROZEN_BN: + self.bn = FrozenBatchNorm3d(dim_out, eps=nl_cfg.BN_EPSILON) + else: + self.bn = nn.BatchNorm3d(dim_out, eps=nl_cfg.BN_EPSILON, momentum=nl_cfg.BN_MOMENTUM) + nn.init.constant_(self.bn.weight, nl_cfg.BN_INIT_GAMMA) + + def forward(self, x): + if x.dim() != 5: + raise ValueError('expected 4D or 5D input (got {}D input)' + .format(x.dim())) + + if self.group: + x = x.transpose(1, 2) + sz_before_group = list(x.shape) + sz_after_group = sz_before_group.copy() + sz_after_group[0] = -1 + sz_after_group[1] = self.group_size + x = x.contiguous().view(*sz_after_group) + x = x.transpose(1, 2) + + batch_size = x.shape[0] + + theta = self.theta(x) + + if self.nl_cfg.USE_MAXPOOL: + max_pool = self.maxpool(x) + else: + max_pool = x + + phi = self.phi(max_pool) + + g = self.g(max_pool) + + org_size = theta.size() + mat_size = [batch_size, self.dim_inner, -1] + theta = theta.view(*mat_size) + phi = phi.view(*mat_size) + g = g.view(*mat_size) + + theta_phi = torch.bmm(theta.transpose(1, 2), phi) + + if self.nl_cfg.USE_SOFTMAX: + if self.nl_cfg.USE_SCALE: + theta_phi_sc = theta_phi * self.scale_value + else: + theta_phi_sc = theta_phi + p = self.softmax(theta_phi_sc) + else: + p = theta_phi / theta_phi.shape[-1] + + t = torch.bmm(g, p.transpose(1, 2)) + + t = t.view(org_size) + + out = self.out(t) + + if self.nl_cfg.USE_BN: + out = self.bn(out) + out = out + x + + if self.group: + out = out.transpose(1, 2) + out = out.contiguous().view(*sz_before_group) + out = out.transpose(1, 2) + + return out + + def 
c2_weight_mapping(self): + weight_map = {} + for name, m_child in self.named_children(): + if m_child.state_dict(): + if isinstance(m_child, (nn.BatchNorm3d, FrozenBatchNorm3d)): + weight_map[name + '.weight'] = '{}_s'.format(name) + weight_map[name + '.running_mean'] = '{}_rm'.format(name) + weight_map[name + '.running_var'] = '{}_riv'.format(name) + elif isinstance(m_child, nn.GroupNorm): + weight_map[name + '.weight'] = '{}_s'.format(name) + else: + weight_map[name + '.weight'] = '{}_w'.format(name) + weight_map[name + '.bias'] = '{}_b'.format(name) + return weight_map diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/poolers.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/poolers.py new file mode 100644 index 0000000..0ece6ee --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/poolers.py @@ -0,0 +1,53 @@ +import torch +from torch import nn + +from alphaction.layers import ROIAlign3d, ROIPool3d + + +class Pooler3d(nn.Module): + def __init__(self, output_size, scale, sampling_ratio=None, pooler_type='align3d'): + super(Pooler3d, self).__init__() + if pooler_type == 'align3d': + assert sampling_ratio is not None, 'Sampling ratio should be specified for 3d roi align.' + self.pooler = ROIAlign3d( + output_size, spatial_scale=scale, sampling_ratio=sampling_ratio + ) + elif pooler_type == 'pooling3d': + self.pooler = ROIPool3d( + output_size, spatial_scale=scale + ) + self.output_size = output_size + + def convert_to_roi_format(self, boxes, dtype, device): + bbox_list = list() + ids_list = list() + for i, b in enumerate(boxes): + if not b: + bbox_list.append(torch.zeros((0, 4), dtype=dtype, device=device)) + ids_list.append(torch.zeros((0, 1), dtype=dtype, device=device)) + else: + bbox_list.append(b.bbox.to(dtype)) + ids_list.append(torch.full((len(b), 1), i, dtype=dtype, device=device)) + concat_boxes = torch.cat(bbox_list, dim=0) + ids = torch.cat(ids_list, dim=0) + rois = torch.cat([ids, concat_boxes], dim=1) + + return rois + + def forward(self, x, boxes): + rois = self.convert_to_roi_format(boxes, x.dtype, x.device) + return self.pooler(x, rois) + + +def make_3d_pooler(head_cfg): + resolution = head_cfg.POOLER_RESOLUTION + scale = head_cfg.POOLER_SCALE + sampling_ratio = head_cfg.POOLER_SAMPLING_RATIO + pooler_type = head_cfg.POOLER_TYPE + pooler = Pooler3d( + output_size=(resolution, resolution), + scale=scale, + sampling_ratio=sampling_ratio, + pooler_type=pooler_type, + ) + return pooler \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/registry.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/registry.py new file mode 100644 index 0000000..b81b93f --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/registry.py @@ -0,0 +1,6 @@ +from alphaction.utils.registry import Registry + +BACKBONES = Registry() +ROI_ACTION_FEATURE_EXTRACTORS = Registry() +ROI_ACTION_PREDICTORS = Registry() +INTERACTION_AGGREGATION_STRUCTURES = Registry() \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/__init__.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/action_head/IA_structure.py 
b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/action_head/IA_structure.py new file mode 100644 index 0000000..6f371ad --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/action_head/IA_structure.py @@ -0,0 +1,409 @@ +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import math +import torch +import torch.nn as nn +from alphaction.modeling import registry +from alphaction.utils.IA_helper import has_memory, has_person, has_object + + +class InteractionBlock(nn.Module): + def __init__(self, dim_person, dim_other, dim_out, dim_inner, structure_config, + max_others=20, + temp_pos_len=-1, # configs for memory feature + dropout=0.): + super(InteractionBlock, self).__init__() + self.dim_person = dim_person + self.dim_other = dim_other + self.dim_out = dim_out + self.dim_inner = dim_inner + self.max_others = max_others + self.scale_value = dim_inner ** (-0.5) + # config for temporal position, only used for memory feature, + self.temp_pos_len = temp_pos_len + + bias = not structure_config.NO_BIAS + init_std = structure_config.CONV_INIT_STD + + self.query = nn.Conv3d(dim_person, dim_inner, 1, bias) + init_layer(self.query, init_std, bias) + + self.key = nn.Conv3d(dim_other, dim_inner, 1, bias) + init_layer(self.key, init_std, bias) + + self.value = nn.Conv3d(dim_other, dim_inner, 1, bias) + init_layer(self.value, init_std, bias) + + self.out = nn.Conv3d(dim_inner, dim_out, 1, bias) + if structure_config.USE_ZERO_INIT_CONV: + out_init = 0 + else: + out_init = init_std + init_layer(self.out, out_init, bias) + + self.softmax = nn.Softmax(dim=2) + self.relu = nn.ReLU() + self.dropout = nn.Dropout(dropout) + + self.use_ln = structure_config.LAYER_NORM + + if dim_person != dim_out: + self.shortcut = nn.Conv3d(dim_person, dim_out, 1, bias) + init_layer(self.shortcut, init_std, bias) + else: + self.shortcut = None + + if self.temp_pos_len > 0: + self.temporal_position_k = nn.Parameter(torch.zeros(temp_pos_len, 1, self.dim_inner, 1, 1, 1)) + self.temporal_position_v = nn.Parameter(torch.zeros(temp_pos_len, 1, self.dim_inner, 1, 1, 1)) + + def forward(self, person, others): + """ + :param person: [n, channels, t, h, w] + :param others: [n, num_other, channels, t, h, w] + """ + device = person.device + n, dim_person, t, h, w = person.size() + _, max_others, dim_others, t_others, h_others, w_others = others.size() + + query_batch = person + key = fuse_batch_num(others) # [n*num_other, channels, t, h, w] + + query_batch = self.query(query_batch) + key_batch = self.key(key).contiguous().view(n, self.max_others, self.dim_inner, t_others, h_others, w_others) + value_batch = self.value(key).contiguous().view(n, self.max_others, self.dim_inner, t_others, h_others, + w_others) + + if self.temp_pos_len > 0: + max_person_per_sec = max_others // self.temp_pos_len + key_batch = key_batch.contiguous().view(n, self.temp_pos_len, max_person_per_sec, self.dim_inner, t_others, + h_others, + w_others) + key_batch = key_batch + self.temporal_position_k + key_batch = key_batch.contiguous().view(n, self.max_others, self.dim_inner, t_others, + h_others, w_others) + + value_batch = value_batch.contiguous().view(n, self.temp_pos_len, max_person_per_sec, self.dim_inner, + t_others, + h_others, w_others) + value_batch = value_batch + self.temporal_position_v + value_batch = value_batch.contiguous().view(n, self.max_others, self.dim_inner, t_others, + h_others, w_others) + + query_batch = 
query_batch.contiguous().view(n, self.dim_inner, -1).transpose(1, 2) # [n, thw, dim_inner] + key_batch = key_batch.contiguous().view(n, self.max_others, self.dim_inner, -1).transpose(2, 3) + key_batch = key_batch.contiguous().view(n, self.max_others * t_others * h_others * w_others, -1).transpose( + 1, 2) + + qk = torch.bmm(query_batch, key_batch) # n, thw, max_other * thw + + qk_sc = qk * self.scale_value + + weight = self.softmax(qk_sc) + + value_batch = value_batch.contiguous().view(n, self.max_others, self.dim_inner, -1).transpose(2, 3) + value_batch = value_batch.contiguous().view(n, self.max_others * t_others * h_others * w_others, -1) + out = torch.bmm(weight, value_batch) # n, thw, dim_inner + + out = out.contiguous().view(n, t * h * w, -1) + out = out.transpose(1, 2) + out = out.contiguous().view(n, self.dim_inner, t, h, w) + + if self.use_ln: + if not hasattr(self, "layer_norm"): + self.layer_norm = nn.LayerNorm([self.dim_inner, t, h, w], elementwise_affine=False).to( + device) + out = self.layer_norm(out) + + out = self.relu(out) + + out = self.out(out) + out = self.dropout(out) + + if self.shortcut: + person = self.shortcut(person) + + out = out + person + return out + + +class IAStructure(nn.Module): + def __init__(self, dim_person, dim_mem, dim_out, structure_cfg): + super(IAStructure, self).__init__() + self.dim_person = dim_person + self.dim_others = dim_mem + self.dim_inner = structure_cfg.DIM_INNER + self.dim_out = dim_out + + self.max_person = structure_cfg.MAX_PERSON + self.max_object = structure_cfg.MAX_OBJECT + self.mem_len = structure_cfg.LENGTH[0] + structure_cfg.LENGTH[1] + 1 + self.mem_feature_len = self.mem_len * structure_cfg.MAX_PER_SEC + + self.I_block_list = structure_cfg.I_BLOCK_LIST + + bias = not structure_cfg.NO_BIAS + conv_init_std = structure_cfg.CONV_INIT_STD + + self.has_P = has_person(structure_cfg) + self.has_O = has_object(structure_cfg) + self.has_M = has_memory(structure_cfg) + + self.person_dim_reduce = nn.Conv3d(dim_person, self.dim_inner, 1, bias) # reduce person query + init_layer(self.person_dim_reduce, conv_init_std, bias) + self.reduce_dropout = nn.Dropout(structure_cfg.DROPOUT) + + if self.has_M: + self.mem_dim_reduce = nn.Conv3d(dim_mem, self.dim_inner, 1, bias) + init_layer(self.mem_dim_reduce, conv_init_std, bias) + if self.has_P: + self.person_key_dim_reduce = nn.Conv3d(dim_person, self.dim_inner, 1, bias) # reduce person key + init_layer(self.person_key_dim_reduce, conv_init_std, bias) + if self.has_O: + self.object_dim_reduce = nn.Conv3d(dim_person, self.dim_inner, 1, bias) + init_layer(self.object_dim_reduce, conv_init_std, bias) + + def forward(self, person, person_boxes, obj_feature, object_boxes, mem_feature, ): + query, person_key, object_key, mem_key = self._reduce_dim(person, person_boxes, obj_feature, object_boxes, + mem_feature) + + return self._aggregate(person_boxes, query, person_key, object_key, mem_key) + + def _reduce_dim(self, person, person_boxes, obj_feature, object_boxes, mem_feature): + query = self.person_dim_reduce(person) + query = self.reduce_dropout(query) + n = query.size(0) + + if self.has_P: + person_key = self.person_key_dim_reduce(person) + person_key = self.reduce_dropout(person_key) + else: + person_key = None + + if self.has_O: + object_key = separate_roi_per_person(person_boxes, obj_feature, object_boxes, + self.max_object) + object_key = fuse_batch_num(object_key) + object_key = self.object_dim_reduce(object_key) + object_key = unfuse_batch_num(object_key, n, self.max_object) + object_key = 
self.reduce_dropout(object_key) + else: + object_key = None + + if self.has_M: + mem_key = separate_batch_per_person(person_boxes, mem_feature) + mem_key = fuse_batch_num(mem_key) + mem_key = self.mem_dim_reduce(mem_key) + mem_key = unfuse_batch_num(mem_key, n, self.mem_feature_len) + mem_key = self.reduce_dropout(mem_key) + else: + mem_key = None + + return query, person_key, object_key, mem_key + + def _aggregate(self, proposals, query, person_key, object_key, mem_key): + raise NotImplementedError + + def _make_interaction_block(self, block_type, block_name, dim_person, dim_other, dim_out, dim_inner, structure_cfg): + dropout = structure_cfg.DROPOUT + temp_pos_len = -1 + if block_type == "P": + max_others = self.max_person + elif block_type == "O": + max_others = self.max_object + elif block_type == "M": + max_others = self.mem_feature_len + if structure_cfg.TEMPORAL_POSITION: + temp_pos_len = self.mem_len + else: + raise KeyError("Unrecognized interaction block type '{}'!".format(block_type)) + + I_block = InteractionBlock(dim_person, dim_other, dim_out, dim_inner, + structure_cfg, max_others, + temp_pos_len=temp_pos_len, dropout=dropout) + + self.add_module(block_name, I_block) + + +@registry.INTERACTION_AGGREGATION_STRUCTURES.register("serial") +class SerialIAStructure(IAStructure): + def __init__(self, dim_person, dim_mem, dim_out, structure_cfg): + super(SerialIAStructure, self).__init__(dim_person, dim_mem, dim_out, structure_cfg) + + block_count = dict() + for idx, block_type in enumerate(self.I_block_list): + block_count[block_type] = block_count.get(block_type, 0) + 1 + name = block_type + "_block_{}".format(block_count[block_type]) + dim_out_trans = dim_out if (idx == len(self.I_block_list) - 1) else self.dim_inner + self._make_interaction_block(block_type, name, self.dim_inner, self.dim_inner, + dim_out_trans, self.dim_inner, structure_cfg) + + def _aggregate(self, person_boxes, query, person_key, object_key, mem_key): + block_count = dict() + for idx, block_type in enumerate(self.I_block_list): + block_count[block_type] = block_count.get(block_type, 0) + 1 + name = block_type + "_block_{}".format(block_count[block_type]) + I_block = getattr(self, name) + if block_type == "P": + person_key = separate_roi_per_person(person_boxes, person_key, person_boxes, self.max_person, ) + query = I_block(query, person_key) + elif block_type == "O": + query = I_block(query, object_key) + elif block_type == "M": + query = I_block(query, mem_key) + person_key = query + + return query + + +@registry.INTERACTION_AGGREGATION_STRUCTURES.register("dense") +class DenseSerialIAStructure(SerialIAStructure): + def __init__(self, dim_person, dim_mem, dim_out, structure_cfg): + super(DenseSerialIAStructure, self).__init__(dim_person, dim_mem, dim_out, structure_cfg) + + self.dense_params = nn.ParameterDict() + for idx in range(2, len(self.I_block_list)): + init_tensor = torch.full((idx, self.dim_inner, 1, 1, 1), -5 * math.log(10), dtype=torch.float32) + init_tensor[idx - 1, ...] 
= 0 + self.dense_params[str(idx)] = nn.Parameter(init_tensor) + + self.softmax = nn.Softmax(dim=0) + + def _aggregate(self, person_boxes, query, person_key, object_key, mem_key): + + block_count = dict() + history_query = [] + for idx, block_type in enumerate(self.I_block_list): + block_count[block_type] = block_count.get(block_type, 0) + 1 + name = block_type + "_block_{}".format(block_count[block_type]) + I_block = getattr(self, name) + if idx >= 2: + query = torch.stack(history_query, dim=1) + # [n, prev_idx, c, T, H, W] + query = torch.sum(query * self.softmax(self.dense_params[str(idx)]), dim=1) + person_key = query + if block_type == "P": + person_key = separate_roi_per_person(person_boxes, person_key, person_boxes, self.max_person, ) + query = I_block(query, person_key) + elif block_type == "O": + query = I_block(query, object_key) + elif block_type == "M": + query = I_block(query, mem_key) + person_key = query + history_query.append(query) + + return query + + +@registry.INTERACTION_AGGREGATION_STRUCTURES.register("parallel") +class ParallelIAStructure(IAStructure): + def __init__(self, dim_person, dim_others, dim_out, structure_cfg): + super(ParallelIAStructure, self).__init__(dim_person, dim_others, dim_out, structure_cfg) + + block_count = dict() + for layer_idx, layer in enumerate(self.I_block_list): + for block_type in layer: + block_count[block_type] = block_count.get(block_type, 0) + 1 + name = block_type + "_block_{}".format(block_count[block_type]) + dim_out_trans = dim_out if (layer_idx == len(self.I_block_list) - 1) else self.dim_inner + self._make_interaction_block(block_type, name, self.dim_inner, self.dim_inner, + dim_out_trans, self.dim_inner, structure_cfg) + + def _aggregate(self, person_boxes, query, person_key, object_key, mem_key): + + block_count = dict() + last_query = [query, ] * len(self.I_block_list[0]) + for layer_idx, layer in enumerate(self.I_block_list): + new_query = [] + for idx, block_type in enumerate(layer): + block_count[block_type] = block_count.get(block_type, 0) + 1 + name = block_type + "_block_{}".format(block_count[block_type]) + I_block = getattr(self, name) + query = last_query[idx] + if block_type == "P": + person_key = separate_roi_per_person(person_boxes, person_key, person_boxes, + self.max_person) + query = I_block(query, person_key) + person_key = query + elif block_type == "O": + query = I_block(query, object_key) + elif block_type == "M": + query = I_block(query, mem_key) + new_query.append(query) + last_query = new_query + + query = torch.sum(torch.stack(last_query), 0) + + return query + + +def separate_roi_per_person(proposals, things, other_proposals, max_things): + """ + :param things: [n2, c, t, h, w] + :param proposals: + :param max_things: + :return [n, max_other, c, t, h, w] + """ + res = [] + _, c, t, h, w = things.size() + device = things.device + index = 0 + for i, (person_box, other_box) in enumerate(zip(proposals, other_proposals)): + person_num = len(person_box) + other_num = len(other_box) + tmp = torch.zeros((person_num, max_things, c, t, h, w), device=device) + if other_num > max_things: + idx = torch.randperm(other_num)[:max_things] + tmp[:, :max_things] = things[index:index + other_num][idx] + else: + tmp[:, :other_num] = things[index:index + other_num] + + res.append(tmp) + index += other_num + features = torch.cat(res, dim=0) + return features + + +def separate_batch_per_person(proposals, things): + """ + + :param things: [b, max_others, c, t, h, w] + :return [n, max_others, c, t, h, w] + """ + res = [] + b, 
max_others, c, t, h, w = things.size() + device = things.device + for i, person_box in enumerate(proposals): + person_num = len(person_box) + tmp = torch.zeros((person_num, max_others, c, t, h, w), device=device) + tmp[:] = things[i] + res.append(tmp) + return torch.cat(res, dim=0) + + +def fuse_batch_num(things): + n, number, c, t, h, w = things.size() + return things.contiguous().view(-1, c, t, h, w) + + +def unfuse_batch_num(things, batch_size, num): + assert things.size(0) == batch_size * num, "dimension should matches" + _, c, t, h, w = things.size() + return things.contiguous().view(batch_size, num, c, t, h, w) + + +def init_layer(layer, init_std, bias): + if init_std == 0: + nn.init.constant_(layer.weight, 0) + else: + nn.init.normal_(layer.weight, std=init_std) + if bias: + nn.init.constant_(layer.bias, 0) + + +def make_ia_structure(cfg, dim_in): + func = registry.INTERACTION_AGGREGATION_STRUCTURES[ + cfg.MODEL.IA_STRUCTURE.STRUCTURE + ] + return func(dim_in, cfg.MODEL.IA_STRUCTURE.DIM_IN, cfg.MODEL.IA_STRUCTURE.DIM_OUT, cfg.MODEL.IA_STRUCTURE) diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/action_head/__init__.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/action_head/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/action_head/action_head.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/action_head/action_head.py new file mode 100644 index 0000000..12fced1 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/action_head/action_head.py @@ -0,0 +1,94 @@ +import torch + +from .roi_action_feature_extractor import make_roi_action_feature_extractor +from .roi_action_predictors import make_roi_action_predictor +from .inference import make_roi_action_post_processor +from .loss import make_roi_action_loss_evaluator +from .metric import make_roi_action_accuracy_evaluator +from alphaction.modeling.utils import prepare_pooled_feature +from alphaction.utils.comm import all_reduce + + +class ROIActionHead(torch.nn.Module): + """ + Generic Action Head class. + """ + + def __init__(self, cfg, dim_in): + super(ROIActionHead, self).__init__() + self.feature_extractor = make_roi_action_feature_extractor(cfg, dim_in) + self.predictor = make_roi_action_predictor(cfg, self.feature_extractor.dim_out) + self.post_processor = make_roi_action_post_processor(cfg) + self.loss_evaluator = make_roi_action_loss_evaluator(cfg) + self.accuracy_evaluator = make_roi_action_accuracy_evaluator(cfg) + self.test_ext = cfg.TEST.EXTEND_SCALE + + def forward(self, slow_features, fast_features, boxes, objects=None, extras={}, part_forward=-1): + # In training stage, boxes are from gt. + # In testing stage, boxes are detected by human detector and proposals should be + # enlarged boxes. 
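+        # Rough two-pass usage sketch (hypothetical driver code; the real loop lives
+        # in the inference scripts, so treat the names `model`, `slow_clip`,
+        # `fast_clip`, `cached_extras` as assumptions): first call the detector with
+        # part_forward=0 to cache pooled person/object features, e.g.
+        #     feats = model(slow_clip, fast_clip, boxes, objects, part_forward=0)
+        # then call it again with part_forward=1, feeding the cached features back in
+        # through extras["current_feat_p"] / extras["current_feat_o"], so only the
+        # interaction-aggregation structure runs:
+        #     preds = model(None, None, boxes, extras=cached_extras, part_forward=1)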
+ assert not (self.training and part_forward >= 0) + + if part_forward == 1: + boxes = extras["current_feat_p"] + objects = extras["current_feat_o"] + + if self.training: + proposals = self.loss_evaluator.sample_box(boxes) + else: + proposals = [box.extend(self.test_ext) for box in boxes] + + x, x_pooled, x_objects = self.feature_extractor(slow_features, fast_features, proposals, objects, extras, part_forward) + + if part_forward == 0: + pooled_feature = prepare_pooled_feature(x_pooled, boxes) + if x_objects is None: + object_pooled_feature = None + else: + object_pooled_feature = prepare_pooled_feature(x_objects, objects) + return [pooled_feature, object_pooled_feature], {}, {}, {} + + action_logits = self.predictor(x) + + if not self.training: + result = self.post_processor((action_logits,), boxes) + return result, {}, {}, {} + + box_num = action_logits.size(0) + box_num = torch.as_tensor([box_num], dtype=torch.float32, device=action_logits.device) + all_reduce(box_num, average=True) + + loss_dict, loss_weight = self.loss_evaluator( + [action_logits], box_num.item(), + ) + + metric_dict = self.accuracy_evaluator( + [action_logits], proposals, box_num.item(), + ) + + pooled_feature = prepare_pooled_feature(x_pooled, proposals) + if x_objects is None: + object_pooled_feature = [] + else: + object_pooled_feature = prepare_pooled_feature(x_objects, objects) + + return ( + [pooled_feature, object_pooled_feature], + loss_dict, + loss_weight, + metric_dict, + ) + + def c2_weight_mapping(self): + weight_map = {} + for name, m_child in self.named_children(): + if m_child.state_dict() and hasattr(m_child, "c2_weight_mapping"): + child_map = m_child.c2_weight_mapping() + for key, val in child_map.items(): + new_key = name + '.' + key + weight_map[new_key] = val + return weight_map + + +def build_roi_action_head(cfg, dim_in): + return ROIActionHead(cfg, dim_in) diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/action_head/inference.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/action_head/inference.py new file mode 100644 index 0000000..1692afe --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/action_head/inference.py @@ -0,0 +1,45 @@ +import torch +from torch import nn +import torch.nn.functional as F + +from alphaction.structures.bounding_box import BoxList + + +class PostProcessor(nn.Module): + def __init__(self, pose_action_num): + super(PostProcessor, self).__init__() + self.pose_action_num = pose_action_num + + def forward(self, x, boxes): + # boxes should be (#detections,4) + # prob should be calculated in different way. 
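+        # The first `pose_action_num` logits are mutually exclusive pose classes and
+        # therefore go through a softmax; the remaining object/person-interaction
+        # logits are multi-label and go through independent sigmoids, as done below.
+        # The two probability blocks are then concatenated back into one score tensor.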
+ class_logits, = x + pose_action_prob = F.softmax(class_logits[:,:self.pose_action_num],-1) + interaction_action_prob = torch.sigmoid(class_logits[:,self.pose_action_num:]) + + action_prob = torch.cat((pose_action_prob,interaction_action_prob),1) + + image_shapes = [box.size for box in boxes] + boxes_per_image = [len(box) for box in boxes] + box_tensors = [a.bbox for a in boxes] + + action_prob = action_prob.split(boxes_per_image, dim=0) + + results = [] + for prob, boxes_per_image, image_shape in zip( + action_prob, box_tensors, image_shapes + ): + boxlist = self.prepare_boxlist(boxes_per_image, prob, image_shape) + results.append(boxlist) + return results + + def prepare_boxlist(self, boxes, scores, image_shape): + boxlist = BoxList(boxes, image_shape, mode="xyxy") + boxlist.add_field("scores", scores) + return boxlist + + +def make_roi_action_post_processor(cfg): + softmax_num = cfg.MODEL.ROI_ACTION_HEAD.NUM_PERSON_MOVEMENT_CLASSES + postprocessor = PostProcessor(softmax_num) + return postprocessor diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/action_head/loss.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/action_head/loss.py new file mode 100644 index 0000000..1b430fd --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/action_head/loss.py @@ -0,0 +1,83 @@ +import torch +from alphaction.layers import SigmoidFocalLoss, SoftmaxFocalLoss +from alphaction.modeling.utils import cat + + +class ActionLossComputation(object): + def __init__(self, cfg): + self.proposal_per_clip = cfg.MODEL.ROI_ACTION_HEAD.PROPOSAL_PER_CLIP + self.num_pose = cfg.MODEL.ROI_ACTION_HEAD.NUM_PERSON_MOVEMENT_CLASSES + self.num_object = cfg.MODEL.ROI_ACTION_HEAD.NUM_OBJECT_MANIPULATION_CLASSES + self.num_person = cfg.MODEL.ROI_ACTION_HEAD.NUM_PERSON_INTERACTION_CLASSES + + self.weight_dict = dict( + loss_pose_action = cfg.MODEL.ROI_ACTION_HEAD.POSE_LOSS_WEIGHT, + loss_object_interaction = cfg.MODEL.ROI_ACTION_HEAD.OBJECT_LOSS_WEIGHT, + loss_person_interaction = cfg.MODEL.ROI_ACTION_HEAD.PERSON_LOSS_WEIGHT, + ) + + gamma = cfg.MODEL.ROI_ACTION_HEAD.FOCAL_LOSS.GAMMA + alpha = cfg.MODEL.ROI_ACTION_HEAD.FOCAL_LOSS.ALPHA + self.sigmoid_focal_loss = SigmoidFocalLoss(gamma, alpha, reduction="none") + self.softmax_focal_loss = SoftmaxFocalLoss(gamma, alpha, reduction="sum") + + def sample_box(self, boxes): + proposals = [] + num_proposals = self.proposal_per_clip + for boxes_per_image in boxes: + num_boxes = len(boxes_per_image) + + if num_boxes > num_proposals: + choice_inds = torch.randperm(num_boxes)[:num_proposals] + proposals_per_image = boxes_per_image[choice_inds] + else: + proposals_per_image = boxes_per_image + proposals_per_image = proposals_per_image.random_aug(0.2, 0.1, 0.1, 0.05) + proposals.append(proposals_per_image) + self._proposals = proposals + return proposals + + def __call__(self, class_logits, avg_box_num): + class_logits = cat(class_logits, dim=0) + assert class_logits.shape[1] == (self.num_pose + self.num_object + self.num_person), \ + "The shape of tensor class logits doesn't match total number of action classes." + + if not hasattr(self, "_proposals"): + raise RuntimeError("sample_box needs to be called before") + + proposals = self._proposals + + labels = cat([proposal.get_field("labels") for proposal in proposals], dim=0) + assert class_logits.shape[1] == labels.shape[1], \ + "The shape of tensor class logits doesn't match the label tensor." 
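+        # Descriptive note: the pose block is single-label, so it uses the softmax
+        # focal loss against the argmax of the pose labels; the object- and
+        # person-interaction blocks are multi-label, so they use the sigmoid focal
+        # loss averaged over classes and summed over boxes. Each term is divided by
+        # avg_box_num (the box count all-reduced across processes in action_head.py),
+        # and the returned weight_dict carries the configured per-term loss weights.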
+ + loss_dict = {} + + if self.num_pose > 0: + pose_label = labels[:, :self.num_pose].argmax(dim=1) + pose_logits = class_logits[:, :self.num_pose] + pose_loss = self.softmax_focal_loss(pose_logits, pose_label) / avg_box_num + loss_dict["loss_pose_action"] = pose_loss + + interaction_label = labels[:, self.num_pose:].to(dtype=torch.float32) + object_label = interaction_label[:, :self.num_object] + person_label = interaction_label[:, self.num_object:] + + interaction_logits = class_logits[:, self.num_pose:] + object_logits = interaction_logits[:, :self.num_object] + person_logits = interaction_logits[:, self.num_object:] + + if self.num_object > 0: + object_loss = self.sigmoid_focal_loss(object_logits, object_label).mean(dim=1).sum() / avg_box_num + loss_dict["loss_object_interaction"] = object_loss + if self.num_person > 0: + person_loss = self.sigmoid_focal_loss(person_logits, person_label).mean(dim=1).sum() / avg_box_num + loss_dict["loss_person_interaction"] = person_loss + + return loss_dict, self.weight_dict + + +def make_roi_action_loss_evaluator(cfg): + loss_evaluator = ActionLossComputation(cfg) + + return loss_evaluator \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/action_head/metric.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/action_head/metric.py new file mode 100644 index 0000000..fc86ebd --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/action_head/metric.py @@ -0,0 +1,65 @@ +import torch +from alphaction.modeling.utils import cat + + +class ActionAccuracyComputation(object): + def __init__(self, num_pose, num_object, num_person): + self.num_pose = num_pose + self.num_object = num_object + self.num_person = num_person + + def logic_iou(self, pred, label): + device = pred.device + + version = torch.__version__ + if eval('.'.join(version.split('.')[:2]))>=1.3: + pred = pred.bool() + label = label.bool() + + label_union = (pred | label).float().sum(dim=1) + label_inter = (pred & label).float().sum(dim=1) + replacer = torch.ones_like(label_union, device=device) + zero_mask = label_union == 0 + label_inter = torch.where(zero_mask, replacer, label_inter) + label_union = torch.where(zero_mask, replacer, label_union) + return label_inter / label_union + + def __call__(self, class_logits, proposals, avg_box_num): + class_logits = [logits.detach() for logits in class_logits] + class_logits = cat(class_logits, dim=0) + assert class_logits.shape[1] == (self.num_pose + self.num_object + self.num_person), \ + "The shape of tensor class logits doesn't match total number of action classes." 
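+        # Descriptive note: pose accuracy counts exact argmax matches between pose
+        # logits and labels; interaction accuracy thresholds the sigmoid scores at 0.5
+        # and computes a per-box "logic IoU" (|pred AND label| / |pred OR label|, with
+        # empty unions scored as 1). Both are summed over boxes and divided by
+        # avg_box_num.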
+ + labels = cat([proposal.get_field("labels") for proposal in proposals], dim=0) + + metric_dict = {} + if self.num_pose>0: + pose_label = labels[:, :self.num_pose].argmax(dim=1) + pose_pred = class_logits[:, :self.num_pose].argmax(dim=1) + accuracy_pose_action = pose_label.eq(pose_pred).float().sum() + metric_dict["accuracy_pose_action"] = accuracy_pose_action / avg_box_num + + interaction_label = labels[:, self.num_pose:] + interaction_logits = class_logits[:, self.num_pose:] + interaction_pred = interaction_logits.sigmoid() > 0.5 + + if self.num_object>0: + object_label = interaction_label[:, :self.num_object] + object_pred = interaction_pred[:, :self.num_object] + accuracy_object_interaction = self.logic_iou(object_pred, object_label) + metric_dict["accuracy_object_interaction"] = accuracy_object_interaction.sum() / avg_box_num + + if self.num_person>0: + person_label = interaction_label[:, self.num_object:] + person_pred = interaction_pred[:, self.num_object:] + accuracy_person_interaction = self.logic_iou(person_pred, person_label) + metric_dict["accuracy_person_interaction"] = accuracy_person_interaction.sum() / avg_box_num + + return metric_dict + + +def make_roi_action_accuracy_evaluator(cfg): + num_pose = cfg.MODEL.ROI_ACTION_HEAD.NUM_PERSON_MOVEMENT_CLASSES + num_object = cfg.MODEL.ROI_ACTION_HEAD.NUM_OBJECT_MANIPULATION_CLASSES + num_person = cfg.MODEL.ROI_ACTION_HEAD.NUM_PERSON_INTERACTION_CLASSES + return ActionAccuracyComputation(num_pose, num_object, num_person) \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/action_head/roi_action_feature_extractor.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/action_head/roi_action_feature_extractor.py new file mode 100644 index 0000000..5c0f4be --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/action_head/roi_action_feature_extractor.py @@ -0,0 +1,189 @@ +import torch +from torch import nn +from torch.nn import functional as F + +from alphaction.modeling import registry +from alphaction.modeling.poolers import make_3d_pooler +from alphaction.modeling.roi_heads.action_head.IA_structure import make_ia_structure +from alphaction.modeling.utils import cat, pad_sequence, prepare_pooled_feature +from alphaction.utils.IA_helper import has_object + + +@registry.ROI_ACTION_FEATURE_EXTRACTORS.register("2MLPFeatureExtractor") +class MLPFeatureExtractor(nn.Module): + def __init__(self, config, dim_in): + super(MLPFeatureExtractor, self).__init__() + self.config = config + head_cfg = config.MODEL.ROI_ACTION_HEAD + + self.pooler = make_3d_pooler(head_cfg) + + resolution = head_cfg.POOLER_RESOLUTION + + self.max_pooler = nn.MaxPool3d((1, resolution, resolution)) + + if config.MODEL.IA_STRUCTURE.ACTIVE: + self.max_feature_len_per_sec = config.MODEL.IA_STRUCTURE.MAX_PER_SEC + + self.ia_structure = make_ia_structure(config, dim_in) + + representation_size = head_cfg.MLP_HEAD_DIM + + fc1_dim_in = dim_in + if config.MODEL.IA_STRUCTURE.ACTIVE and (config.MODEL.IA_STRUCTURE.FUSION == "concat"): + fc1_dim_in += config.MODEL.IA_STRUCTURE.DIM_OUT + + self.fc1 = nn.Linear(fc1_dim_in, representation_size) + self.fc2 = nn.Linear(representation_size, representation_size) + + for l in [self.fc1, self.fc2]: + nn.init.kaiming_uniform_(l.weight, a=1) + nn.init.constant_(l.bias, 0) + + self.dim_out = representation_size + + def roi_pooling(self, slow_features, fast_features, proposals): + if slow_features is not 
None: + if self.config.MODEL.ROI_ACTION_HEAD.MEAN_BEFORE_POOLER: + slow_features = slow_features.mean(dim=2, keepdim=True) + slow_x = self.pooler(slow_features, proposals) + if not self.config.MODEL.ROI_ACTION_HEAD.MEAN_BEFORE_POOLER: + slow_x = slow_x.mean(dim=2, keepdim=True) + x = slow_x + if fast_features is not None: + if self.config.MODEL.ROI_ACTION_HEAD.MEAN_BEFORE_POOLER: + fast_features = fast_features.mean(dim=2, keepdim=True) + fast_x = self.pooler(fast_features, proposals) + if not self.config.MODEL.ROI_ACTION_HEAD.MEAN_BEFORE_POOLER: + fast_x = fast_x.mean(dim=2, keepdim=True) + x = fast_x + + if slow_features is not None and fast_features is not None: + x = torch.cat([slow_x, fast_x], dim=1) + return x + + def max_pooling_zero_safe(self, x): + if x.size(0) == 0: + _, c, t, h, w = x.size() + res = self.config.MODEL.ROI_ACTION_HEAD.POOLER_RESOLUTION + x = torch.zeros((0, c, 1, h - res + 1, w - res + 1), device=x.device) + else: + x = self.max_pooler(x) + return x + + def forward(self, slow_features, fast_features, proposals, objects=None, extras={}, part_forward=-1): + ia_active = hasattr(self, "ia_structure") + if part_forward == 1: + person_pooled = cat([box.get_field("pooled_feature") for box in proposals]) + if objects is None: + object_pooled = None + else: + object_pooled = cat([box.get_field("pooled_feature") for box in objects]) + else: + x = self.roi_pooling(slow_features, fast_features, proposals) + + person_pooled = self.max_pooler(x) + + if has_object(self.config.MODEL.IA_STRUCTURE): + object_pooled = self.roi_pooling(slow_features, fast_features, objects) + object_pooled = self.max_pooling_zero_safe(object_pooled) + else: + object_pooled = None + + if part_forward == 0: + return None, person_pooled, object_pooled + + x_after = person_pooled + + if ia_active: + tsfmr = self.ia_structure + mem_len = self.config.MODEL.IA_STRUCTURE.LENGTH + mem_rate = self.config.MODEL.IA_STRUCTURE.MEMORY_RATE + use_penalty = self.config.MODEL.IA_STRUCTURE.PENALTY + memory_person, memory_person_boxes = self.get_memory_feature(extras["person_pool"], extras, mem_len, mem_rate, + self.max_feature_len_per_sec, tsfmr.dim_others, + person_pooled, proposals, use_penalty) + + ia_feature = self.ia_structure(person_pooled, proposals, object_pooled, objects, memory_person, ) + x_after = self.fusion(x_after, ia_feature, self.config.MODEL.IA_STRUCTURE.FUSION) + + x_after = x_after.view(x_after.size(0), -1) + + x_after = F.relu(self.fc1(x_after)) + x_after = F.relu(self.fc2(x_after)) + + return x_after, person_pooled, object_pooled + + def get_memory_feature(self, feature_pool, extras, mem_len, mem_rate, max_boxes, fixed_dim, current_x, current_box, use_penalty): + before, after = mem_len + mem_feature_list = [] + mem_pos_list = [] + device = current_x.device + if use_penalty and self.training: + cur_loss = extras["cur_loss"] + else: + cur_loss = 0.0 + current_feat = prepare_pooled_feature(current_x, current_box, detach=True) + for movie_id, timestamp, new_feat in zip(extras["movie_ids"], extras["timestamps"], current_feat): + before_inds = range(timestamp - before * mem_rate, timestamp, mem_rate) + after_inds = range(timestamp + mem_rate, timestamp + (after + 1) * mem_rate, mem_rate) + cache_cur_mov = feature_pool[movie_id] + mem_box_list_before = [self.check_fetch_mem_feature(cache_cur_mov, mem_ind, max_boxes, cur_loss, use_penalty) + for mem_ind in before_inds] + mem_box_list_after = [self.check_fetch_mem_feature(cache_cur_mov, mem_ind, max_boxes, cur_loss, use_penalty) + for mem_ind in 
after_inds] + mem_box_current = [self.sample_mem_feature(new_feat, max_boxes), ] + mem_box_list = mem_box_list_before + mem_box_current + mem_box_list_after + mem_feature_list += [box_list.get_field("pooled_feature") + if box_list is not None + else torch.zeros(0, fixed_dim, 1, 1, 1, dtype=torch.float32, device="cuda") + for box_list in mem_box_list] + mem_pos_list += [box_list.bbox + if box_list is not None + else torch.zeros(0, 4, dtype=torch.float32, device="cuda") + for box_list in mem_box_list] + + seq_length = sum(mem_len) + 1 + person_per_seq = seq_length * max_boxes + mem_feature = pad_sequence(mem_feature_list, max_boxes) + mem_feature = mem_feature.view(-1, person_per_seq, fixed_dim, 1, 1, 1) + mem_feature = mem_feature.to(device) + mem_pos = pad_sequence(mem_pos_list, max_boxes) + mem_pos = mem_pos.view(-1, person_per_seq, 4) + mem_pos = mem_pos.to(device) + + return mem_feature, mem_pos + + def check_fetch_mem_feature(self, movie_cache, mem_ind, max_num, cur_loss, use_penalty): + if mem_ind not in movie_cache: + return None + box_list = movie_cache[mem_ind] + box_list = self.sample_mem_feature(box_list, max_num) + if use_penalty and self.training: + loss_tag = box_list.delete_field("loss_tag") + penalty = loss_tag / cur_loss if loss_tag < cur_loss else cur_loss / loss_tag + features = box_list.get_field("pooled_feature") * penalty + box_list.add_field("pooled_feature", features) + return box_list + + def sample_mem_feature(self, box_list, max_num): + if len(box_list) > max_num: + idx = torch.randperm(len(box_list))[:max_num] + return box_list[idx].to("cuda") + else: + return box_list.to("cuda") + + def fusion(self, x, out, type="add"): + if type == "add": + return x + out + elif type == "concat": + return torch.cat([x, out], dim=1) + else: + raise NotImplementedError + + +def make_roi_action_feature_extractor(cfg, dim_in): + func = registry.ROI_ACTION_FEATURE_EXTRACTORS[ + cfg.MODEL.ROI_ACTION_HEAD.FEATURE_EXTRACTOR + ] + return func(cfg, dim_in) diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/action_head/roi_action_predictors.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/action_head/roi_action_predictors.py new file mode 100644 index 0000000..0cbec3f --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/action_head/roi_action_predictors.py @@ -0,0 +1,36 @@ +from torch import nn +from alphaction.modeling import registry + + +@registry.ROI_ACTION_PREDICTORS.register("FCPredictor") +class FCPredictor(nn.Module): + def __init__(self, config, dim_in): + super(FCPredictor, self).__init__() + + num_classes = config.MODEL.ROI_ACTION_HEAD.NUM_CLASSES + + dropout_rate = config.MODEL.ROI_ACTION_HEAD.DROPOUT_RATE + if dropout_rate > 0: + self.dropout = nn.Dropout(p=dropout_rate, inplace=True) + + self.cls_score = nn.Linear(dim_in, num_classes) + + nn.init.normal_(self.cls_score.weight, std=0.01) + nn.init.constant_(self.cls_score.bias, 0) + + def forward(self, x): + x = x.view(x.size(0), -1) + if hasattr(self, "dropout"): + x = self.dropout(x) + scores = self.cls_score(x) + + return scores + + def c2_weight_mapping(self): + return {"cls_score.weight": "pred_w", + "cls_score.bias": "pred_b"} + + +def make_roi_action_predictor(cfg, dim_in): + func = registry.ROI_ACTION_PREDICTORS[cfg.MODEL.ROI_ACTION_HEAD.PREDICTOR] + return func(cfg, dim_in) diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/roi_heads_3d.py 
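A hedged sketch of the temporal indexing in get_memory_feature above: for a clip at second `timestamp`, cached per-second features are fetched at `before` strided steps into the past and `after` steps into the future, each `mem_rate` seconds apart, with the current clip in the middle, before everything is padded to max_boxes per step. The helper name memory_timestamps is illustrative only:

def memory_timestamps(timestamp, before, after, mem_rate):
    past = list(range(timestamp - before * mem_rate, timestamp, mem_rate))
    future = list(range(timestamp + mem_rate, timestamp + (after + 1) * mem_rate, mem_rate))
    return past + [timestamp] + future

# e.g. a window of two seconds on each side around second 902, at 1-second rate:
print(memory_timestamps(902, before=2, after=2, mem_rate=1))
# [900, 901, 902, 903, 904]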
b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/roi_heads_3d.py new file mode 100644 index 0000000..167ab92 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/roi_heads/roi_heads_3d.py @@ -0,0 +1,34 @@ +import torch + +from .action_head.action_head import build_roi_action_head + + +class Combined3dROIHeads(torch.nn.ModuleDict): + def __init__(self, cfg, heads): + super(Combined3dROIHeads, self).__init__(heads) + self.cfg = cfg.clone() + + def forward(self, slow_features, fast_features, boxes, objects=None, extras={}, part_forward=-1): + result, loss_action, loss_weight, accuracy_action = self.action(slow_features, fast_features, boxes, objects, extras, part_forward) + + return result, loss_action, loss_weight, accuracy_action + + def c2_weight_mapping(self): + weight_map = {} + for name, m_child in self.named_children(): + if m_child.state_dict() and hasattr(m_child,"c2_weight_mapping"): + child_map = m_child.c2_weight_mapping() + for key, val in child_map.items(): + new_key = name + '.' + key + weight_map[new_key] = val + return weight_map + + +def build_3d_roi_heads(cfg, dim_in): + roi_heads = [] + roi_heads.append(("action", build_roi_action_head(cfg, dim_in))) + + if roi_heads: + roi_heads = Combined3dROIHeads(cfg, roi_heads) + + return roi_heads diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/utils.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/utils.py new file mode 100644 index 0000000..286bffe --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/modeling/utils.py @@ -0,0 +1,46 @@ +""" +Miscellaneous utility functions +""" + +import torch +from alphaction.structures.bounding_box import BoxList + + +def cat(tensors, dim=0): + """ + Efficient version of torch.cat that avoids a copy if there is only a single element in a list + """ + assert isinstance(tensors, (list, tuple)) + if len(tensors) == 1: + return tensors[0] + return torch.cat(tensors, dim) + +def pad_sequence(sequence, targ_size, padding_value=0): + tensor_size = sequence[0].size() + trailing_dims = tensor_size[1:] + out_dims = (len(sequence), targ_size) + trailing_dims + + out_tensor = sequence[0].new_full(out_dims, padding_value) + for i, tensor in enumerate(sequence): + length = tensor.size(0) + out_tensor[i, :length, ...] 
= tensor + + return out_tensor + +def prepare_pooled_feature(x_pooled, boxes, detach=True): + image_shapes = [box.size for box in boxes] + boxes_per_image = [len(box) for box in boxes] + box_tensors = [a.bbox for a in boxes] + + if detach: + x_pooled = x_pooled.detach() + pooled_feature = x_pooled.split(boxes_per_image, dim=0) + + boxes_result = [] + for feature_per_image, boxes_per_image, image_shape in zip( + pooled_feature, box_tensors, image_shapes + ): + boxlist = BoxList(boxes_per_image, image_shape, mode="xyxy") + boxlist.add_field("pooled_feature", feature_per_image) + boxes_result.append(boxlist) + return boxes_result \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/solver/__init__.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/solver/__init__.py new file mode 100644 index 0000000..385542d --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/solver/__init__.py @@ -0,0 +1,3 @@ +from .build import make_optimizer +from .build import make_lr_scheduler +from .lr_scheduler import WarmupMultiStepLR diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/solver/build.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/solver/build.py new file mode 100644 index 0000000..2e061b7 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/solver/build.py @@ -0,0 +1,58 @@ +import torch + +from .lr_scheduler import WarmupMultiStepLR, HalfPeriodCosStepLR + +import torch.nn as nn +from alphaction.modeling.roi_heads.action_head.IA_structure import IAStructure + + +def make_optimizer(cfg, model): + params = [] + bn_param_set = set() + transformer_param_set = set() + for name, module in model.named_modules(): + if isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)): + bn_param_set.add(name + ".weight") + bn_param_set.add(name + ".bias") + elif isinstance(module, IAStructure): + for param_name, _ in module.named_parameters(name): + transformer_param_set.add(param_name) + for key, value in model.named_parameters(): + if not value.requires_grad: + continue + lr = cfg.SOLVER.BASE_LR + weight_decay = cfg.SOLVER.WEIGHT_DECAY + if key in bn_param_set: + weight_decay = cfg.SOLVER.WEIGHT_DECAY_BN + elif "bias" in key: + lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR + weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS + if key in transformer_param_set: + lr = lr * cfg.SOLVER.IA_LR_FACTOR + params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}] + + optimizer = torch.optim.SGD(params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM) + return optimizer + + +def make_lr_scheduler(cfg, optimizer): + scheduler = cfg.SOLVER.SCHEDULER + if scheduler not in ("half_period_cosine", "warmup_multi_step"): + raise ValueError('Scheduler not available') + if scheduler == 'warmup_multi_step': + return WarmupMultiStepLR( + optimizer, + cfg.SOLVER.STEPS, + cfg.SOLVER.GAMMA, + warmup_factor=cfg.SOLVER.WARMUP_FACTOR, + warmup_iters=cfg.SOLVER.WARMUP_ITERS if cfg.SOLVER.WARMUP_ON else 0, + warmup_method=cfg.SOLVER.WARMUP_METHOD, + ) + elif scheduler == 'half_period_cosine': + return HalfPeriodCosStepLR( + optimizer, + warmup_factor=cfg.SOLVER.WARMUP_FACTOR, + warmup_iters=cfg.SOLVER.WARMUP_ITERS if cfg.SOLVER.WARMUP_ON else 0, + max_iters=cfg.SOLVER.MAX_ITER, + warmup_method=cfg.SOLVER.WARMUP_METHOD, + ) diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/solver/lr_scheduler.py 
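A hedged sketch of the per-parameter grouping done by make_optimizer above: every trainable parameter gets its own SGD param group, which lets BatchNorm parameters and biases carry their own learning rate and weight decay. The model and config values below are placeholders, not the repo defaults:

import torch
import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.Linear(8, 4))
base_lr, wd, wd_bn, bias_lr_factor, wd_bias = 0.01, 1e-4, 0.0, 2.0, 0.0

bn_params = set()
for name, m in model.named_modules():
    if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
        bn_params.update({name + ".weight", name + ".bias"})

groups = []
for name, p in model.named_parameters():
    lr, decay = base_lr, wd
    if name in bn_params:
        decay = wd_bn                               # no decay on BN params
    elif name.endswith("bias"):
        lr, decay = base_lr * bias_lr_factor, wd_bias
    groups.append({"params": [p], "lr": lr, "weight_decay": decay})

optimizer = torch.optim.SGD(groups, lr=base_lr, momentum=0.9)
print(len(optimizer.param_groups))  # 6: one group per parameter tensor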
b/Downstream/Spatial-Temporal-Action-Localization/alphaction/solver/lr_scheduler.py new file mode 100644 index 0000000..0a47e24 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/solver/lr_scheduler.py @@ -0,0 +1,87 @@ +# Modified from https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/solver/lr_scheduler.py +from bisect import bisect_right + +import torch +import math + + +class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler): + def __init__( + self, + optimizer, + milestones, + gamma=0.1, + warmup_factor=1.0 / 3, + warmup_iters=500, + warmup_method="linear", + last_epoch=-1, + ): + if not list(milestones) == sorted(milestones): + raise ValueError( + "Milestones should be a list of" " increasing integers. Got {}", + milestones, + ) + + if warmup_method not in ("constant", "linear"): + raise ValueError( + "Only 'constant' or 'linear' warmup_method accepted" + "got {}".format(warmup_method) + ) + self.milestones = milestones + self.gamma = gamma + self.warmup_factor = warmup_factor + self.warmup_iters = warmup_iters + self.warmup_method = warmup_method + super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch) + + def get_lr(self): + warmup_factor = 1 + if self.last_epoch < self.warmup_iters: + if self.warmup_method == "constant": + warmup_factor = self.warmup_factor + elif self.warmup_method == "linear": + alpha = float(self.last_epoch) / self.warmup_iters + warmup_factor = self.warmup_factor * (1 - alpha) + alpha + return [ + base_lr + * warmup_factor + * self.gamma ** bisect_right(self.milestones, self.last_epoch) + for base_lr in self.base_lrs + ] + +class HalfPeriodCosStepLR(torch.optim.lr_scheduler._LRScheduler): + def __init__( + self, + optimizer, + warmup_factor=1.0 / 3, + warmup_iters=8000, + max_iters=60000, + warmup_method="linear", + last_epoch=-1, + ): + if warmup_method not in ("constant", "linear"): + raise ValueError( + "Only 'constant' or 'linear' warmup_method accepted" + "got {}".format(warmup_method) + ) + self.warmup_factor = warmup_factor + self.warmup_iters = warmup_iters + self.max_iters = max_iters + self.warmup_method = warmup_method + super(HalfPeriodCosStepLR, self).__init__(optimizer, last_epoch) + + def get_lr(self): + warmup_factor = 1 + if self.last_epoch < self.warmup_iters: + if self.warmup_method == "constant": + warmup_factor = self.warmup_factor + elif self.warmup_method == "linear": + alpha = float(self.last_epoch) / self.warmup_iters + warmup_factor = self.warmup_factor * (1 - alpha) + alpha + else: + warmup_factor = 0.5 * (math.cos(self.last_epoch / self.max_iters * math.pi) + 1) + return [ + base_lr + * warmup_factor + for base_lr in self.base_lrs + ] \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/structures/__init__.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/structures/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/structures/__pycache__/__init__.cpython-36.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/structures/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..dbdf4b0 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/structures/__pycache__/__init__.cpython-36.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/structures/__pycache__/__init__.cpython-37.pyc 
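A hedged sketch of the schedules above: both schedulers scale each base LR by a factor that ramps up linearly (or stays constant) over the first warmup_iters steps; HalfPeriodCosStepLR then follows half a cosine period over max_iters, while WarmupMultiStepLR instead applies gamma**k step decay at the milestones. Only the cosine variant is sketched here, with illustrative values:

import math

def half_period_cos_factor(t, warmup_iters=8000, warmup_factor=1.0 / 3, max_iters=60000):
    if t < warmup_iters:                      # linear warmup, as in get_lr()
        alpha = t / warmup_iters
        return warmup_factor * (1 - alpha) + alpha
    return 0.5 * (math.cos(t / max_iters * math.pi) + 1)

for t in (0, 4000, 8000, 30000, 60000):
    print(t, round(half_period_cos_factor(t), 4))
# 0 0.3333, 4000 0.6667, 8000 0.9568, 30000 0.5, 60000 0.0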
b/Downstream/Spatial-Temporal-Action-Localization/alphaction/structures/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..ab382da Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/structures/__pycache__/__init__.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/structures/__pycache__/__init__.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/structures/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000..0c762d8 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/structures/__pycache__/__init__.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/structures/__pycache__/bounding_box.cpython-36.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/structures/__pycache__/bounding_box.cpython-36.pyc new file mode 100644 index 0000000..2af86e8 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/structures/__pycache__/bounding_box.cpython-36.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/structures/__pycache__/bounding_box.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/structures/__pycache__/bounding_box.cpython-37.pyc new file mode 100644 index 0000000..9caff11 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/structures/__pycache__/bounding_box.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/structures/__pycache__/bounding_box.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/structures/__pycache__/bounding_box.cpython-39.pyc new file mode 100644 index 0000000..b4944ad Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/structures/__pycache__/bounding_box.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/structures/bounding_box.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/structures/bounding_box.py new file mode 100644 index 0000000..db8ffe2 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/structures/bounding_box.py @@ -0,0 +1,357 @@ +# Modified from https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/structures/bounding_box.py +import torch +import pdb +# transpose +FLIP_LEFT_RIGHT = 0 +FLIP_TOP_BOTTOM = 1 + + +class BoxList(object): + """ + This class represents a set of bounding boxes. + The bounding boxes are represented as a Nx4 Tensor. + In order to uniquely determine the bounding boxes with respect + to an image, we also store the corresponding image dimensions. + They can contain extra information that is specific to each bounding box, such as + labels. 
+ """ + + def __init__(self, bbox, image_size, mode="xyxy"): + device = bbox.device if isinstance(bbox, torch.Tensor) else torch.device("cpu") + bbox = torch.as_tensor(bbox, dtype=torch.float32, device=device) + if bbox.ndimension() != 2: + raise ValueError( + "bbox should have 2 dimensions, got {}".format(bbox.ndimension()) + ) + if bbox.size(-1) != 4: + raise ValueError( + "last dimension of bbox should have a " + "size of 4, got {}".format(bbox.size(-1)) + ) + if mode not in ("xyxy", "xywh"): + raise ValueError("mode should be 'xyxy' or 'xywh'") + + self.bbox = bbox + self.size = image_size # (image_width, image_height) + self.mode = mode + self.extra_fields = {} + + def add_field(self, field, field_data): + self.extra_fields[field] = field_data + + def get_field(self, field): + return self.extra_fields[field] + + def has_field(self, field): + return field in self.extra_fields + + def delete_field(self, field): + return self.extra_fields.pop(field, None) + + def fields(self): + return list(self.extra_fields.keys()) + + def _copy_extra_fields(self, bbox): + for k, v in bbox.extra_fields.items(): + self.extra_fields[k] = v + + def convert(self, mode): + if mode not in ("xyxy", "xywh"): + raise ValueError("mode should be 'xyxy' or 'xywh'") + if mode == self.mode: + return self + # we only have two modes, so don't need to check + # self.mode + xmin, ymin, xmax, ymax = self._split_into_xyxy() + if mode == "xyxy": + bbox = torch.cat((xmin, ymin, xmax, ymax), dim=-1) + bbox = BoxList(bbox, self.size, mode=mode) + else: + TO_REMOVE = 1 + bbox = torch.cat( + (xmin, ymin, xmax - xmin + TO_REMOVE, ymax - ymin + TO_REMOVE), dim=-1 + ) + bbox = BoxList(bbox, self.size, mode=mode) + bbox._copy_extra_fields(self) + return bbox + + def _split_into_xyxy(self): + if self.mode == "xyxy": + xmin, ymin, xmax, ymax = self.bbox.split(1, dim=-1) + return xmin, ymin, xmax, ymax + elif self.mode == "xywh": + TO_REMOVE = 1 + xmin, ymin, w, h = self.bbox.split(1, dim=-1) + return ( + xmin, + ymin, + xmin + (w - TO_REMOVE).clamp(min=0), + ymin + (h - TO_REMOVE).clamp(min=0), + ) + else: + raise RuntimeError("Should not be here") + + def resize(self, size, *args, **kwargs): + """ + Returns a resized copy of this bounding box + + :param size: The requested size in pixels, as a 2-tuple: + (width, height). 
+ """ + + ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size)) + if ratios[0] == ratios[1]: + ratio = ratios[0] + scaled_box = self.bbox * ratio + bbox = BoxList(scaled_box, size, mode=self.mode) + # bbox._copy_extra_fields(self) + for k, v in self.extra_fields.items(): + if not isinstance(v, torch.Tensor) and (hasattr(v, "resize")): + v = v.resize(size, *args, **kwargs) + bbox.add_field(k, v) + return bbox + + ratio_width, ratio_height = ratios + xmin, ymin, xmax, ymax = self._split_into_xyxy() + scaled_xmin = xmin * ratio_width + scaled_xmax = xmax * ratio_width + scaled_ymin = ymin * ratio_height + scaled_ymax = ymax * ratio_height + scaled_box = torch.cat( + (scaled_xmin, scaled_ymin, scaled_xmax, scaled_ymax), dim=-1 + ) + bbox = BoxList(scaled_box, size, mode="xyxy") + # bbox._copy_extra_fields(self) + for k, v in self.extra_fields.items(): + if not isinstance(v, torch.Tensor) and hasattr(v, "resize"): + v = v.resize(size, *args, **kwargs) + bbox.add_field(k, v) + + return bbox.convert(self.mode) + + def transpose(self, method): + """ + Transpose bounding box (flip or rotate in 90 degree steps) + :param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`, + :py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`, + :py:attr:`PIL.Image.ROTATE_180`, :py:attr:`PIL.Image.ROTATE_270`, + :py:attr:`PIL.Image.TRANSPOSE` or :py:attr:`PIL.Image.TRANSVERSE`. + """ + if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM): + raise NotImplementedError( + "Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented" + ) + + image_width, image_height = self.size + xmin, ymin, xmax, ymax = self._split_into_xyxy() + if method == FLIP_LEFT_RIGHT: + TO_REMOVE = 1 + transposed_xmin = image_width - xmax - TO_REMOVE + transposed_xmax = image_width - xmin - TO_REMOVE + transposed_ymin = ymin + transposed_ymax = ymax + elif method == FLIP_TOP_BOTTOM: + transposed_xmin = xmin + transposed_xmax = xmax + transposed_ymin = image_height - ymax + transposed_ymax = image_height - ymin + + transposed_boxes = torch.cat( + (transposed_xmin, transposed_ymin, transposed_xmax, transposed_ymax), dim=-1 + ) + bbox = BoxList(transposed_boxes, self.size, mode="xyxy") + for k, v in self.extra_fields.items(): + if not isinstance(v, torch.Tensor) and (hasattr(v, 'transpose')): + v = v.transpose(method) + bbox.add_field(k, v) + return bbox.convert(self.mode) + + def crop(self, box): + """ + Cropss a rectangular region from this bounding box. The box is a + 4-tuple defining the left, upper, right, and lower pixel + coordinate. + """ + xmin, ymin, xmax, ymax = self._split_into_xyxy() + w, h = box[2] - box[0], box[3] - box[1] + cropped_xmin = (xmin - box[0]).clamp(min=0, max=w) + cropped_ymin = (ymin - box[1]).clamp(min=0, max=h) + cropped_xmax = (xmax - box[0]).clamp(min=0, max=w) + cropped_ymax = (ymax - box[1]).clamp(min=0, max=h) + + # TODO should I filter empty boxes here? + if False: + is_empty = (cropped_xmin == cropped_xmax) | (cropped_ymin == cropped_ymax) + + cropped_box = torch.cat( + (cropped_xmin, cropped_ymin, cropped_xmax, cropped_ymax), dim=-1 + ) + bbox = BoxList(cropped_box, (w, h), mode="xyxy") + # bbox._copy_extra_fields(self) + for k, v in self.extra_fields.items(): + if not isinstance(v, torch.Tensor): + v = v.crop(box) + bbox.add_field(k, v) + return bbox.convert(self.mode) + + def extend(self, scale): + """ + Return a extended bounding box copy of this bounding box. + All other fields should be keep unchanged. + :param scale: By what extent the bounding boxes will be extended. 
+ :return: A extended copy. + """ + if len(scale)<2: + x_scale = y_scale = scale[0] + else: + x_scale = scale[0] + y_scale = scale[1] + TO_REMOVE = 1 + xmin, ymin, xmax, ymax = self._split_into_xyxy() + boxw, boxh = xmax - xmin + TO_REMOVE, ymax - ymin + TO_REMOVE + padw, padh = float(x_scale) * boxw / 2, float(y_scale) * boxh / 2 + extended_xmin = xmin - padw + extended_ymin = ymin - padh + extended_xmax = xmax + padw + extended_ymax = ymax + padh + extended_box = torch.cat( + (extended_xmin, extended_ymin, extended_xmax, extended_ymax), dim=-1 + ) + bbox = BoxList(extended_box, self.size, mode='xyxy') + bbox.clip_to_image() + for k, v in self.extra_fields.items(): + bbox.add_field(k, v) + return bbox.convert(self.mode) + + def random_aug(self, jitter_x_out, jitter_x_in, jitter_y_out, jitter_y_in): + TO_REMOVE = 1 + xmin, ymin, xmax, ymax = self._split_into_xyxy() + device = xmin.device + + def torch_uniform(rows, a=0.0, b=1.0): + return torch.rand(rows, 1, dtype=torch.float32, device=device) * (b - a) + a + + boxw, boxh = xmax - xmin + TO_REMOVE, ymax - ymin + TO_REMOVE + + num_boxes = len(self) + + jitter_xmin = xmin + boxw * torch_uniform(num_boxes, -jitter_x_out, jitter_x_in) + jitter_ymin = ymin + boxh * torch_uniform(num_boxes, -jitter_y_out, jitter_y_in) + jitter_xmax = xmax + boxw * torch_uniform(num_boxes, -jitter_x_in, jitter_x_out) + jitter_ymax = ymax + boxh * torch_uniform(num_boxes, -jitter_y_in, jitter_y_out) + jitter_xmin.clamp_(min=0, max=self.size[0] - TO_REMOVE - 1) + jitter_ymin.clamp_(min=0, max=self.size[1] - TO_REMOVE - 1) + jitter_xmax = torch.max(torch.clamp(jitter_xmax, max=self.size[0] - TO_REMOVE), jitter_xmin + 1) + jitter_ymax = torch.max(torch.clamp(jitter_ymax, max=self.size[1] - TO_REMOVE), jitter_ymin + 1) + + aug_box = torch.cat( + (jitter_xmin, jitter_ymin, jitter_xmax, jitter_ymax), dim=-1 + ) + bbox = BoxList(aug_box, self.size, mode='xyxy') + bbox.clip_to_image(remove_empty=False) + for k, v in self.extra_fields.items(): + bbox.add_field(k, v) + return bbox.convert(self.mode) + + # Tensor-like methods + + def to(self, device): + bbox = BoxList(self.bbox.to(device), self.size, self.mode) + for k, v in self.extra_fields.items(): + if hasattr(v, "to"): + v = v.to(device) + bbox.add_field(k, v) + return bbox + + def top_k(self, k): + # categories, bbox, scores + if "scores" in self.extra_fields: + scores = self.extra_fields["scores"] + length = len(scores) + start = max(length - k, 0) + idx = torch.argsort(scores)[start:] + bbox = BoxList(self.bbox[[idx]], self.size, self.mode) + for k, v in self.extra_fields.items(): + if isinstance(v, torch.Tensor): + bbox.add_field(k, v[idx]) + else: + bbox.add_field(k, v) + else: + bbox = BoxList(self.bbox[:k], self.size, self.mode) + for k, v in self.extra_fields.items(): + if isinstance(v, torch.Tensor): + bbox.add_field(k, v[:k]) + else: + bbox.add_field(k, v) + return bbox + + def __getitem__(self, item): + bbox = BoxList(self.bbox[item].reshape(-1,4), self.size, self.mode) + for k, v in self.extra_fields.items(): + if isinstance(v, torch.Tensor): + bbox.add_field(k, v[item]) + else: + bbox.add_field(k, v) + return bbox + + def update_box(self, box, mode): + self.bbox = box + self.convert(mode) + + + def __len__(self): + return self.bbox.shape[0] + + def clip_to_image(self, remove_empty=True): + TO_REMOVE = 1 + self.bbox[:, 0].clamp_(min=0, max=self.size[0] - TO_REMOVE) + self.bbox[:, 1].clamp_(min=0, max=self.size[1] - TO_REMOVE) + self.bbox[:, 2].clamp_(min=0, max=self.size[0] - TO_REMOVE) + self.bbox[:, 
3].clamp_(min=0, max=self.size[1] - TO_REMOVE) + if remove_empty: + box = self.bbox + keep = (box[:, 3] > box[:, 1]) & (box[:, 2] > box[:, 0]) + return self[keep] + return self + + def area(self): + box = self.bbox + if self.mode == "xyxy": + TO_REMOVE = 1 + area = (box[:, 2] - box[:, 0] + TO_REMOVE) * (box[:, 3] - box[:, 1] + TO_REMOVE) + elif self.mode == "xywh": + area = box[:, 2] * box[:, 3] + else: + raise RuntimeError("Should not be here") + + return area + + def copy_with_fields(self, fields, skip_missing=False): + bbox = BoxList(self.bbox, self.size, self.mode) + if not isinstance(fields, (list, tuple)): + fields = [fields] + for field in fields: + if self.has_field(field): + bbox.add_field(field, self.get_field(field)) + elif not skip_missing: + raise KeyError("Field '{}' not found in {}".format(field, self)) + return bbox + + def __repr__(self): + s = self.__class__.__name__ + "(" + s += "num_boxes={}, ".format(len(self)) + s += "image_width={}, ".format(self.size[0]) + s += "image_height={}, ".format(self.size[1]) + s += "mode={})".format(self.mode) + return s + + +if __name__ == "__main__": + bbox = BoxList([[0, 0, 10, 10], [0, 0, 5, 5]], (10, 10)) + s_bbox = bbox.resize((5, 5)) + print(s_bbox) + print(s_bbox.bbox) + + t_bbox = bbox.transpose(0) + print(t_bbox) + print(t_bbox.bbox) diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/structures/memory_pool.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/structures/memory_pool.py new file mode 100644 index 0000000..9d8f636 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/structures/memory_pool.py @@ -0,0 +1,38 @@ +from collections import defaultdict + +class MemoryPool(object): + def __init__(self): + self.cache = defaultdict(dict) + + def update(self, update_info): + for movie_id, feature_per_movie in update_info.items(): + self.cache[movie_id].update(feature_per_movie) + + def update_list(self, update_info_list): + for update_info in update_info_list: + self.update(update_info) + + def __getitem__(self, item): + if isinstance(item, tuple) and len(item)==2: + return self.cache[item[0]][item[1]] + return self.cache[item] + + def __setitem__(self, key, value): + if isinstance(key, tuple) and len(key)==2: + self.cache[key[0]][key[1]] = value + else: + self.cache[key] = value + + def __delitem__(self, item): + if isinstance(item, tuple) and len(item)==2: + del self.cache[item[0]][item[1]] + else: + del self.cache[item] + + def __contains__(self, item): + if isinstance(item, tuple) and len(item)==2: + return (item[0] in self.cache and item[1] in self.cache[item[0]]) + return (item in self.cache) + + def items(self): + return self.cache.items() \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/IA_helper.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/IA_helper.py new file mode 100644 index 0000000..37c6067 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/IA_helper.py @@ -0,0 +1,20 @@ +import itertools + +def _block_set(ia_blocks): + if len(ia_blocks) > 0 and isinstance(ia_blocks[0], list): + ia_blocks = list(itertools.chain.from_iterable(ia_blocks)) + return ia_blocks + +def has_person(ia_config): + ia_blocks = _block_set(ia_config.I_BLOCK_LIST) + return (ia_config.ACTIVE and 'P' in ia_blocks and ia_config.MAX_PERSON > 0) + + +def has_object(ia_config): + ia_blocks = _block_set(ia_config.I_BLOCK_LIST) + return (ia_config.ACTIVE and 'O' in ia_blocks and 
ia_config.MAX_OBJECT > 0) + + +def has_memory(ia_config): + ia_blocks = _block_set(ia_config.I_BLOCK_LIST) + return (ia_config.ACTIVE and 'M' in ia_blocks and ia_config.MAX_PER_SEC > 0) diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/__init__.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/__pycache__/IA_helper.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/__pycache__/IA_helper.cpython-37.pyc new file mode 100644 index 0000000..0ed306f Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/__pycache__/IA_helper.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/__pycache__/IA_helper.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/__pycache__/IA_helper.cpython-39.pyc new file mode 100644 index 0000000..7835c5f Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/__pycache__/IA_helper.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/__pycache__/__init__.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..5369b3e Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/__pycache__/__init__.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/__pycache__/__init__.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000..a418942 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/__pycache__/__init__.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/__pycache__/comm.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/__pycache__/comm.cpython-37.pyc new file mode 100644 index 0000000..af765e8 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/__pycache__/comm.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/__pycache__/comm.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/__pycache__/comm.cpython-39.pyc new file mode 100644 index 0000000..bee7fa6 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/__pycache__/comm.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/__pycache__/video_decode.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/__pycache__/video_decode.cpython-37.pyc new file mode 100644 index 0000000..1a54eb8 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/__pycache__/video_decode.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/__pycache__/video_decode.cpython-39.pyc b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/__pycache__/video_decode.cpython-39.pyc new file mode 100644 index 0000000..4b781ae Binary files /dev/null and 
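A hedged usage sketch for the MemoryPool defined a few hunks above: it is a two-level cache keyed by (movie_id, timestamp), so the long-term person feature bank can be read, written, and pruned with tuple indexing. This assumes the alphaction package added by this diff is on PYTHONPATH; the movie id and value are placeholders:

from alphaction.structures.memory_pool import MemoryPool

pool = MemoryPool()
pool["movie_0001", 902] = "pooled_feature@902"   # cache[movie_id][timestamp] = value
print(("movie_0001", 902) in pool)               # True
print(pool["movie_0001", 902])                   # pooled_feature@902
del pool["movie_0001", 902]
print(("movie_0001", 902) in pool)               # False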
b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/__pycache__/video_decode.cpython-39.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/c2_model_loading.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/c2_model_loading.py new file mode 100644 index 0000000..bce1c9d --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/c2_model_loading.py @@ -0,0 +1,41 @@ +import logging +import torch +import pickle +from collections import OrderedDict + + +def _rename_weights(weights, weight_map): + logger = logging.getLogger(__name__) + logger.info("Remapping C2 weights") + max_c2_key_size = max([len(k) for k in weight_map.values()]) + new_weights = OrderedDict() + for k in weight_map: + c2_name = weight_map[k] + logger.info("C2 name: {: <{}} mapped name: {}".format(c2_name, max_c2_key_size, k)) + if c2_name not in weights: + logger.info("{} not found in C2 weights file, skipped.".format(c2_name)) + continue + v = weights[c2_name] + w = torch.from_numpy(v) + new_weights[k] = w + return new_weights + + +def _load_c2_pickled_weights(file_path): + with open(file_path, "rb") as f: + if torch._six.PY3: + data = pickle.load(f, encoding="latin1") + else: + data = pickle.load(f) + if "blobs" in data: + weights = data["blobs"] + else: + weights = data + return weights + + +def load_c2_format(f, weight_map): + # We also support load from caffe2 weights. + state_dict = _load_c2_pickled_weights(f) + state_dict = _rename_weights(state_dict, weight_map) + return dict(model=state_dict) diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/checkpoint.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/checkpoint.py new file mode 100644 index 0000000..c2864bf --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/checkpoint.py @@ -0,0 +1,143 @@ +# Modified from https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/utils/checkpoint.py +import logging +import os + +import torch + +from alphaction.utils.model_serialization import load_state_dict +from alphaction.utils.c2_model_loading import load_c2_format +from alphaction.structures.memory_pool import MemoryPool + + +class Checkpointer(object): + def __init__( + self, + model, + optimizer=None, + scheduler=None, + save_dir="", + save_to_disk=None, + logger=None, + ): + self.model = model + self.optimizer = optimizer + self.scheduler = scheduler + self.save_dir = save_dir + self.save_to_disk = save_to_disk + if logger is None: + logger = logging.getLogger(__name__) + self.logger = logger + + def save(self, name, **kwargs): + if not self.save_dir: + return + + if not self.save_to_disk: + return + + data = {} + data["model"] = self.model.state_dict() + if self.optimizer is not None: + data["optimizer"] = self.optimizer.state_dict() + if self.scheduler is not None: + data["scheduler"] = self.scheduler.state_dict() + data.update(kwargs) + + save_file = os.path.join(self.save_dir, "{}.pth".format(name)) + self.logger.info("Saving checkpoint to {}".format(save_file)) + torch.save(data, save_file) + self.tag_last_checkpoint(save_file) + + def load(self, f=None, model_weight_only=False, adjust_scheduler=False, no_head=False): + if self.has_checkpoint(): + # override argument with existing checkpoint + f = self.get_checkpoint_file() + if not f: + # no checkpoint could be found + self.logger.info("No checkpoint found. 
Initializing model from scratch") + return {} + self.logger.info("Loading checkpoint from {}".format(f)) + checkpoint = self._load_file(f) + self._load_model(checkpoint, no_head) + if "optimizer" in checkpoint and self.optimizer: + if model_weight_only: + del checkpoint['optimizer'] + else: + self.logger.info("Loading optimizer from {}".format(f)) + self.optimizer.load_state_dict(checkpoint.pop("optimizer")) + if "scheduler" in checkpoint and self.scheduler: + if model_weight_only: + del checkpoint['scheduler'] + elif adjust_scheduler: + last_epoch = checkpoint.pop("scheduler")['last_epoch'] + self.logger.info("Adjust scheduler at iteration {}".format(last_epoch)) + self.scheduler.step(last_epoch) + else: + self.logger.info("Loading scheduler from {}".format(f)) + self.scheduler.load_state_dict(checkpoint.pop("scheduler")) + + if model_weight_only: + checkpoint["iteration"] = 0 + checkpoint["person_pool"] = MemoryPool() + # return any further checkpoint dataset + return checkpoint + + def has_checkpoint(self): + save_file = os.path.join(self.save_dir, "last_checkpoint") + return os.path.exists(save_file) + + def get_checkpoint_file(self): + save_file = os.path.join(self.save_dir, "last_checkpoint") + try: + with open(save_file, "r") as f: + last_saved = f.read() + last_saved = last_saved.strip() + except IOError: + # if file doesn't exist, maybe because it has just been + # deleted by a separate process + last_saved = "" + return last_saved + + def tag_last_checkpoint(self, last_filename): + save_file = os.path.join(self.save_dir, "last_checkpoint") + with open(save_file, "w") as f: + f.write(last_filename) + + def _load_file(self, f): + return torch.load(f, map_location=torch.device("cpu")) + + def _load_model(self, checkpoint, no_head): + load_state_dict(self.model, checkpoint.pop("model"), no_head) + + +class ActionCheckpointer(Checkpointer): + def __init__( + self, + cfg, + model, + optimizer=None, + scheduler=None, + save_dir="", + save_to_disk=None, + logger=None, + ): + super(ActionCheckpointer, self).__init__( + model, optimizer, scheduler, save_dir, save_to_disk, logger + ) + self.cfg = cfg.clone() + + def _load_file(self, f): + if f.endswith(".pkl"): + return load_c2_format(f, self._get_c2_weight_map()) + loaded = super(ActionCheckpointer, self)._load_file(f) + if "model" not in loaded: + loaded = dict(model=loaded) + return loaded + + def _get_c2_weight_map(self): + if hasattr(self.model, "c2_weight_mapping"): + return self.model.c2_weight_mapping() + elif hasattr(self.model, "module") and hasattr(self.model.module, "c2_weight_mapping"): + return self.model.module.c2_weight_mapping() + else: + raise RuntimeError("Cannot get C2 weight mapping from current model definition.") \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/comm.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/comm.py new file mode 100644 index 0000000..22fa1c2 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/comm.py @@ -0,0 +1,207 @@ +# Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/comm.py +""" +This file contains primitives for multi-gpu communication. +This is useful when doing distributed training. 
+""" + +import pickle +import torch +import torch.distributed as dist +import functools + +def get_world_size(): + if not dist.is_available(): + return 1 + if not dist.is_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not dist.is_available(): + return 0 + if not dist.is_initialized(): + return 0 + return dist.get_rank() + + +def is_main_process(): + return get_rank() == 0 + +def synchronize(group=None): + """ + Helper function to synchronize (barrier) among all processes when + using distributed training + """ + if not dist.is_available(): + return + if not dist.is_initialized(): + return + if group is None: + group = _get_global_gloo_group() + world_size = dist.get_world_size(group=group) + if world_size == 1: + return + dist.barrier(group=group) + + +@functools.lru_cache() +def _get_global_gloo_group(): + """ + Return a process group based on gloo backend, containing all the ranks + The result is cached. + """ + if dist.get_backend() == "nccl": + return dist.new_group(backend="gloo") + else: + return dist.group.WORLD + + +def _serialize_to_tensor(data, group): + backend = dist.get_backend(group) + assert backend in ["gloo", "nccl"] + device = torch.device("cpu" if backend == "gloo" else "cuda") + + buffer = pickle.dumps(data) + storage = torch.ByteStorage.from_buffer(buffer) + tensor = torch.ByteTensor(storage).to(device=device) + return tensor + + +def _pad_to_largest_tensor(tensor, group): + """ + Returns: + list[int]: size of the tensor, on each rank + Tensor: padded tensor that has the max size + """ + world_size = dist.get_world_size(group=group) + assert ( + world_size >= 1 + ), "comm.all_gather must be called from ranks within the given group!" + local_size = torch.tensor([tensor.numel()], dtype=torch.int64, device=tensor.device) + size_list = [ + torch.zeros([1], dtype=torch.int64, device=tensor.device) for _ in range(world_size) + ] + dist.all_gather(size_list, local_size, group=group) + size_list = [int(size.item()) for size in size_list] + + max_size = max(size_list) + + # we pad the tensor because torch all_gather does not support + # gathering tensors of different shapes + if local_size != max_size: + padding = torch.zeros((max_size - local_size,), dtype=torch.uint8, device=tensor.device) + tensor = torch.cat((tensor, padding), dim=0) + return size_list, tensor + + +def all_gather(data, group=None): + """ + Run all_gather on arbitrary picklable data (not necessarily tensors). + Args: + data: any picklable object + group: a torch process group. By default, will use a group which + contains all ranks on gloo backend. + Returns: + list[data]: list of data gathered from each rank + """ + if get_world_size() == 1: + return [data] + if group is None: + group = _get_global_gloo_group() + if dist.get_world_size(group) == 1: + return [data] + + tensor = _serialize_to_tensor(data, group) + + size_list, tensor = _pad_to_largest_tensor(tensor, group) + max_size = max(size_list) + + # receiving Tensor from all ranks + tensor_list = [ + torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) for _ in size_list + ] + dist.all_gather(tensor_list, tensor, group=group) + + data_list = [] + for size, tensor in zip(size_list, tensor_list): + buffer = tensor.cpu().numpy().tobytes()[:size] + data_list.append(pickle.loads(buffer)) + + return data_list + +def gather(data, dst=0, group=None): + """ + Run gather on arbitrary picklable data (not necessarily tensors). 
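A hedged sketch of the encoding used by all_gather/gather above: arbitrary picklable objects are pickled into uint8 tensors, padded to the largest size in the group, exchanged, and unpickled. The single-process round trip of that encoding (plain tensor construction instead of ByteStorage, no process group) looks like:

import pickle
import torch

payload = {"movie_id": "movie_0001", "boxes": [[0, 0, 10, 10]]}
buf = pickle.dumps(payload)
tensor = torch.tensor(list(buf), dtype=torch.uint8)   # cf. _serialize_to_tensor (gloo/CPU case)
padded = torch.cat([tensor, tensor.new_zeros(16)])    # cf. _pad_to_largest_tensor
restored = pickle.loads(padded[: tensor.numel()].numpy().tobytes())
print(restored == payload)  # True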
+ Args: + data: any picklable object + dst (int): destination rank + group: a torch process group. By default, will use a group which + contains all ranks on gloo backend. + Returns: + list[data]: on dst, a list of data gathered from each rank. Otherwise, + an empty list. + """ + if get_world_size() == 1: + return [data] + if group is None: + group = _get_global_gloo_group() + if dist.get_world_size(group=group) == 1: + return [data] + rank = dist.get_rank(group=group) + + tensor = _serialize_to_tensor(data, group) + size_list, tensor = _pad_to_largest_tensor(tensor, group) + + # receiving Tensor from all ranks + if rank == dst: + max_size = max(size_list) + tensor_list = [ + torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) for _ in size_list + ] + dist.gather(tensor, tensor_list, dst=dst, group=group) + + data_list = [] + for size, tensor in zip(size_list, tensor_list): + buffer = tensor.cpu().numpy().tobytes()[:size] + data_list.append(pickle.loads(buffer)) + return data_list + else: + dist.gather(tensor, [], dst=dst, group=group) + return [] + +def reduce_dict(input_dict, average=True): + """ + Args: + input_dict (dict): all the values will be reduced + average (bool): whether to do average or sum + Reduce the values in the dictionary from all processes so that process with rank + 0 has the averaged results. Returns a dict with the same fields as + input_dict, after reduction. + """ + world_size = get_world_size() + if world_size < 2: + return input_dict + with torch.no_grad(): + names = [] + values = [] + # sort the keys so that they are consistent across processes + for k in sorted(input_dict.keys()): + names.append(k) + values.append(input_dict[k]) + values = torch.stack(values, dim=0) + dist.reduce(values, dst=0) + if dist.get_rank() == 0 and average: + # only main process gets accumulated, so only divide by + # world_size in this case + values /= world_size + reduced_dict = {k: v for k, v in zip(names, values)} + return reduced_dict + +def all_reduce(tensor, average=False): + world_size = get_world_size() + if world_size < 2: + return + dist.all_reduce(tensor) + if average: + tensor /= world_size \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/logger.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/logger.py new file mode 100644 index 0000000..6527670 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/logger.py @@ -0,0 +1,38 @@ +# Modified from https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/utils/logger.py +import logging +import os +import sys +import time + + +def setup_logger(name, save_dir, distributed_rank, filename=None): + logger = logging.getLogger(name) + logger.setLevel(logging.DEBUG) + logger.propagate = False + # don't log results for the non-master process + if distributed_rank > 0: + return logger + ch = logging.StreamHandler(stream=sys.stdout) + ch.setLevel(logging.DEBUG) + formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s") + ch.setFormatter(formatter) + logger.addHandler(ch) + + if save_dir: + if filename is None: + filename = time.strftime("%Y-%m-%d_%H.%M.%S", time.localtime()) + ".log" + fh = logging.FileHandler(os.path.join(save_dir, filename)) + fh.setLevel(logging.DEBUG) + fh.setFormatter(formatter) + logger.addHandler(fh) + + return logger + +def setup_tblogger(save_dir, distributed_rank): + if distributed_rank>0: + return None + from tensorboardX import 
SummaryWriter + tbdir = os.path.join(save_dir,'tb') + os.makedirs(tbdir,exist_ok=True) + tblogger = SummaryWriter(tbdir) + return tblogger \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/metric_logger.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/metric_logger.py new file mode 100644 index 0000000..acd1b29 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/metric_logger.py @@ -0,0 +1,66 @@ +# Adopted from https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/utils/metric_logger.py +from collections import defaultdict +from collections import deque + +import torch + + +class SmoothedValue(object): + """Track a series of values and provide access to smoothed values over a + window or the global series average. + """ + + def __init__(self, window_size=20): + self.deque = deque(maxlen=window_size) + self.series = [] + self.total = 0.0 + self.count = 0 + + def update(self, value): + self.deque.append(value) + self.series.append(value) + self.count += 1 + self.total += value + + @property + def median(self): + d = torch.tensor(list(self.deque)) + return d.median().item() + + @property + def avg(self): + d = torch.tensor(list(self.deque)) + return d.mean().item() + + @property + def global_avg(self): + return self.total / self.count + + +class MetricLogger(object): + def __init__(self, delimiter="\t"): + self.meters = defaultdict(SmoothedValue) + self.delimiter = delimiter + + def update(self, **kwargs): + for k, v in kwargs.items(): + if isinstance(v, torch.Tensor): + v = v.item() + assert isinstance(v, (float, int)) + self.meters[k].update(v) + + def __getattr__(self, attr): + if attr in self.meters: + return self.meters[attr] + if attr in self.__dict__: + return self.__dict__[attr] + raise AttributeError("'{}' object has no attribute '{}'".format( + type(self).__name__, attr)) + + def __str__(self): + loss_str = [] + for name, meter in self.meters.items(): + loss_str.append( + "{}: {:.4f} ({:.4f})".format(name, meter.median, meter.global_avg) + ) + return self.delimiter.join(loss_str) diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/model_serialization.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/model_serialization.py new file mode 100644 index 0000000..0709e5d --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/model_serialization.py @@ -0,0 +1,83 @@ +# Modified from https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/utils/model_serialization.py +from collections import OrderedDict +import logging + +import torch + + +def align_and_update_state_dicts(model_state_dict, loaded_state_dict, no_head): + """ + Strategy: suppose that the models that we will create will have prefixes appended + to each of its keys, for example due to an extra level of nesting that the original + pre-trained weights from ImageNet won't contain. For example, model.state_dict() + might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains + res2.conv1.weight. We thus want to match both parameters together. + For that, we look for each model weight, look among all loaded keys if there is one + that is a suffix of the current weight name, and use it if that's the case. + If multiple matches exist, take the one with longest size + of the corresponding name. 
For example, for the same model as before, the pretrained + weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case, + we want to match backbone[0].body.conv1.weight to conv1.weight, and + backbone[0].body.res2.conv1.weight to res2.conv1.weight. + """ + current_keys = sorted(list(model_state_dict.keys())) + loaded_keys = sorted(list(loaded_state_dict.keys())) + # get a matrix of string matches, where each (i, j) entry correspond to the size of the + # loaded_key string, if it matches + match_matrix = [ + len(j) if i.endswith(j) else 0 for i in current_keys for j in loaded_keys + ] + match_matrix = torch.as_tensor(match_matrix).view( + len(current_keys), len(loaded_keys) + ) + max_match_size, idxs = match_matrix.max(1) + # remove indices that correspond to no-match + idxs[max_match_size == 0] = -1 + + # used for logging + max_size = max([len(key) for key in current_keys]) if current_keys else 1 + max_size_loaded = max([len(key) for key in loaded_keys]) if loaded_keys else 1 + log_str_template = "{: <{}} loaded from {: <{}} of shape {}" + logger = logging.getLogger(__name__) + for idx_new, idx_old in enumerate(idxs.tolist()): + if idx_old == -1: + continue + key = current_keys[idx_new] + key_old = loaded_keys[idx_old] + + if no_head and key_old.startswith("roi_heads."): + logger.info("{} will not be loaded.".format(key)) + continue + + model_state_dict[key] = loaded_state_dict[key_old] + logger.info( + log_str_template.format( + key, + max_size, + key_old, + max_size_loaded, + tuple(loaded_state_dict[key_old].shape), + ) + ) + + +def strip_prefix_if_present(state_dict, prefix): + keys = sorted(state_dict.keys()) + if not all(key.startswith(prefix) for key in keys): + return state_dict + stripped_state_dict = OrderedDict() + for key, value in state_dict.items(): + stripped_state_dict[key.replace(prefix, "")] = value + return stripped_state_dict + + +def load_state_dict(model, loaded_state_dict, no_head): + model_state_dict = model.state_dict() + # if the state_dict comes from a model that was wrapped in a + # DataParallel or DistributedDataParallel during serialization, + # remove the "module" prefix before performing the matching + loaded_state_dict = strip_prefix_if_present(loaded_state_dict, prefix="module.") + align_and_update_state_dicts(model_state_dict, loaded_state_dict, no_head) + + # use strict loading + model.load_state_dict(model_state_dict) diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/random_seed.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/random_seed.py new file mode 100644 index 0000000..4cbee5d --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/random_seed.py @@ -0,0 +1,12 @@ +import torch +import random +import numpy as np + +def set_seed(seed, rank, world_size): + rng = random.Random(seed) + seed_per_rank = [rng.randint(0, 2**32-1) for _ in range(world_size)] + cur_seed = seed_per_rank[rank] + random.seed(cur_seed) + torch.manual_seed(cur_seed) + torch.cuda.manual_seed(cur_seed) + np.random.seed(cur_seed) \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/registry.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/registry.py new file mode 100644 index 0000000..88c8d0c --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/registry.py @@ -0,0 +1,44 @@ +# Adopted from 
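A hedged sketch of the suffix matching described in align_and_update_state_dicts above: each model key is paired with the loaded key that is its longest suffix, which is how weights saved without the wrapper prefix still line up with the nested model. Key names below are illustrative:

model_keys = ["backbone.body.res2.conv1.weight", "backbone.body.conv1.weight"]
loaded_keys = ["res2.conv1.weight", "conv1.weight"]

for key in model_keys:
    matches = [k for k in loaded_keys if key.endswith(k)]
    best = max(matches, key=len) if matches else None   # longest matching suffix wins
    print(key, "<-", best)
# backbone.body.res2.conv1.weight <- res2.conv1.weight
# backbone.body.conv1.weight <- conv1.weight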
https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/utils/registry.py + +def _register_generic(module_dict, module_name, module): + assert module_name not in module_dict + module_dict[module_name] = module + + +class Registry(dict): + ''' + A helper class for managing registering modules, it extends a dictionary + and provides a register functions. + + Eg. creeting a registry: + some_registry = Registry({"default": default_module}) + + There're two ways of registering new modules: + 1): normal way is just calling register function: + def foo(): + ... + some_registry.register("foo_module", foo) + 2): used as decorator when declaring the module: + @some_registry.register("foo_module") + @some_registry.register("foo_modeul_nickname") + def foo(): + ... + + Access of module is just like using a dictionary, eg: + f = some_registry["foo_modeul"] + ''' + def __init__(self, *args, **kwargs): + super(Registry, self).__init__(*args, **kwargs) + + def register(self, module_name, module=None): + # used as function call + if module is not None: + _register_generic(self, module_name, module) + return + + # used as decorator + def register_fn(fn): + _register_generic(self, module_name, fn) + return fn + + return register_fn diff --git a/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/video_decode.py b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/video_decode.py new file mode 100644 index 0000000..52a826c --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/alphaction/utils/video_decode.py @@ -0,0 +1,9 @@ +import av +import io +import decord +def av_decode_video(video_path): + with av.open(video_path) as container: + frames = [] + for frame in container.decode(video=0): + frames.append(frame.to_rgb().to_ndarray()) + return frames \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/avaeval.sh b/Downstream/Spatial-Temporal-Action-Localization/avaeval.sh new file mode 100644 index 0000000..43e2878 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/avaeval.sh @@ -0,0 +1,46 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 +export PATH=/mnt/lustre/share/gcc/gcc-5.3.0/bin/:$PATH +export LD_LIBRARY_PATH=/mnt/lustre/share/gcc/gmp-4.3.2/lib:/mnt/lustre/share/gcc/mpfr-2.4.2/lib:/mnt/lustre/share/gcc/mpc-0.8.1/lib:$LD_LIBRARY_PATH +export PATH=/mnt/cache/share/cuda-11.3/bin:$PATH +export PATH=/mnt/cache/xingsen/.local/bin:$PATH +export LD_LIBRARY_PATH=/mnt/cache/share/cuda-11.3/lib64:$LD_LIBRARY_PATH +rm -rf /mnt/lustre/xingsen/.cache/torch_extensions +DATA_PATH='YOUR_PATH/list_kinetics-400' +MODEL_PATH='/mnt/petrelfs/videointern/xingsen/vit_h_ak_good/checkpoint-14/mp_rank_00_model_states.pt' +OUTPUT_DIR='/mnt/lustre/xingsen/videoMAE_ckp/ak_eval_kinetics_105_test/ckp14_ava' +PARTITION=${PARTITION:-"Gvlab"} +GPUS=${GPUS:-1} +GPUS_PER_NODE=${GPUS_PER_NODE:-1} +CPUS_PER_TASK=${CPUS_PER_TASK:-8} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} +srun -p video \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --quotatype=reserved \ + ${SRUN_ARGS} \ + python -u run_class_finetuning.py \ + --model vit_huge_patch16_224 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 4 \ + --num_sample 1 \ + --input_size 224 \ + --save_ckpt_freq 1 \ + --num_frames 16 \ + --sampling_rate 4 \ + 
--opt adamw \ + --lr 0.00025 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --epochs 20 \ + --data_set "ava" \ + --drop_path 0.2\ + --eval \ + ${PY_ARGS} diff --git a/Downstream/Spatial-Temporal-Action-Localization/avalarge.sh b/Downstream/Spatial-Temporal-Action-Localization/avalarge.sh new file mode 100644 index 0000000..f7cbdf0 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/avalarge.sh @@ -0,0 +1,48 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 +export PATH=/mnt/lustre/share/gcc/gcc-5.3.0/bin/:$PATH +export LD_LIBRARY_PATH=/mnt/lustre/share/gcc/gmp-4.3.2/lib:/mnt/lustre/share/gcc/mpfr-2.4.2/lib:/mnt/lustre/share/gcc/mpc-0.8.1/lib:$LD_LIBRARY_PATH +export PATH=/mnt/cache/share/cuda-11.3/bin:$PATH +export PATH=/mnt/cache/xingsen/.local/bin:$PATH +export LD_LIBRARY_PATH=/mnt/cache/share/cuda-11.3/lib64:$LD_LIBRARY_PATH +DATA_PATH='YOUR_PATH/list_kinetics-400' +MODEL_PATH='/mnt/petrelfs/share_data/huangbingkun/model/vit_g_ps14_hybridv3_pt_1200e_k710_ft_v2_8022.pth' +OUTPUT_DIR='/mnt/petrelfs/xingsen/videoMAE_ckp/vitg_hbk' +PARTITION=${PARTITION:-"video"} +GPUS=${GPUS:-32} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-12} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} +srun -p video \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --quotatype=reserved \ + -x SH-IDC1-10-140-0-163 \ + --async \ + ${SRUN_ARGS} \ + python -u run_class_finetuning.py \ + --model vit_giant_patch14_224 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 4 \ + --num_sample 1 \ + --input_size 224 \ + --save_ckpt_freq 1 \ + --num_frames 16 \ + --sampling_rate 4 \ + --opt adamw \ + --lr 0.0005 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --layer_decay 0.9 \ + --epochs 20 \ + --data_set "ava" \ + --enable_deepspeed \ + --val_freq 20 \ + --drop_path 0.2 \ + ${PY_ARGS} diff --git a/Downstream/Spatial-Temporal-Action-Localization/avalarge2.sh b/Downstream/Spatial-Temporal-Action-Localization/avalarge2.sh new file mode 100644 index 0000000..ed9e806 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/avalarge2.sh @@ -0,0 +1,48 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 +export PATH=/mnt/lustre/share/gcc/gcc-5.3.0/bin/:$PATH +export LD_LIBRARY_PATH=/mnt/lustre/share/gcc/gmp-4.3.2/lib:/mnt/lustre/share/gcc/mpfr-2.4.2/lib:/mnt/lustre/share/gcc/mpc-0.8.1/lib:$LD_LIBRARY_PATH +export PATH=/mnt/cache/share/cuda-11.3/bin:$PATH +export PATH=/mnt/cache/xingsen/.local/bin:$PATH +export LD_LIBRARY_PATH=/mnt/cache/share/cuda-11.3/lib64:$LD_LIBRARY_PATH +DATA_PATH='YOUR_PATH/list_kinetics-400' +MODEL_PATH='/mnt/cache/share_data/huangbingkun.vendor/model/vit_l_hybrid_pt_800e_k700_ft.pth' +OUTPUT_DIR='/mnt/lustre/xingsen/videoMAE_ckp/k700_32GPU' +PARTITION=${PARTITION:-"video"} +GPUS=${GPUS:-32} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-12} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} +srun -p video \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --quotatype=reserved \ + --async \ + ${SRUN_ARGS} \ + python -u run_class_finetuning.py \ + --model vit_large_patch16_224 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 4 \ + 
--update_freq 1 \ + --num_sample 1 \ + --input_size 224 \ + --save_ckpt_freq 1 \ + --num_frames 16 \ + --sampling_rate 4 \ + --opt adamw \ + --lr 0.00025 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --epochs 30 \ + --data_set "ava" \ + --enable_deepspeed \ + --val_freq 29 \ + --drop_path 0.2\ + ${PY_ARGS} diff --git a/Downstream/Spatial-Temporal-Action-Localization/avatest.sh b/Downstream/Spatial-Temporal-Action-Localization/avatest.sh new file mode 100644 index 0000000..f2fd4a4 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/avatest.sh @@ -0,0 +1,46 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 +export PATH=/mnt/lustre/share/gcc/gcc-5.3.0/bin/:$PATH +export LD_LIBRARY_PATH=/mnt/lustre/share/gcc/gmp-4.3.2/lib:/mnt/lustre/share/gcc/mpfr-2.4.2/lib:/mnt/lustre/share/gcc/mpc-0.8.1/lib:$LD_LIBRARY_PATH +export PATH=/mnt/cache/share/cuda-11.3/bin:$PATH +export PATH=/mnt/cache/xingsen/.local/bin:$PATH +export LD_LIBRARY_PATH=/mnt/cache/share/cuda-11.3/lib64:$LD_LIBRARY_PATH +DATA_PATH='YOUR_PATH/list_kinetics-400' +MODEL_PATH='/mnt/lustre/share_data/xingsen/VideoMAEmodelzoo/checkpoint-1599-vitl.pth' +OUTPUT_DIR='/mnt/lustre/xingsen/videoMAE_ckp/vit_testonly' +PARTITION=${PARTITION:-"video"} +GPUS=${GPUS:-2} +GPUS_PER_NODE=${GPUS_PER_NODE:-2} +CPUS_PER_TASK=${CPUS_PER_TASK:-8} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} +srun -p Gvlab \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --quotatype=spot \ + ${SRUN_ARGS} \ + python -u run_class_finetuning.py \ + --model vit_large_patch16_224 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 4 \ + --num_sample 1 \ + --input_size 224 \ + --save_ckpt_freq 1 \ + --num_frames 16 \ + --sampling_rate 4 \ + --opt adamw \ + --lr 0.00025 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --epochs 31 \ + --data_set "ava" \ + --enable_deepspeed \ + --val_freq 30 \ + --drop_path 0.2 \ + ${PY_ARGS} diff --git a/Downstream/Spatial-Temporal-Action-Localization/avatrain.sh b/Downstream/Spatial-Temporal-Action-Localization/avatrain.sh new file mode 100644 index 0000000..1c7b118 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/avatrain.sh @@ -0,0 +1,45 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 +export PATH=/mnt/lustre/share/gcc/gcc-5.3.0/bin/:$PATH +export LD_LIBRARY_PATH=/mnt/lustre/share/gcc/gmp-4.3.2/lib:/mnt/lustre/share/gcc/mpfr-2.4.2/lib:/mnt/lustre/share/gcc/mpc-0.8.1/lib:$LD_LIBRARY_PATH +export PATH=/mnt/cache/share/cuda-11.3/bin:$PATH +export PATH=/mnt/cache/xingsen/.local/bin:$PATH +export LD_LIBRARY_PATH=/mnt/cache/share/cuda-11.3/lib64:$LD_LIBRARY_PATH +MODEL_PATH='/mnt/cache/share_data/huangbingkun/model/vit_b_k400_pt_1600e_k400_ft_tsn.pth' +OUTPUT_DIR='/mnt/lustre/xingsen/videoMAE_ckp/debug' +PARTITION=${PARTITION:-"video"} +GPUS=${GPUS:-1} +GPUS_PER_NODE=${GPUS_PER_NODE:-1} +CPUS_PER_TASK=${CPUS_PER_TASK:-12} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} +srun -p video \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --quotatype=reserved \ + ${SRUN_ARGS} \ + python -u run_class_finetuning.py \ + --model vit_base_patch16_224 \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 16 \ + 
--num_sample 1 \ + --input_size 224 \ + --save_ckpt_freq 1 \ + --num_frames 16 \ + --sampling_rate 4 \ + --opt adamw \ + --lr 0.00025 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --epochs 30 \ + --data_set "ava" \ + --enable_deepspeed \ + --val_freq 29 \ + --drop_path 0.2 \ + --eval \ + ${PY_ARGS} diff --git a/Downstream/Spatial-Temporal-Action-Localization/data/RandomAugmentBBox.py b/Downstream/Spatial-Temporal-Action-Localization/data/RandomAugmentBBox.py new file mode 100644 index 0000000..6e9f27e --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/data/RandomAugmentBBox.py @@ -0,0 +1,398 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Object detection strong augmentation utilities.""" +# pylint: disable=g-explicit-length-test + +import data.augmentations as augmentations +# from FasterRCNN.utils import np_box_ops + +import imgaug as ia +import imgaug.augmenters as iaa +from imgaug.augmenters.geometric import Affine +import numpy as np +import torch +from PIL import Image +import cv2 +# from tensorpack.utils import logger + + +RANDOM_COLOR_POLICY_OPS = ( + 'Identity', + 'AutoContrast', + 'Equalize', + 'Solarize', + 'Color', + 'Contrast', + 'Brightness', + 'Sharpness', + 'Posterize', +) + +# for image size == 800, 0.1 is 80. +CUTOUT = iaa.Cutout(nb_iterations=(1, 5), size=[0, 0.2], squared=True) + +DEGREE = 30 +AFFINE_TRANSFORM = iaa.Sequential( + [ + iaa.OneOf([ + Affine( # TranslateX + translate_percent={'x': (-0.1, 0.1)}, + order=[0, 1], + cval=125, + ), + Affine( # TranslateY + translate_percent={'y': (-0.1, 0.1)}, + order=[0, 1], + cval=125, + ), + Affine( # Rotate + rotate=(-DEGREE, DEGREE), + order=[0, 1], + cval=125, + ), + Affine( # ShearX and ShareY + shear=(-DEGREE, DEGREE), + order=[0, 1], + cval=125, + ), + ]), + ], + # do all of the above augmentations in random order + random_order=True) + +# AFFINE_TRANSFORM = iaa.Sequential( +# [ +# iaa.OneOf([ +# Affine( # TranslateX +# translate_percent={'x': (-0.1, 0.1)}, +# order=[0, 1], +# cval=125, +# ) +# ]) +# ], +# # do all of the above augmentations in random order +# random_order=True) + +AFFINE_TRANSFORM_WEAK = iaa.Sequential( + [ + iaa.OneOf([ + Affine( + translate_percent={'x': (-0.05, 0.05)}, + order=[0, 1], + cval=125, + ), + Affine( + translate_percent={'y': (-0.05, 0.05)}, + order=[0, 1], + cval=125, + ), + Affine( + rotate=(-10, 10), + order=[0, 1], + cval=125, + ), + Affine( + shear=(-10, 10), + order=[0, 1], + cval=125, + ), + ]), + ], + # do all of the above augmentations in random order + random_order=True) + +COLOR = iaa.Sequential( + [ + iaa.OneOf( # apply one color transformation + [ + iaa.Add((0, 0)), # identity transform + iaa.OneOf([ + iaa.GaussianBlur((0, 3.0)), + iaa.AverageBlur(k=(2, 7)), + ]), + iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)), + iaa.AdditiveGaussianNoise( + loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5), + iaa.Invert(0.05, per_channel=True), # invert color channels + # Add a value of -10 to 10 to each pixel. 
+            iaa.Add((-10, 10), per_channel=0.5),
+            # Change brightness of images (50-150% of original value).
+            iaa.Multiply((0.5, 1.5), per_channel=0.5),
+            # Improve or worsen the contrast of images.
+            iaa.contrast.LinearContrast((0.5, 2.0), per_channel=0.5),
+            ])
+    ],
+    random_order=True)
+
+
+def bb_to_array(bbs):
+    coords = []
+    for bb in range(len(bbs)):
+        coord = list(bbs[bb]._split_into_xyxy())
+        coords.append([i.squeeze().numpy() for i in coord])
+    coords = np.array(coords)
+    return coords
+
+def siglebb_to_array(bbs):
+    coords = []
+    for bb in bbs:
+        coords.append([bb.x1, bb.y1, bb.x2, bb.y2])
+    # coords = np.array(coords)
+    return coords
+
+def allbb_to_array(bbs):
+    coords = []
+    for bb in bbs:
+        coords.append(array_to_bb(bb_to_array(bb)))
+    return coords
+
+def array_to_bb(coords):
+    # convert to imgaug BoundingBox objects
+    bbs = []
+    for b in coords:
+        bbs.append(ia.BoundingBox(x1=b[0], y1=b[1], x2=b[2], y2=b[3]))
+    return bbs
+
+
+class RandomAugmentBBox(object):
+    """Augmentation class."""
+
+    def __init__(self,
+                 is_train=True,
+                 aug_type='strong',
+                 magnitude=10,
+                 weighted_inbox_selection=False):
+        self.affine_aug_op = AFFINE_TRANSFORM
+        # for in-box affine, we use a small degree
+        self.inbox_affine_aug_op = AFFINE_TRANSFORM_WEAK
+        self.jitter_aug_op = COLOR
+        self.cutout_op = CUTOUT
+        self.magnitude = magnitude
+        self.aug_type = aug_type
+        self.is_train = is_train
+        self.weighted_inbox_selection = weighted_inbox_selection
+
+        # self.augment_fn is a list of lists (e.g. [[],...]); each item is a list of
+        # augmentations from which one is randomly picked. The total number of items
+        # determines the number of augmentation layers to apply.
+        # Note: put cutout_augment last if used.
+        if aug_type == 'strong':
+            # followed by cutout
+            self.augment_fn = [[self.color_augment],
+                               [self.bbox_affine_transform, self.affine_transform],
+                               [self.cutout_augment]]
+            # self.augment_fn = [[self.bbox_affine_transform]]
+        elif aug_type == 'default':
+            self.augment_fn = [[self.color_augment],
+                               [self.affine_transform]]
+        elif aug_type == 'default_w_cutout':
+            self.augment_fn = [[self.color_augment],
+                               [self.affine_transform],
+                               [self.cutout_augment]]
+        elif aug_type == 'default_wo_affine':
+            # NOTE: currently identical to 'default_w_cutout'; the affine stage is
+            # not actually removed here.
+            self.augment_fn = [[self.color_augment],
+                               [self.affine_transform],
+                               [self.cutout_augment]]
+        else:
+            raise NotImplementedError('aug_type {} does not exist'.format(aug_type))
+        self.randomize_parameters()
+        # logger.info('-' * 100)
+        # logger.info('Augmentation type {}: {}'.format(aug_type, self.augment_fn))
+        # logger.info('-' * 100)
+
+    def normalize(self, x):
+        x = x / 255.0
+        x = x / 0.5 - 1.0
+        return x
+
+    def unnormalize(self, x):
+        x = (x + 1.0) * 0.5 * 255.0
+        return x
+
+    def numpy_apply_policies(self, arglist):
+        x, policies = arglist
+        re = []
+        for y, policy in zip(x, policies):
+            # apply_policy expects images scaled to [-1, 1]
+            y_a = augmentations.apply_policy(policy, self.normalize(y))
+            y_a = np.reshape(y_a, y.shape)
+            y_a = self.unnormalize(y_a)
+            re.append(y_a)
+        return np.stack(re).astype('f')
+
+    def bbox_affine_transform(self, results, **kwargs):
+        """In-box affine transformation: crop the image regions inside the boxes
+        and apply the (weak) affine transform only to those cropped patches."""
+        images, bounding_boxes, transform_randoms = results
+        real_box_n = [len(i) for i in bounding_boxes]
+        if isinstance(images, np.ndarray):
+            images = np.split(images, len(images), axis=0)
+            images = [i.squeeze() for i in images]
+        shape = images[0].shape
+        copybounding_boxes = [bounding_boxes for _ in range(len(images))]
+        for im, boxes in zip(images, copybounding_boxes):
+            boxes = bb_to_array(boxes)
+            # boxes with a larger area have a higher probability of being sampled
+            # NOTE: weighted_inbox_selection is False by default, so this branch is
+            # normally skipped; it would also need the np_box_ops import that is
+            # commented out at the top of this file.
+            if self.weighted_inbox_selection:
+                area = np_box_ops.area(boxes[:real_box_n])
+                k = np.random.choice([i for i in range(real_box_n)],
+                                     1,
+                                     p=area / area.sum())[0]
+            else:
+                k = self.k_det
+
+            if len(boxes) > 0:
+                # import pdb
+                # pdb.set_trace()
+                box = boxes[k]
+                im_crop = im[int(box[1]):int(box[3]), int(box[0]):int(box[2])].copy()
+                im_paste = self.inbox_affine_aug_op_det(images=[im_crop])[0]
+                # in-memory operation
+                im[int(box[1]):int(box[3]), int(box[0]):int(box[2])] = im_paste
+        assert shape == images[0].shape
+        images = np.array(images)
+        return images, bounding_boxes, transform_randoms
+
+    def affine_transform(self, results):
+        """Global affine transformation."""
+        images, bounding_boxes, transform_randoms = results
+        if isinstance(images, np.ndarray):
+            images = np.split(images, len(images), axis=0)
+            images = [i.squeeze() for i in images]
+        shape = images[0].shape
+        bounding_boxes = [bounding_boxes for _ in range(len(images))]
+        ori_bbx = allbb_to_array(bounding_boxes)
+        images_aug, bbs_aug = self.affine_aug_op_det(
+            images=images, bounding_boxes=ori_bbx)
+        for i in range(len(bbs_aug)):
+            new_array = siglebb_to_array(bbs_aug[i])
+            new_array = torch.as_tensor(new_array, dtype=torch.float32).reshape(-1, 4)
+            assert new_array.shape == bounding_boxes[i].bbox.shape
+            bounding_boxes[i].update_box(new_array, "xyxy")
+            break
+        assert shape == images_aug[0].shape
+        images_aug = np.array(images_aug)
+        return images_aug, bounding_boxes[0], transform_randoms
+
+    def jitter_augment(self, results):
+        """Color jitters."""
+        images, bounding_boxes, transform_randoms = results
+        images_aug = self.jitter_aug_op(images=images)
+        return images_aug, bounding_boxes, transform_randoms
+
+    def cutout_augment(self, results):
+        """Cutout augmentation."""
+        images, bounding_boxes, transform_randoms = results
+        images_aug = self.cutout_op_det(images=images)
+        return images_aug, bounding_boxes, transform_randoms
+
+    def color_augment(self, results, p=1.0, **kwargs):
+        """RandAug color augmentation: a color-only transform, the boxes are left
+        unchanged; images is a List[np.ndarray]."""
+        del kwargs
+        images, bounding_boxes, transform_randoms = results
+        policy = lambda: [(op, p, self.magnitude_det)  # pylint: disable=g-long-lambda
+                          for op in self.color_policies_det]
+        images_aug = []
+        shape = images[0].shape
+        for x in range(len(images)):
+            images_aug.append(
+                self.numpy_apply_policies((images[x:x + 1], [policy()]))[0])
+        assert shape == images_aug[0].shape
+        if bounding_boxes is None:
+            return images_aug
+        images_aug = np.array(images_aug)
+        return images_aug, bounding_boxes, transform_randoms
+
+    def set_k_for_bbox_affine_transform(self, **kwargs):
+        real_box_n = kwargs['n_real_box']
+        self.k_det = np.random.choice([i for i in range(real_box_n)], 1)[0]
+
+    def randomize_parameters(self, **kwargs):
+        self.cutout_op_det = self.cutout_op.to_deterministic()
+        self.affine_aug_op_det = self.affine_aug_op.to_deterministic()
+        self.inbox_affine_aug_op_det = self.inbox_affine_aug_op.to_deterministic()
+        self.magnitude_det = np.random.randint(1, self.magnitude)
+        self.color_policies_det = np.random.choice(RANDOM_COLOR_POLICY_OPS, 1)
+        # if 'n_real_box' in kwargs:
+        real_box_n = kwargs['n_real_box'] if 'n_real_box' in kwargs else 1
+        self.k_det = np.random.choice([i for i in range(real_box_n)], 1)[0]
+        if len(self.augment_fn) > 0 and self.augment_fn[-1][0].__name__ == 'cutout_augment':
+            naug = len(self.augment_fn)
+            self.oder1_det = np.random.permutation(np.arange(naug - 1))
+        else:
+            self.order2_det =
np.random.permutation(np.arange(len(self.augment_fn))) + self.tmp = np.random.randint(0, 2) + + def __call__(self, results, **kwargs): + if self.is_train: + images, bounding_boxes,transform_randoms = results + # random order + if len(self.augment_fn) > 0 and self.augment_fn[-1][0].__name__ == 'cutout_augment': + # put cutout in the last always + naug = len(self.augment_fn) + order = self.oder1_det + order = np.concatenate([order, [naug - 1]], 0) + else: + order = self.order2_det + + # pylint: disable=invalid-name + T = None + for i in order: + fns = self.augment_fn[i] + # fn = fns[np.random.randint(0, len(fns))] + if len(fns) > 1: + fn = fns[self.tmp] + else: + fn = fns[0] + images, bounding_boxes, _T = fn( + results=(images, bounding_boxes, transform_randoms), **kwargs) + if _T is not None: + T = _T + return images, bounding_boxes, T + else: + return results + +if __name__ == '__main__': + import sys + sys.path.append('/mnt/cache/xingsen/VideoMAE_ava_ft3/') + img = np.random.randint(0, 255, (2, 100, 100, 3), dtype=np.uint8) + img = np.split(img,img.shape[0],axis=0) + img = [np.array(Image.open('/mnt/cache/xingsen/VideoMAE_ava_ft3/data/1.jpg'))] + for i in range(len(img)): + img[i] = img[i].squeeze() + print(type(img[0])) + from alphaction.structures.bounding_box import BoxList + + im_w, im_h = 100, 100 + n = 1 + xy = np.array([16,11]).reshape(1,2)#np.zeros([n,2])#np.uint8(np.random.random([n, 2]) * 50 + 30) + w = np.uint8(np.ones([n, 1]) * 53) + h = np.uint8(np.ones([n, 1]) * 62) + # cv2.rectangle(img[0], (16,11), (69,73), (0,255,0), 4) + # cv2.imwrite('test.jpg',img[0]) + boxes = np.concatenate([xy, w, h], axis=1) + boxes_tensor = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4) # guard against no boxes + boxes = BoxList(boxes_tensor, (im_w, im_h), mode="xywh").convert("xyxy") + xmin, ymin, xmax, ymax = boxes[0]._split_into_xyxy() + aug = RandomAugmentBBox() + aug.randomize_parameters() + images_aug, bbs_aug, _ = aug((img, [boxes], None)) + print(images_aug,bbs_aug) + # x1,y1,x2,y2 = bbs_aug[0][0].x1,bbs_aug[0][0].y1,bbs_aug[0][0].x2,bbs_aug[0][0].y2 + # if x1 < 0: + # x1 = 0 + # if y1 < 0: + # y1 = 0 + # cv2.rectangle(images_aug[0], (x1,y1), (x2,y2), (0,255,0), 4) + # cv2.imwrite('test2.jpg',images_aug[0]) \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/data/__init__.py b/Downstream/Spatial-Temporal-Action-Localization/data/__init__.py new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/data/__init__.py @@ -0,0 +1 @@ + diff --git a/Downstream/Spatial-Temporal-Action-Localization/data/__pycache__/RandomAugmentBBox.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/data/__pycache__/RandomAugmentBBox.cpython-37.pyc new file mode 100644 index 0000000..8df63b4 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/data/__pycache__/RandomAugmentBBox.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/data/__pycache__/__init__.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/data/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..7d9d17c Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/data/__pycache__/__init__.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/data/__pycache__/augmentations.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/data/__pycache__/augmentations.cpython-37.pyc new file mode 100644 
index 0000000..438e4d6 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/data/__pycache__/augmentations.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/data/__pycache__/ava.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/data/__pycache__/ava.cpython-37.pyc new file mode 100644 index 0000000..799011a Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/data/__pycache__/ava.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/data/__pycache__/ava_eval.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/data/__pycache__/ava_eval.cpython-37.pyc new file mode 100644 index 0000000..1b516e4 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/data/__pycache__/ava_eval.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/data/__pycache__/transforms.cpython-37.pyc b/Downstream/Spatial-Temporal-Action-Localization/data/__pycache__/transforms.cpython-37.pyc new file mode 100644 index 0000000..9f24547 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/data/__pycache__/transforms.cpython-37.pyc differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/data/augmentations.py b/Downstream/Spatial-Temporal-Action-Localization/data/augmentations.py new file mode 100644 index 0000000..b5fdbe1 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/data/augmentations.py @@ -0,0 +1,432 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Transforms used in the Augmentation Policies.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import random + +import numpy as np +# pylint:disable=g-multiple-import +from PIL import ImageOps, ImageEnhance, ImageFilter, Image + +# pylint:enable=g-multiple-import + +# IMAGE_SIZE = 32 +# What is the dataset mean and std of the images on the training set +# MEANS = [0.49139968, 0.48215841, 0.44653091] +# STDS = [0.24703223, 0.24348513, 0.26158784] +PARAMETER_MAX = 10 # What is the max 'level' a transform could be predicted + + +def random_flip(x): + """Flip the input x horizontally with 50% probability.""" + if np.random.rand(1)[0] > 0.5: + return np.fliplr(x) + return x + + +def zero_pad_and_crop(img, amount=4): + """Zero pad by `amount` zero pixels on each side then take a random crop. + + Args: + img: numpy image that will be zero padded and cropped. + amount: amount of zeros to pad `img` with horizontally and verically. + + Returns: + The cropped zero padded img. The returned numpy array will be of the same + shape as `img`. 
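+
+  Illustrative example: with the default amount=4, a (32, 32, 3) image is
+  zero padded to (40, 40, 3) and a random (32, 32, 3) window is cropped back
+  out, so the output shape always equals the input shape.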
+ """ + padded_img = np.zeros( + (img.shape[0] + amount * 2, img.shape[1] + amount * 2, img.shape[2])) + padded_img[amount:img.shape[0] + amount, + amount:img.shape[1] + amount, :] = img + top = np.random.randint(low=0, high=2 * amount) + left = np.random.randint(low=0, high=2 * amount) + new_img = padded_img[top:top + img.shape[0], left:left + img.shape[1], :] + return new_img + + +def create_cutout_mask(img_height, img_width, num_channels, size): + """Creates a zero mask used for cutout of shape `img_height` x `img_width`. + + Args: + img_height: Height of image cutout mask will be applied to. + img_width: Width of image cutout mask will be applied to. + num_channels: Number of channels in the image. + size: Size of the zeros mask. + + Returns: + A mask of shape `img_height` x `img_width` with all ones except for a + square of zeros of shape `size` x `size`. This mask is meant to be + elementwise multiplied with the original image. Additionally returns + the `upper_coord` and `lower_coord` which specify where the cutout mask + will be applied. + """ + assert img_height == img_width + + # Sample center where cutout mask will be applied + height_loc = np.random.randint(low=0, high=img_height) + width_loc = np.random.randint(low=0, high=img_width) + + # Determine upper right and lower left corners of patch + upper_coord = (max(0, height_loc - size // 2), max(0, width_loc - size // 2)) + lower_coord = (min(img_height, height_loc + size // 2), + min(img_width, width_loc + size // 2)) + mask_height = lower_coord[0] - upper_coord[0] + mask_width = lower_coord[1] - upper_coord[1] + assert mask_height > 0 + assert mask_width > 0 + + mask = np.ones((img_height, img_width, num_channels)) + mask[upper_coord[0]:lower_coord[0], upper_coord[1]:lower_coord[1], :] = 0 + return mask, upper_coord, lower_coord + + +def cutout_numpy(img, size=16): + """Apply cutout with mask of shape `size` x `size` to `img`. + + The cutout operation is from the paper https://arxiv.org/abs/1708.04552. + This operation applies a `size`x`size` mask of zeros to a random location + within `img`. + + Args: + img: Numpy image that cutout will be applied to. + size: Height/width of the cutout mask that will be + + Returns: + A numpy tensor that is the result of applying the cutout mask to `img`. + """ + if size <= 0: + return img + assert len(img.shape) == 3 + img_height, img_width, num_channels = img.shape + mask = create_cutout_mask(img_height, img_width, num_channels, size)[0] + return img * mask + + +def float_parameter(level, maxval): + """Helper function to scale `val` between 0 and maxval . + + Args: + level: Level of the operation that will be between [0, `PARAMETER_MAX`]. + maxval: Maximum value that the operation can have. This will be scaled to + level/PARAMETER_MAX. + + Returns: + A float that results from scaling `maxval` according to `level`. + """ + return float(level) * maxval / PARAMETER_MAX + + +def int_parameter(level, maxval): + """Helper function to scale `val` between 0 and maxval . + + Args: + level: Level of the operation that will be between [0, `PARAMETER_MAX`]. + maxval: Maximum value that the operation can have. This will be scaled to + level/PARAMETER_MAX. + + Returns: + An int that results from scaling `maxval` according to `level`. 
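+
+  Illustrative example: with PARAMETER_MAX = 10, int_parameter(5, 30)
+  returns int(5 * 30 / 10) = 15, i.e. half of the maximum strength.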
+ """ + return int(level * maxval / PARAMETER_MAX) + + +def pil_wrap(img, mean=0.5, std=0.5): + """Convert the `img` numpy tensor to a PIL Image.""" + return Image.fromarray(np.uint8((img * std + mean) * 255.0)).convert('RGBA') + + +def pil_unwrap(pil_img, mean=0.5, std=0.5): + """Converts the PIL img to a numpy array.""" + s = pil_img.size + pic_array = (np.array(pil_img.getdata()).reshape((s[0], s[1], 4)) / 255.0) + i1, i2 = np.where(pic_array[:, :, 3] == 0) + pic_array = (pic_array[:, :, :3] - mean) / std + pic_array[i1, i2] = [0, 0, 0] + return pic_array + + +def apply_policy(policy, img): + """Apply the `policy` to the numpy `img`. + + Args: + policy: A list of tuples with the form (name, probability, level) where + `name` is the name of the augmentation operation to apply, `probability` + is the probability of applying the operation and `level` is what + strength the operation to apply. + img: Numpy image that will have `policy` applied to it. + + Returns: + The result of applying `policy` to `img`. + """ + + pil_img = pil_wrap(img) + for xform in policy: + assert len(xform) == 3 + name, probability, level = xform + xform_fn = NAME_TO_TRANSFORM[name].pil_transformer(probability, level) + pil_img = xform_fn(pil_img) + return pil_unwrap(pil_img) + + +class TransformFunction(object): + """Wraps the Transform function for pretty printing options.""" + + def __init__(self, func, name): + self.f = func + self.name = name + + def __repr__(self): + return '<' + self.name + '>' + + def __call__(self, pil_img): + return self.f(pil_img) + + +class TransformT(object): + """Each instance of this class represents a specific transform.""" + + def __init__(self, name, xform_fn): + self.name = name + self.xform = xform_fn + + def pil_transformer(self, probability, level): + + def return_function(im): + if random.random() < probability: + im = self.xform(im, level) + return im + + name = self.name + '({:.1f},{})'.format(probability, level) + return TransformFunction(return_function, name) + + def do_transform(self, image, level): + f = self.pil_transformer(PARAMETER_MAX, level) + return pil_unwrap(f(pil_wrap(image))) + + +################## Transform Functions ################## +identity = TransformT('Identity', lambda pil_img, level: pil_img) +flip_lr = TransformT( + 'FlipLR', lambda pil_img, level: pil_img.transpose(Image.FLIP_LEFT_RIGHT)) +flip_ud = TransformT( + 'FlipUD', lambda pil_img, level: pil_img.transpose(Image.FLIP_TOP_BOTTOM)) +# pylint:disable=g-long-lambda +auto_contrast = TransformT( + 'AutoContrast', lambda pil_img, level: ImageOps.autocontrast( + pil_img.convert('RGB')).convert('RGBA')) +equalize = TransformT( + 'Equalize', lambda pil_img, level: ImageOps.equalize( + pil_img.convert('RGB')).convert('RGBA')) +invert = TransformT( + 'Invert', lambda pil_img, level: ImageOps.invert(pil_img.convert('RGB')). 
+ convert('RGBA')) +# pylint:enable=g-long-lambda +blur = TransformT('Blur', + lambda pil_img, level: pil_img.filter(ImageFilter.BLUR)) +smooth = TransformT('Smooth', + lambda pil_img, level: pil_img.filter(ImageFilter.SMOOTH)) + + +def _rotate_impl(pil_img, level): + """Rotates `pil_img` from -30 to 30 degrees depending on `level`.""" + degrees = int_parameter(level, 30) + if random.random() > 0.5: + degrees = -degrees + return pil_img.rotate(degrees) + + +rotate = TransformT('Rotate', _rotate_impl) + + +def _posterize_impl(pil_img, level): + """Applies PIL Posterize to `pil_img`.""" + level = int_parameter(level, 4) + return ImageOps.posterize(pil_img.convert('RGB'), 4 - level).convert('RGBA') + + +posterize = TransformT('Posterize', _posterize_impl) + + +def _shear_x_impl(pil_img, level): + """Applies PIL ShearX to `pil_img`. + + The ShearX operation shears the image along the horizontal axis with `level` + magnitude. + + Args: + pil_img: Image in PIL object. + level: Strength of the operation specified as an Integer from [0, + `PARAMETER_MAX`]. + + Returns: + A PIL Image that has had ShearX applied to it. + """ + level = float_parameter(level, 0.3) + if random.random() > 0.5: + level = -level + return pil_img.transform(pil_img.size, Image.AFFINE, (1, level, 0, 0, 1, 0)) + + +shear_x = TransformT('ShearX', _shear_x_impl) + + +def _shear_y_impl(pil_img, level): + """Applies PIL ShearY to `pil_img`. + + The ShearY operation shears the image along the vertical axis with `level` + magnitude. + + Args: + pil_img: Image in PIL object. + level: Strength of the operation specified as an Integer from [0, + `PARAMETER_MAX`]. + + Returns: + A PIL Image that has had ShearX applied to it. + """ + level = float_parameter(level, 0.3) + if random.random() > 0.5: + level = -level + return pil_img.transform(pil_img.size, Image.AFFINE, (1, 0, 0, level, 1, 0)) + + +shear_y = TransformT('ShearY', _shear_y_impl) + + +def _translate_x_impl(pil_img, level): + """Applies PIL TranslateX to `pil_img`. + + Translate the image in the horizontal direction by `level` + number of pixels. + + Args: + pil_img: Image in PIL object. + level: Strength of the operation specified as an Integer from [0, + `PARAMETER_MAX`]. + + Returns: + A PIL Image that has had TranslateX applied to it. + """ + level = int_parameter(level, 10) + if random.random() > 0.5: + level = -level + return pil_img.transform(pil_img.size, Image.AFFINE, (1, 0, level, 0, 1, 0)) + + +translate_x = TransformT('TranslateX', _translate_x_impl) + + +def _translate_y_impl(pil_img, level): + """Applies PIL TranslateY to `pil_img`. + + Translate the image in the vertical direction by `level` + number of pixels. + + Args: + pil_img: Image in PIL object. + level: Strength of the operation specified as an Integer from [0, + `PARAMETER_MAX`]. + + Returns: + A PIL Image that has had TranslateY applied to it. 
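+
+  Illustrative example: at level 10 the image is shifted by the full 10
+  pixels, at level 5 by int_parameter(5, 10) = 5 pixels; the direction
+  (up or down) is chosen at random.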
+ """ + level = int_parameter(level, 10) + if random.random() > 0.5: + level = -level + return pil_img.transform(pil_img.size, Image.AFFINE, (1, 0, 0, 0, 1, level)) + + +translate_y = TransformT('TranslateY', _translate_y_impl) + + +def _crop_impl(pil_img, level, interpolation=Image.BILINEAR): + """Applies a crop to `pil_img` with the size depending on the `level`.""" + cropped = pil_img.crop( + (level, level, pil_img.size[0] - level, pil_img.size[1] - level)) + resized = cropped.resize(pil_img.size, interpolation) + return resized + + +crop_bilinear = TransformT('CropBilinear', _crop_impl) + + +def _solarize_impl(pil_img, level): + """Applies PIL Solarize to `pil_img`. + + Translate the image in the vertical direction by `level` + number of pixels. + + Args: + pil_img: Image in PIL object. + level: Strength of the operation specified as an Integer from [0, + `PARAMETER_MAX`]. + + Returns: + A PIL Image that has had Solarize applied to it. + """ + level = int_parameter(level, 256) + return ImageOps.solarize(pil_img.convert('RGB'), 256 - level).convert('RGBA') + + +solarize = TransformT('Solarize', _solarize_impl) + + +def _cutout_pil_impl(pil_img, level): + """Apply cutout to pil_img at the specified level.""" + size = int_parameter(level, 20) + if size <= 0: + return pil_img + img_width, img_height = pil_img.size + num_channels = 3 + _, upper_coord, lower_coord = ( + create_cutout_mask(img_height, img_width, num_channels, size)) + pixels = pil_img.load() # create the pixel map + for i in range(upper_coord[0], lower_coord[0]): # for every col: + for j in range(upper_coord[1], lower_coord[1]): # For every row + pixels[i, j] = (127, 127, 127, 0) # set the colour accordingly + return pil_img + + +cutout = TransformT('Cutout', _cutout_pil_impl) + + +def _enhancer_impl(enhancer): + """Sets level to be between 0.1 and 1.8 for ImageEnhance transforms of PIL.""" + + def impl(pil_img, level): + v = float_parameter(level, 1.8) + .1 # going to 0 just destroys it + return enhancer(pil_img).enhance(v) + + return impl + + +color = TransformT('Color', _enhancer_impl(ImageEnhance.Color)) +contrast = TransformT('Contrast', _enhancer_impl(ImageEnhance.Contrast)) +brightness = TransformT('Brightness', _enhancer_impl(ImageEnhance.Brightness)) +sharpness = TransformT('Sharpness', _enhancer_impl(ImageEnhance.Sharpness)) + +ALL_TRANSFORMS = [ + identity, flip_lr, flip_ud, auto_contrast, equalize, invert, rotate, + posterize, crop_bilinear, solarize, color, contrast, brightness, sharpness, + shear_x, shear_y, translate_x, translate_y, cutout, blur, smooth +] + +NAME_TO_TRANSFORM = {t.name: t for t in ALL_TRANSFORMS} +TRANSFORM_NAMES = NAME_TO_TRANSFORM.keys() \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/data/ava.py b/Downstream/Spatial-Temporal-Action-Localization/data/ava.py new file mode 100644 index 0000000..54f4aa2 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/data/ava.py @@ -0,0 +1,522 @@ +import os +from venv import main +import torch.utils.data as data +import time +import torch +import numpy as np +import sys +import torch.nn.functional as F +sys.path.append('/mnt/cache/xingsen/xingsen2/VideoMAE_ava') +from alphaction.structures.bounding_box import BoxList +# from transforms import build_transforms +from collections import defaultdict +from alphaction.utils.video_decode import av_decode_video +from alphaction.dataset.datasets.ava import NpInfoDict, NpBoxDict +import pdb +import json +import decord +import logging +from tqdm import tqdm + + 
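+# AKDataset below simply concatenates an AVA-Kinetics part (KineticsDataset)
+# and the original AVA part (AVAVideoDataset): indices in
+# [0, len(kinetics_dataset)) are served by the Kinetics part and the remaining
+# indices by the AVA part; __getitem__ and get_video_info dispatch accordingly.
+# A hypothetical construction (placeholder paths, illustrative frame_span):
+#     dataset = AKDataset(kinetics_annfile='kinetics_train.json',
+#                         ava_annfile='ava_train.json',
+#                         video_root='/path/to/ava/clips',
+#                         frame_span=64,
+#                         remove_clips_without_annotations=True)
+#     video, boxes, idx = dataset[0]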
+class AKDataset(data.Dataset): + def __init__(self,kinetics_annfile,ava_annfile,video_root,frame_span,remove_clips_without_annotations,kinetics_box_file=None,ava_box_file=None, + ava_eval_file_path={},kinetics_eval_file_path={},eval_file_paths={},box_thresh=0.0,action_thresh=0.0,transforms=None): + self.action_thresh = action_thresh + self.box_thresh = box_thresh + self.eval_file_paths = eval_file_paths + self.remove_clips_without_annotations = remove_clips_without_annotations + self.kinetics_dataset = KineticsDataset(kinetics_annfile=kinetics_annfile, + frame_span=frame_span, + remove_clips_without_annotations=True, + box_file=kinetics_box_file, + eval_file_paths=kinetics_eval_file_path, + box_thresh=box_thresh, + action_thresh=action_thresh, + transforms=transforms) + self.ava_dataset = AVAVideoDataset( + video_root=video_root, + ann_file=ava_annfile, + frame_span=frame_span, + remove_clips_without_annotations=remove_clips_without_annotations, + box_file=ava_box_file, + eval_file_paths=ava_eval_file_path, + box_thresh=box_thresh, + action_thresh=action_thresh, + transforms=transforms) + + def __len__(self): + return len(self.kinetics_dataset) + len(self.ava_dataset) + + def __getitem__(self, index): + if self.remove_clips_without_annotations: + missing_id = [17261,35964,52484,97042] + if index in missing_id: + return self.kinetics_dataset[-1] + if index < len(self.kinetics_dataset): + return self.kinetics_dataset[index] + else: + return self.ava_dataset[index - len(self.kinetics_dataset)] + + def get_video_info(self, index): + if index < len(self.kinetics_dataset): + return self.kinetics_dataset.get_video_info(index) + else: + return self.ava_dataset.get_video_info(index - len(self.kinetics_dataset)) + + + +class KineticsDataset(data.Dataset): + def __init__(self,kinetics_annfile,frame_span,remove_clips_without_annotations, box_file=None, + eval_file_paths={},box_thresh=0.0,action_thresh=0.0,transforms=None): + print('loading annotations into memory...') + tic = time.time() + self.kinetics_annfile = kinetics_annfile + kinetics_dict = json.load(open(kinetics_annfile,'r')) + assert type(kinetics_dict) == dict, 'annotation file format {} not supported'.format(type(kinetics_dict)) + self.transforms = transforms + self.frame_span = frame_span + self.video_map = np.load('../annotations/video_path_k700.npy',allow_pickle=True).item() + self.video_size_map = np.load('../annotations/movie_size.npy',allow_pickle=True).item() + # These two attributes are used during ava evaluation... 
+ # Maybe there is a better implementation + self.eval_file_paths = eval_file_paths + self.action_thresh = action_thresh + self.clip2ann_kinetics = self.load_ann_file(kinetics_dict) + self.movie_info_kinetics, self.clip_ids_kinetics, self.clips_info_kinetics = self.get_videos_info(kinetics_dict) + + if remove_clips_without_annotations: + self.clip_ids_kinetics = [clip_id for clip_id in self.clip_ids_kinetics if clip_id in self.clip2ann_kinetics] + # pdb.set_trace() + valid_id = self.del_invalid_id() #23315 + self.clip_ids_kinetics = valid_id + if box_file: + imgToBoxes = self.load_box_file(box_file,box_thresh) # val len 23224 + clip_ids = [ + img_id + for img_id in self.clip_ids_kinetics # + if len(imgToBoxes[img_id]) > 0 + ] #val len 21738 + self.clip_ids_kinetics = clip_ids + self.det_persons = NpBoxDict(imgToBoxes,clip_ids, + value_types=[("bbox", np.float32), ("score", np.float32)]) #21738 + else: + self.det_persons = None + + self.anns = NpBoxDict(self.clip2ann_kinetics,self.clip_ids_kinetics,value_types=[("bbox", np.float32), ("packed_act", np.uint8)]) + clips_info = { + clip_id: + [ + self.movie_info_kinetics.convert_key(self.clips_info_kinetics[clip_id][0]), + self.clips_info_kinetics[clip_id][1] + ] for clip_id in self.clip_ids_kinetics + } + self.clips_info = NpInfoDict(clips_info,value_type=np.int32) + # pdb.set_trace() + super().__init__() + + def __getitem__(self, idx): + _, clip_info = self.clips_info[idx] + + mov_id, timestamp = clip_info + + movie_id, _ = self.movie_info_kinetics[mov_id] + video_data = self._decode_video_data(movie_id, timestamp) + _,im_h,im_w,_ = video_data.shape + if self.det_persons is None: + # Note: During training, we only use gt. Thus we should not provide box file, + # otherwise we will use only box file instead. + boxes, packed_act = self.anns[idx] + boxes = boxes * np.array([im_w, im_h, im_w, im_h]) + boxes_tensor = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4) # guard against no boxes + boxes = BoxList(boxes_tensor, (im_w, im_h), mode="xywh").convert("xyxy") + + # Decode the packed bits from uint8 to one hot, since AVA has 80 classes, + # it can be exactly denoted with 10 bytes, otherwise we may need to discard some bits. + one_hot_label = np.unpackbits(packed_act, axis=1) + one_hot_label = torch.as_tensor(one_hot_label, dtype=torch.uint8) + + boxes.add_field("labels", one_hot_label) # 80 + + else: + boxes, box_score = self.det_persons[idx] + logging.info("box is {},h:{},w:{}".format(boxes,im_h,im_w)) + boxes = boxes * np.array([im_w, im_h, im_w, im_h]) + boxes_tensor = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4) + boxes = BoxList(boxes_tensor, (im_w, im_h), mode="xyxy") + + box_score_tensor = torch.as_tensor(box_score, dtype=torch.float32).reshape(-1, 1) + boxes.add_field("scores", box_score_tensor) + + boxes = boxes.clip_to_image(remove_empty=True) + if self.transforms is not None: + video_data, boxes, transform_randoms = self.transforms(video_data, boxes) + + return video_data, boxes, idx + + def __len__(self): + return len(self.clips_info) + + def _decode_video_data(self, dirname, timestamp): + # decode target video data from segment per second. 
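+        # The clip file name encodes its start and end seconds in its last two
+        # underscore-separated fields, so the AVA-Kinetics timestamp is first
+        # mapped to a relative offset inside the clip. frame_span frames are then
+        # taken around the corresponding middle frame; when the window runs past
+        # either end of the clip, the short side is stretched back to the required
+        # length by trilinear interpolation (numpy2tensorinterpolate below).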
+ video_path = self.video_map[dirname] + time_list = video_path[:-4].split('_')[-2:] + start_time = int(time_list[0]) + end_time = int(time_list[1]) + time_offset = (timestamp - start_time)/(end_time-start_time) + frames = av_decode_video(video_path) + mid_frame = int(time_offset * len(frames)) + right_span = self.frame_span//2 + mid_frame + left_span = right_span - self.frame_span + if left_span < 0 and right_span <= len(frames): + frames_left = self.numpy2tensorinterpolate(frames[0:mid_frame],self.frame_span-(self.frame_span//2)) + frames_right = frames[mid_frame:right_span] + frames = np.concatenate([frames_left,np.stack(frames_right)],axis=0) + elif left_span >= 0 and right_span > len(frames): + frames_left = frames[left_span:mid_frame] + try: + frames_right = self.numpy2tensorinterpolate(frames[mid_frame:],self.frame_span//2) + frames = np.concatenate([np.stack(frames_left),frames_right],axis=0) + except ValueError: + print(mid_frame,len(frames),time_offset,timestamp,start_time,end_time,dirname) + exit(0) + else: + frames = np.stack(frames[left_span:right_span]) + + return frames + + def numpy2tensorinterpolate(self,aArray,time_len): + aArray = torch.from_numpy(np.stack(aArray)).unsqueeze(0).float() + aArray = aArray.permute(0,4,1,2,3) # 1 * channel * time * h * w + _, c, t, h, w = aArray.shape + aArray = F.interpolate(aArray,size=[time_len,h,w],mode='trilinear',align_corners=False) + aArray = aArray.squeeze() + aArray = np.uint8(aArray.permute(1,2,3,0).numpy()) + return aArray + + def del_invalid_id(self): + valid_clip_ids = [] + for clip_id in self.clip_ids_kinetics: + timestamp = int(clip_id.split('_')[-1]) + assert timestamp == self.clips_info_kinetics[clip_id][1] + mov_id = self.movie_info_kinetics.convert_key(self.clips_info_kinetics[clip_id][0]) + movie_id,_ = self.movie_info_kinetics[mov_id] + video_path = self.video_map[movie_id] + try: + time_list = video_path[:-4].split('_')[-2:] + except TypeError: + continue + start_time = int(time_list[0]) + end_time = int(time_list[1]) + if timestamp > start_time and timestamp < end_time: + valid_clip_ids.append(clip_id) + return valid_clip_ids + + def load_ann_file(self,json_dict): + clip2ann = defaultdict(list) + if "annotations" in json_dict: + for ann in json_dict["annotations"]: + try: + action_ids = np.array(ann["action_ids"],dtype=np.uint8) + except ValueError: + continue + one_hot = np.zeros(81, dtype=np.bool) + one_hot[action_ids] = True + packed_act = np.packbits(one_hot[1:]) + clip2ann[ann["image_id"]].append(dict(bbox=ann["bbox"], packed_act=packed_act)) + return clip2ann + + def get_videos_info(self,json_dict): + movies_size = {} + clips_info = {} + for img in json_dict["images"]: + mov = img["movie"] + if mov not in movies_size: + movies_size[mov] = [img["width"],img["height"]] + clips_info[img["id"]] = [mov, img["timestamp"]] + movie_info = NpInfoDict(movies_size, value_type=np.int32) + clip_ids = sorted(list(clips_info.keys())) + return movie_info, clip_ids, clips_info + + def get_video_info(self, index): + _, clip_info = self.clips_info[index] + # mov_id is the id in self.movie_info + mov_id, timestamp = clip_info + # movie_id is the human-readable youtube id. 
+ movie_id, movie_size = self.movie_info_kinetics[mov_id] + h, w = self.video_size_map[movie_id] + return dict(width=w, height=h, movie=movie_id, timestamp=timestamp) + + def load_box_file(self, box_file, score_thresh=0.0): + import json + + print('Loading box file into memory...') + tic = time.time() + with open(box_file, "r") as f: + box_results = json.load(f) + print('Done (t={:0.2f}s)'.format(time.time() - tic)) + + boxImgIds = [box['image_id'] for box in box_results] + + imgToBoxes = defaultdict(list) + for img_id, box in zip(boxImgIds, box_results): + if box['score'] >= score_thresh: + imgToBoxes[img_id].append(box) + return imgToBoxes + + def getitem(self,idx): + return self.__getitem__(idx) + + +class AVAVideoDataset(data.Dataset): + def __init__(self, video_root, ann_file, remove_clips_without_annotations, frame_span, box_file=None, + eval_file_paths={}, box_thresh=0.0, action_thresh=0.0, transforms=None): + + print('loading annotations into memory...') + tic = time.time() + json_dict = json.load(open(ann_file, 'r')) + assert type(json_dict) == dict, 'annotation file format {} not supported'.format(type(json_dict)) + print('Done (t={:0.2f}s)'.format(time.time() - tic)) + + self.video_root = video_root + self.transforms = transforms + self.frame_span = frame_span + + # These two attributes are used during ava evaluation... + # Maybe there is a better implementation + self.eval_file_paths = eval_file_paths + self.action_thresh = action_thresh + + clip2ann = defaultdict(list) + if "annotations" in json_dict: + for ann in json_dict["annotations"]: + action_ids = ann["action_ids"] + one_hot = np.zeros(81, dtype=np.bool) + one_hot[action_ids] = True + packed_act = np.packbits(one_hot[1:]) + clip2ann[ann["image_id"]].append(dict(bbox=ann["bbox"], packed_act=packed_act)) + + movies_size = {} + clips_info = {} + for img in json_dict["images"]: + mov = img["movie"] + if mov not in movies_size: + movies_size[mov] = [img["width"], img["height"]] + clips_info[img["id"]] = [mov, img["timestamp"]] + self.movie_info = NpInfoDict(movies_size, value_type=np.int32) + clip_ids = sorted(list(clips_info.keys())) + + if remove_clips_without_annotations: + clip_ids = [clip_id for clip_id in clip_ids if clip_id in clip2ann] + + if box_file: + # this is only for validation or testing + # we use detected boxes, so remove clips without boxes detected. + imgToBoxes = self.load_box_file(box_file, box_thresh) + clip_ids = [ + img_id + for img_id in clip_ids + if len(imgToBoxes[img_id]) > 0 + ] + self.det_persons = NpBoxDict(imgToBoxes, clip_ids, + value_types=[("bbox", np.float32), ("score", np.float32)]) + else: + self.det_persons = None + + self.anns = NpBoxDict(clip2ann, clip_ids, value_types=[("bbox", np.float32), ("packed_act", np.uint8)]) + + clips_info = { # key + clip_id: + [ + self.movie_info.convert_key(clips_info[clip_id][0]), + clips_info[clip_id][1] + ] for clip_id in clip_ids + } + self.clips_info = NpInfoDict(clips_info, value_type=np.int32) + + def __getitem__(self, idx): + + _, clip_info = self.clips_info[idx] + + # mov_id is the id in self.movie_info + mov_id, timestamp = clip_info + # movie_id is the human-readable youtube id. + movie_id, movie_size = self.movie_info[mov_id] + video_data = self._decode_video_data(movie_id, timestamp) + + im_w, im_h = movie_size + + if self.det_persons is None: + # Note: During training, we only use gt. Thus we should not provide box file, + # otherwise we will use only box file instead. 
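+            # Labels are stored bit-packed: the 80 AVA classes of a box form a
+            # binary vector that np.packbits squeezes into 10 uint8 values
+            # (e.g. action ids {1, 4} set one-hot positions 0 and 3, giving a
+            # first byte of 0b10010000 = 144); np.unpackbits below restores the
+            # 80-dim multi-hot target.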
+ + boxes, packed_act = self.anns[idx] + + boxes_tensor = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4) # guard against no boxes + boxes = BoxList(boxes_tensor, (im_w, im_h), mode="xywh").convert("xyxy") + + # Decode the packed bits from uint8 to one hot, since AVA has 80 classes, + # it can be exactly denoted with 10 bytes, otherwise we may need to discard some bits. + one_hot_label = np.unpackbits(packed_act, axis=1) + one_hot_label = torch.as_tensor(one_hot_label, dtype=torch.uint8) + + boxes.add_field("labels", one_hot_label) # 80 + + else: + boxes, box_score = self.det_persons[idx] + boxes_tensor = torch.as_tensor(boxes).reshape(-1, 4) + boxes = BoxList(boxes_tensor, (im_w, im_h), mode="xywh").convert("xyxy") + + box_score_tensor = torch.as_tensor(box_score, dtype=torch.float32).reshape(-1, 1) + boxes.add_field("scores", box_score_tensor) + + boxes = boxes.clip_to_image(remove_empty=True) + + if self.transforms is not None: + video_data, boxes, transform_randoms = self.transforms(video_data, boxes) + + return video_data, boxes, idx + + def get_video_info(self, index): + _, clip_info = self.clips_info[index] + # mov_id is the id in self.movie_info + mov_id, timestamp = clip_info + # movie_id is the human-readable youtube id. + movie_id, movie_size = self.movie_info[mov_id] + w, h = movie_size + return dict(width=w, height=h, movie=movie_id, timestamp=timestamp) + + def load_box_file(self, box_file, score_thresh=0.0): + import json + + print('Loading box file into memory...') + tic = time.time() + with open(box_file, "r") as f: + box_results = json.load(f) + print('Done (t={:0.2f}s)'.format(time.time() - tic)) + + boxImgIds = [box['image_id'] for box in box_results] + + imgToBoxes = defaultdict(list) + for img_id, box in zip(boxImgIds, box_results): + if box['score'] >= score_thresh: + imgToBoxes[img_id].append(box) + return imgToBoxes + + def _decode_video_data(self, dirname, timestamp): + # decode target video data from segment per second. + + video_folder = os.path.join(self.video_root, dirname) + right_span = self.frame_span//2 + left_span = self.frame_span - right_span + + #load right + cur_t = timestamp + right_frames = [] + while len(right_frames)= score_thresh) + boxes = boxes[box_ids, :] + scores = scores[box_ids, action_ids] + action_ids = action_ids + 1 + + movie_name = video_info['movie'] + timestamp = video_info['timestamp'] + + clip_key = make_image_key(movie_name, timestamp) + + ava_results[clip_key] = { + "boxes": boxes, + "scores": scores, + "action_ids": action_ids + } + return ava_results + + +def read_exclusions(exclusions_file): + """Reads a CSV file of excluded timestamps. + + Args: + exclusions_file: Path of file containing a csv of video-id,timestamp. + + Returns: + A set of strings containing excluded image keys, e.g. "aaaaaaaaaaa,0904", + or an empty set if exclusions file is None. + """ + excluded = set() + exclusions_file = open(exclusions_file, 'r') + if exclusions_file: + reader = csv.reader(exclusions_file) + for row in reader: + assert len(row) == 2, "Expected only 2 columns, got: {}".format(row) + excluded.add(make_image_key(row[0], row[1])) + return excluded + + +def read_labelmap(labelmap_file): + """Reads a labelmap without the dependency on protocol buffers. + + Args: + labelmap_file: Path of file containing a label map protocol buffer. + + Returns: + labelmap: The label map in the form used by the object_detection_evaluation + module - a list of {"id": integer, "name": classname } dicts. 
+ class_ids: A set containing all of the valid class id integers. + """ + labelmap = [] + class_ids = set() + name = "" + class_id = "" + labelmap_file = open(labelmap_file, 'r') + for line in labelmap_file: + if line.startswith(" name:"): + name = line.split('"')[1] + elif line.startswith(" id:") or line.startswith(" label_id:"): + class_id = int(line.strip().split(" ")[-1]) + labelmap.append({"id": class_id, "name": name}) + class_ids.add(class_id) + return labelmap, class_ids + + +def read_csv(csv_file, class_whitelist=None): + """Loads boxes and class labels from a CSV file in the AVA format. + + CSV file format described at https://research.google.com/ava/download.html. + + Args: + csv_file: Path of csv file. + class_whitelist: If provided, boxes corresponding to (integer) class labels + not in this set are skipped. + + Returns: + boxes: A dictionary mapping each unique image key (string) to a list of + boxes, given as coordinates [y1, x1, y2, x2]. + labels: A dictionary mapping each unique image key (string) to a list of + integer class lables, matching the corresponding box in `boxes`. + scores: A dictionary mapping each unique image key (string) to a list of + score values lables, matching the corresponding label in `labels`. If + scores are not provided in the csv, then they will default to 1.0. + """ + start = time.time() + boxes = defaultdict(list) + labels = defaultdict(list) + scores = defaultdict(list) + csv_file = open(csv_file, 'r') + reader = csv.reader(csv_file) + for row in reader: + assert len(row) in [7, 8], "Wrong number of columns: " + row + image_key = make_image_key(row[0], row[1]) + x1, y1, x2, y2 = [float(n) for n in row[2:6]] + action_id = int(row[6]) + if class_whitelist and action_id not in class_whitelist: + continue + score = 1.0 + if len(row) == 8: + score = float(row[7]) + boxes[image_key].append([y1, x1, y2, x2]) + labels[image_key].append(action_id) + scores[image_key].append(score) + print_time("read file " + csv_file.name, start) + return boxes, labels, scores + + +def write_csv(ava_results, csv_result_file): + start = time.time() + with open(csv_result_file, 'w') as csv_file: + spamwriter = csv.writer(csv_file, delimiter=',') + for clip_key in ava_results: + movie_name, timestamp = decode_image_key(clip_key) + cur_result = ava_results[clip_key] + boxes = cur_result["boxes"] + scores = cur_result["scores"] + action_ids = cur_result["action_ids"] + assert boxes.shape[0] == scores.shape[0] == action_ids.shape[0] + for box, score, action_id in zip(boxes, scores, action_ids): + box_str = ['{:.5f}'.format(cord) for cord in box] + score_str = '{:.5f}'.format(score) + spamwriter.writerow([movie_name, timestamp, ] + box_str + [action_id, score_str]) + print_time("write file " + csv_result_file, start) + + +def print_time(message, start): + logging.info("==> %g seconds to %s", time.time() - start, message) + + +def evaluate_predictions_on_ava(eval_file_paths, ava_results, csv_result_file): + write_csv(ava_results, csv_result_file) + + groundtruth = eval_file_paths["csv_gt_file"] + labelmap = eval_file_paths["labelmap_file"] + exclusions = eval_file_paths["exclusion_file"] + + categories, class_whitelist = read_labelmap(labelmap) + logging.info("CATEGORIES (%d):\n%s", len(categories), + pformat(categories, indent=2)) + excluded_keys = read_exclusions(exclusions) + + pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator( + categories) + + # Reads the ground truth dataset. 
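+    # Ground-truth boxes and then the detections are registered with the
+    # PascalDetectionEvaluator one image key ("<movie>,<timestamp>") at a time;
+    # evaluate() finally returns the PASCAL-style per-class APs and their mean
+    # (the reported AVA mAP, computed at an IoU threshold of 0.5 by default).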
+ boxes, labels, _ = read_csv(groundtruth, class_whitelist) + start = time.time() + for image_key in boxes: + if image_key in excluded_keys: + logging.info(("Found excluded timestamp in ground truth: %s. " + "It will be ignored."), image_key) + continue + pascal_evaluator.add_single_ground_truth_image_info( + image_key, { + standard_fields.InputDataFields.groundtruth_boxes: + np.array(boxes[image_key], dtype=float), + standard_fields.InputDataFields.groundtruth_classes: + np.array(labels[image_key], dtype=int), + standard_fields.InputDataFields.groundtruth_difficult: + np.zeros(len(boxes[image_key]), dtype=bool) + }) + print_time("convert groundtruth", start) + + # Reads detections dataset. + boxes, labels, scores = read_csv(csv_result_file, class_whitelist) + start = time.time() + for image_key in boxes: + if image_key in excluded_keys: + logging.info(("Found excluded timestamp in detections: %s. " + "It will be ignored."), image_key) + continue + pascal_evaluator.add_single_detected_image_info( + image_key, { + standard_fields.DetectionResultFields.detection_boxes: + np.array(boxes[image_key], dtype=float), + standard_fields.DetectionResultFields.detection_classes: + np.array(labels[image_key], dtype=int), + standard_fields.DetectionResultFields.detection_scores: + np.array(scores[image_key], dtype=float) + }) + print_time("convert detections", start) + + start = time.time() + metrics = pascal_evaluator.evaluate() + print_time("run_evaluator", start) + return metrics diff --git a/Downstream/Spatial-Temporal-Action-Localization/data/movie_size.npy b/Downstream/Spatial-Temporal-Action-Localization/data/movie_size.npy new file mode 100644 index 0000000..b9539cb Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/data/movie_size.npy differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/data/rand_aug.py b/Downstream/Spatial-Temporal-Action-Localization/data/rand_aug.py new file mode 100644 index 0000000..8cec10a --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/data/rand_aug.py @@ -0,0 +1,1115 @@ +""" +Modified from https://github.com/google-research/ssl_detection/blob/master/detection/utils/augmentation.py. 
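+
+Clip-level RandAugment-style operations registered as mmdet PIPELINES. Each op
+consumes and returns a (clip_list, boxes, transform_randoms) tuple; SplitClip and
+Composeclip convert between a stacked (T, H, W, C) clip array and the list of
+per-frame arrays that the individual ops work on.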
+""" +import copy +from unittest import result + +import cv2 +import mmcv +import numpy as np +from PIL import Image, ImageEnhance, ImageOps +from mmcv.image.colorspace import bgr2rgb, rgb2bgr +from mmdet.core.mask import BitmapMasks, PolygonMasks +from mmdet.datasets import PIPELINES +from mmdet.datasets.pipelines import Compose as BaseCompose +from mmdet.datasets.pipelines import transforms + +import numpy as np +import pdb + + +class GTrans(object): + @classmethod + def inverse(cls, results): + # compute the inverse + return results["transform_matrix"].I # 3x3 + + @classmethod + def apply(self, results, operator, **kwargs): + trans_matrix = getattr(self, f"_get_{operator}_matrix")(**kwargs) + if "transform_matrix" not in results: + results["transform_matrix"] = trans_matrix + else: + base_transformation = results["transform_matrix"] + results["transform_matrix"] = np.dot(trans_matrix, base_transformation) + + @classmethod + def apply_cv2_matrix(self, results, cv2_matrix): + if cv2_matrix.shape[0] == 2: + mat = np.concatenate( + [cv2_matrix, np.array([0, 0, 1]).reshape((1, 3))], axis=0 + ) + else: + mat = cv2_matrix + base_transformation = results["transform_matrix"] + results["transform_matrix"] = np.dot(mat, base_transformation) + return results + + @classmethod + def _get_rotate_matrix(cls, degree=None, cv2_rotation_matrix=None, inverse=False): + # TODO: this is rotated by zero point + if degree is None and cv2_rotation_matrix is None: + raise ValueError( + "At least one of degree or rotation matrix should be provided" + ) + if degree: + if inverse: + degree = -degree + rad = degree * np.pi / 180 + sin_a = np.sin(rad) + cos_a = np.cos(rad) + return np.array([[cos_a, sin_a, 0], [-sin_a, cos_a, 0], [0, 0, 1]]) # 2x3 + else: + mat = np.concatenate( + [cv2_rotation_matrix, np.array([0, 0, 1]).reshape((1, 3))], axis=0 + ) + if inverse: + mat = mat * np.array([[1, -1, -1], [-1, 1, -1], [1, 1, 1]]) + return mat + + @classmethod + def _get_shift_matrix(cls, dx=0, dy=0, inverse=False): + if inverse: + dx = -dx + dy = -dy + return np.array([[1, 0, dx], [0, 1, dy], [0, 0, 1]]) + + @classmethod + def _get_shear_matrix( + cls, degree=None, magnitude=None, direction="horizontal", inverse=False + ): + if magnitude is None: + assert degree is not None + rad = degree * np.pi / 180 + magnitude = np.tan(rad) + + if inverse: + magnitude = -magnitude + if direction == "horizontal": + shear_matrix = np.float32([[1, magnitude, 0], [0, 1, 0], [0, 0, 1]]) + else: + shear_matrix = np.float32([[1, 0, 0], [magnitude, 1, 0], [0, 0, 1]]) + return shear_matrix + + @classmethod + def _get_flip_matrix(cls, shape, direction="horizontal", inverse=False): + h, w = shape + if direction == "horizontal": + flip_matrix = np.float32([[-1, 0, w], [0, 1, 0], [0, 0, 1]]) + else: + flip_matrix = np.float32([[1, 0, 0], [0, h - 1, 0], [0, 0, 1]]) + return flip_matrix + + @classmethod + def _get_scale_matrix(cls, sx, sy, inverse=False): + if inverse: + sx = 1 / sx + sy = 1 / sy + return np.float32([[sx, 0, 0], [0, sy, 0], [0, 0, 1]]) + +PARAMETER_MAX = 10 + + +def int_parameter(level, maxval, max_level=None): + if max_level is None: + max_level = PARAMETER_MAX + return int(level * maxval / max_level) + + +def float_parameter(level, maxval, max_level=None): + if max_level is None: + max_level = PARAMETER_MAX + return float(level) * maxval / max_level + +class SplitClip(object): + def __init__(self,datatype=None): + self.datatype = datatype + + def __call__(self,results): + clips, box, transform_randoms = results + clip_list = 
np.split(clips,clips.shape[0],axis=0) + for i in range(len(clip_list)): + clip_list[i] = clip_list[i].squeeze() + return clip_list,box,transform_randoms + + def enable_record(self, mode: bool = True): + self.record = mode + +class Composeclip(object): + def __init__(self,datatype=None): + self.datatype = datatype + + def __call__(self,results): + clip_list, box,transform_randoms = results + for i in range(len(clip_list)): + clip_list[i] = np.expand_dims(clip_list[i], axis=0) + clip = np.concatenate(clip_list,axis=0) + return clip,box,transform_randoms + + def enable_record(self, mode: bool = True): + self.record = mode + + + + +class RandAug(object): + """refer to https://github.com/google-research/ssl_detection/blob/00d52272f61b56eade8d5ace18213cba6c74f6d8/detection/utils/augmentation.py#L240.""" + + def __init__( + self, + prob: float = 1.0, + magnitude: int = 10, + random_magnitude: bool = True, + record: bool = False, + magnitude_limit: int = 10, + ): + assert 0 <= prob <= 1, f"probability should be in (0,1) but get {prob}" + assert ( + magnitude <= PARAMETER_MAX + ), f"magnitude should be small than max value {PARAMETER_MAX} but get {magnitude}" + + self.prob = prob + self.magnitude = magnitude + self.magnitude_limit = magnitude_limit + self.random_magnitude = random_magnitude + self.record = record + self.buffer = None + + def __call__(self, results): + if np.random.random() < self.prob: + magnitude = self.magnitude + if self.random_magnitude: + magnitude = np.random.randint(1, magnitude) + if self.record: + if "aug_info" not in results: + results["aug_info"] = [] + results["aug_info"].append(self.get_aug_info(magnitude=magnitude)) + results = self.apply(results, magnitude) + # clear buffer + return results + + def apply(self, clip_list, box, magnitude: int = None): + raise NotImplementedError() + + def __repr__(self): + return f"{self.__class__.__name__}(prob={self.prob},magnitude={self.magnitude},max_magnitude={self.magnitude_limit},random_magnitude={self.random_magnitude})" + + def get_aug_info(self, **kwargs): + aug_info = dict(type=self.__class__.__name__) + aug_info.update( + dict( + prob=1.0, + random_magnitude=False, + record=False, + magnitude=self.magnitude, + ) + ) + aug_info.update(kwargs) + return aug_info + + def enable_record(self, mode: bool = True): + self.record = mode + + +@PIPELINES.register_module() +class Identity(RandAug): + def apply(self, results, magnitude: int = None): + return results + + +@PIPELINES.register_module() +class AutoContrast(RandAug): ###将图像对比度最大化 + def apply(self, results, magnitude=None): + clip_list, box, transform_randoms = results + new_clip_list = [] + for clip in clip_list: + new_clip_list.append(np.asarray(ImageOps.autocontrast(Image.fromarray(clip)), dtype=clip.dtype)) + return new_clip_list,box,transform_randoms + + +@PIPELINES.register_module() +class RandEqualize(RandAug): ###均衡图像的直方图。该函数使用一个非线性映射到输入图像,为了产生灰色值均匀分布的输出图像 + def apply(self, results, magnitude=None): + clip_list, box, transform_randoms = results + new_clip_list = [] + for clip in clip_list: + new_clip_list.append(np.asarray(ImageOps.equalize(Image.fromarray(clip)), dtype=clip.dtype)) + return new_clip_list,box,transform_randoms + + +@PIPELINES.register_module() +class RandSolarize(RandAug): ###曝光图像,反转所有高于阈值的像素值 + def apply(self, results, magnitude=None): + clip_list, box, transform_randoms = results + new_clip_list = [] + for clip in clip_list: + new_clip_list.append(mmcv.solarize( + clip, min(int_parameter(magnitude, 256, self.magnitude_limit), 255) + )) + return 
new_clip_list,box,transform_randoms + + +def _enhancer_impl(enhancer): + """Sets level to be between 0.1 and 1.8 for ImageEnhance transforms of + PIL.""" + + def impl(pil_img, level, max_level=None): + v = float_parameter(level, 1.8, max_level) + 0.1 # going to 0 just destroys it + return enhancer(pil_img).enhance(v) + + return impl + + +class RandEnhance(RandAug): + op = None + + def apply(self,results, magnitude=None): + clip_list, box = results + new_clip_list = [] + for clip in clip_list: + new_clip_list.append( + np.asarray( + _enhancer_impl(self.op)( + Image.fromarray(clip), magnitude, self.magnitude_limit + ), + dtype=clip.dtype, + ) + ) + return new_clip_list,box + + +@PIPELINES.register_module() +class RandColor(RandEnhance): ##颜色增强 + op = ImageEnhance.Color + + +@PIPELINES.register_module() +class RandContrast(RandEnhance): ## 对比度增强类用于调整图像的对比度。类似于调整彩色电视机的对比度 + op = ImageEnhance.Contrast + + +@PIPELINES.register_module() +class RandBrightness(RandEnhance): ##亮度增强 + op = ImageEnhance.Brightness + + +@PIPELINES.register_module() +class RandSharpness(RandEnhance): ##锐度增强 + op = ImageEnhance.Sharpness + + +@PIPELINES.register_module() +class RandPosterize(RandAug): ###将每个颜色通道上变量bits对应的低(8-bits)个bit置0。变量bits的取值范围为[0,8] + def apply(self, results, magnitude=None): + clip_list, box, transform_randoms = results + new_clip_list = [] + for clip in clip_list: + new_clip_list.append(np.asarray(ImageOps.posterize(Image.fromarray(clip), 4-magnitude), dtype=clip.dtype)) + return new_clip_list,box,transform_randoms + + +@PIPELINES.register_module() +class Sequential(BaseCompose): + def __init__(self, transforms, record: bool = False): + super().__init__(transforms) + self.record = record + self.enable_record(record) + + def enable_record(self, mode: bool = True): + # enable children to record + self.record = mode + for transform in self.transforms: + transform.enable_record(mode) + + +@PIPELINES.register_module() +class OneOf(Sequential): + def __init__(self, transforms, record: bool = False): + self.transforms = [] + for trans in transforms: + if isinstance(trans, list): + self.transforms.append(Sequential(trans)) + else: + assert isinstance(trans, dict) + self.transforms.append(Sequential([trans])) + self.enable_record(record) + + def __call__(self, results): + transform = np.random.choice(self.transforms) + return transform(results) + + +@PIPELINES.register_module() +class ShuffledSequential(Sequential): + def __call__(self, data): + order = np.random.permutation(len(self.transforms)) + for idx in order: + t = self.transforms[idx] + data = t(data) + if data is None: + return None + return data + + +""" +Geometric Augmentation. Modified from thirdparty/mmdetection/mmdet/datasets/pipelines/auto_augment.py +""" + + +def bbox2fields(): + """The key correspondence from bboxes to labels, masks and + segmentations.""" + bbox2label = {"gt_bboxes": "gt_labels", "gt_bboxes_ignore": "gt_labels_ignore"} + bbox2mask = {"gt_bboxes": "gt_masks", "gt_bboxes_ignore": "gt_masks_ignore"} + bbox2seg = { + "gt_bboxes": "gt_semantic_seg", + } + return bbox2label, bbox2mask, bbox2seg + + +class GeometricAugmentation(object): + def __init__( + self, + img_fill_val=125, + seg_ignore_label=255, + min_size=0, + prob: float = 1.0, + random_magnitude: bool = True, + record: bool = False, + ): + if isinstance(img_fill_val, (float, int)): + img_fill_val = tuple([float(img_fill_val)] * 3) + elif isinstance(img_fill_val, tuple): + assert len(img_fill_val) == 3, "img_fill_val as tuple must have 3 elements." 
+ img_fill_val = tuple([float(val) for val in img_fill_val]) + assert np.all( + [0 <= val <= 255 for val in img_fill_val] + ), "all elements of img_fill_val should between range [0,255]." + self.img_fill_val = img_fill_val + self.seg_ignore_label = seg_ignore_label + self.min_size = min_size + self.prob = prob + self.random_magnitude = random_magnitude + self.record = record + + def __call__(self, results): + if np.random.random() < self.prob: + magnitude: dict = self.get_magnitude(results) + if self.record: + if "aug_info" not in results: + results["aug_info"] = [] + results["aug_info"].append(self.get_aug_info(**magnitude)) + results = self.apply(results, **magnitude) + self._filter_invalid(results, min_size=self.min_size) + return results + + def get_magnitude(self, results) -> dict: + raise NotImplementedError() + + def apply(self, results, **kwargs): + raise NotImplementedError() + + def enable_record(self, mode: bool = True): + self.record = mode + + def get_aug_info(self, **kwargs): + aug_info = dict(type=self.__class__.__name__) + aug_info.update( + dict( + # make op deterministic + prob=1.0, + random_magnitude=False, + record=False, + img_fill_val=self.img_fill_val, + seg_ignore_label=self.seg_ignore_label, + min_size=self.min_size, + ) + ) + aug_info.update(kwargs) + return aug_info + + def _filter_invalid(self, results, min_size=0): + """Filter bboxes and masks too small or translated out of image.""" + if min_size is None: + return results + bbox2label, bbox2mask, _ = bbox2fields() + for key in results.get("bbox_fields", []): + bbox_w = results[key][:, 2] - results[key][:, 0] + bbox_h = results[key][:, 3] - results[key][:, 1] + valid_inds = (bbox_w > min_size) & (bbox_h > min_size) + valid_inds = np.nonzero(valid_inds)[0] + results[key] = results[key][valid_inds] + # label fields. e.g. gt_labels and gt_labels_ignore + label_key = bbox2label.get(key) + if label_key in results: + results[label_key] = results[label_key][valid_inds] + # mask fields, e.g. 
gt_masks and gt_masks_ignore + mask_key = bbox2mask.get(key) + if mask_key in results: + results[mask_key] = results[mask_key][valid_inds] + return results + + def __repr__(self): + return f"""{self.__class__.__name__}( + img_fill_val={self.img_fill_val}, + seg_ignore_label={self.seg_ignore_label}, + min_size={self.magnitude}, + prob: float = {self.prob}, + random_magnitude: bool = {self.random_magnitude}, + )""" + + +@PIPELINES.register_module() +class RandTranslate(GeometricAugmentation): + def __init__(self, x=None, y=None, **kwargs): + super().__init__(**kwargs) + self.x = x + self.y = y + if self.x is None and self.y is None: + self.prob = 0.0 + + def get_magnitude(self, results): + magnitude = {} + if self.random_magnitude: + if isinstance(self.x, (list, tuple)): + assert len(self.x) == 2 + x = np.random.random() * (self.x[1] - self.x[0]) + self.x[0] + magnitude["x"] = x + if isinstance(self.y, (list, tuple)): + assert len(self.y) == 2 + y = np.random.random() * (self.y[1] - self.y[0]) + self.y[0] + magnitude["y"] = y + else: + if self.x is not None: + assert isinstance(self.x, (int, float)) + magnitude["x"] = self.x + if self.y is not None: + assert isinstance(self.y, (int, float)) + magnitude["y"] = self.y + return magnitude + + def apply(self, results, x=None, y=None): + # ratio to pixel + h, w, c = results["img_shape"] + if x is not None: + x = w * x + if y is not None: + y = h * y + if x is not None: + # translate horizontally + self._translate(results, x) + if y is not None: + # translate veritically + self._translate(results, y, direction="vertical") + return results + + def _translate(self, results, offset, direction="horizontal"): + if self.record: + GTrans.apply( + results, + "shift", + dx=offset if direction == "horizontal" else 0, + dy=offset if direction == "vertical" else 0, + ) + self._translate_img(results, offset, direction=direction) + self._translate_bboxes(results, offset, direction=direction) + # fill_val defaultly 0 for BitmapMasks and None for PolygonMasks. + self._translate_masks(results, offset, direction=direction) + self._translate_seg( + results, offset, fill_val=self.seg_ignore_label, direction=direction + ) + + def _translate_img(self, results, offset, direction="horizontal"): + for key in results.get("img_fields", ["img"]): + img = results[key].copy() + results[key] = mmcv.imtranslate( + img, offset, direction, self.img_fill_val + ).astype(img.dtype) + + def _translate_bboxes(self, results, offset, direction="horizontal"): + """Shift bboxes horizontally or vertically, according to offset.""" + h, w, c = results["img_shape"] + for key in results.get("bbox_fields", []): + min_x, min_y, max_x, max_y = np.split( + results[key], results[key].shape[-1], axis=-1 + ) + if direction == "horizontal": + min_x = np.maximum(0, min_x + offset) + max_x = np.minimum(w, max_x + offset) + elif direction == "vertical": + min_y = np.maximum(0, min_y + offset) + max_y = np.minimum(h, max_y + offset) + + # the boxes translated outside of image will be filtered along with + # the corresponding masks, by invoking ``_filter_invalid``. 
+ results[key] = np.concatenate([min_x, min_y, max_x, max_y], axis=-1) + + def _translate_masks(self, results, offset, direction="horizontal", fill_val=0): + """Translate masks horizontally or vertically.""" + h, w, c = results["img_shape"] + for key in results.get("mask_fields", []): + masks = results[key] + results[key] = masks.translate((h, w), offset, direction, fill_val) + + def _translate_seg(self, results, offset, direction="horizontal", fill_val=255): + """Translate segmentation maps horizontally or vertically.""" + for key in results.get("seg_fields", []): + seg = results[key].copy() + results[key] = mmcv.imtranslate(seg, offset, direction, fill_val).astype( + seg.dtype + ) + + def __repr__(self): + repr_str = super().__repr__() + return ("\n").join( + repr_str.split("\n")[:-1] + + [f"x={self.x}", f"y={self.y}"] + + repr_str.split("\n")[-1:] + ) + + +@PIPELINES.register_module() +class RandRotate(GeometricAugmentation): + def __init__(self, angle=None, center=None, scale=1, **kwargs): + super().__init__(**kwargs) + self.angle = angle + self.center = center + self.scale = scale + if self.angle is None: + self.prob = 0.0 + + def get_magnitude(self, results): + magnitude = {} + if self.random_magnitude: + if isinstance(self.angle, (list, tuple)): + assert len(self.angle) == 2 + angle = ( + np.random.random() * (self.angle[1] - self.angle[0]) + self.angle[0] + ) + magnitude["angle"] = angle + else: + if self.angle is not None: + assert isinstance(self.angle, (int, float)) + magnitude["angle"] = self.angle + + return magnitude + + def apply(self, results, angle: float = None): + h, w = results["img"].shape[:2] + center = self.center + if center is None: + center = ((w - 1) * 0.5, (h - 1) * 0.5) + self._rotate_img(results, angle, center, self.scale) + rotate_matrix = cv2.getRotationMatrix2D(center, -angle, self.scale) + if self.record: + GTrans.apply(results, "rotate", cv2_rotation_matrix=rotate_matrix) + self._rotate_bboxes(results, rotate_matrix) + self._rotate_masks(results, angle, center, self.scale, fill_val=0) + self._rotate_seg( + results, angle, center, self.scale, fill_val=self.seg_ignore_label + ) + return results + + def _rotate_img(self, results, angle, center=None, scale=1.0): + """Rotate the image. + + Args: + results (dict): Result dict from loading pipeline. + angle (float): Rotation angle in degrees, positive values + mean clockwise rotation. Same in ``mmcv.imrotate``. + center (tuple[float], optional): Center point (w, h) of the + rotation. Same in ``mmcv.imrotate``. + scale (int | float): Isotropic scale factor. Same in + ``mmcv.imrotate``. 
+ """ + for key in results.get("img_fields", ["img"]): + img = results[key].copy() + img_rotated = mmcv.imrotate( + img, angle, center, scale, border_value=self.img_fill_val + ) + results[key] = img_rotated.astype(img.dtype) + + def _rotate_bboxes(self, results, rotate_matrix): + """Rotate the bboxes.""" + h, w, c = results["img_shape"] + for key in results.get("bbox_fields", []): + min_x, min_y, max_x, max_y = np.split( + results[key], results[key].shape[-1], axis=-1 + ) + coordinates = np.stack( + [[min_x, min_y], [max_x, min_y], [min_x, max_y], [max_x, max_y]] + ) # [4, 2, nb_bbox, 1] + # pad 1 to convert from format [x, y] to homogeneous + # coordinates format [x, y, 1] + coordinates = np.concatenate( + ( + coordinates, + np.ones((4, 1, coordinates.shape[2], 1), coordinates.dtype), + ), + axis=1, + ) # [4, 3, nb_bbox, 1] + coordinates = coordinates.transpose((2, 0, 1, 3)) # [nb_bbox, 4, 3, 1] + rotated_coords = np.matmul(rotate_matrix, coordinates) # [nb_bbox, 4, 2, 1] + rotated_coords = rotated_coords[..., 0] # [nb_bbox, 4, 2] + min_x, min_y = ( + np.min(rotated_coords[:, :, 0], axis=1), + np.min(rotated_coords[:, :, 1], axis=1), + ) + max_x, max_y = ( + np.max(rotated_coords[:, :, 0], axis=1), + np.max(rotated_coords[:, :, 1], axis=1), + ) + min_x, min_y = ( + np.clip(min_x, a_min=0, a_max=w), + np.clip(min_y, a_min=0, a_max=h), + ) + max_x, max_y = ( + np.clip(max_x, a_min=min_x, a_max=w), + np.clip(max_y, a_min=min_y, a_max=h), + ) + results[key] = np.stack([min_x, min_y, max_x, max_y], axis=-1).astype( + results[key].dtype + ) + + def _rotate_masks(self, results, angle, center=None, scale=1.0, fill_val=0): + """Rotate the masks.""" + h, w, c = results["img_shape"] + for key in results.get("mask_fields", []): + masks = results[key] + results[key] = masks.rotate((h, w), angle, center, scale, fill_val) + + def _rotate_seg(self, results, angle, center=None, scale=1.0, fill_val=255): + """Rotate the segmentation map.""" + for key in results.get("seg_fields", []): + seg = results[key].copy() + results[key] = mmcv.imrotate( + seg, angle, center, scale, border_value=fill_val + ).astype(seg.dtype) + + def __repr__(self): + repr_str = super().__repr__() + return ("\n").join( + repr_str.split("\n")[:-1] + + [f"angle={self.angle}", f"center={self.center}", f"scale={self.scale}"] + + repr_str.split("\n")[-1:] + ) + + +@PIPELINES.register_module() +class RandShear(GeometricAugmentation): + def __init__(self, x=None, y=None, interpolation="bilinear", **kwargs): + super().__init__(**kwargs) + self.x = x + self.y = y + self.interpolation = interpolation + if self.x is None and self.y is None: + self.prob = 0.0 + + def get_magnitude(self, results): + magnitude = {} + if self.random_magnitude: + if isinstance(self.x, (list, tuple)): + assert len(self.x) == 2 + x = np.random.random() * (self.x[1] - self.x[0]) + self.x[0] + magnitude["x"] = x + if isinstance(self.y, (list, tuple)): + assert len(self.y) == 2 + y = np.random.random() * (self.y[1] - self.y[0]) + self.y[0] + magnitude["y"] = y + else: + if self.x is not None: + assert isinstance(self.x, (int, float)) + magnitude["x"] = self.x + if self.y is not None: + assert isinstance(self.y, (int, float)) + magnitude["y"] = self.y + return magnitude + + def apply(self, results, x=None, y=None): + if x is not None: + # translate horizontally + self._shear(results, np.tanh(-x * np.pi / 180)) + if y is not None: + # translate veritically + self._shear(results, np.tanh(y * np.pi / 180), direction="vertical") + return results + + def _shear(self, results, 
magnitude, direction="horizontal"): + if self.record: + GTrans.apply(results, "shear", magnitude=magnitude, direction=direction) + self._shear_img(results, magnitude, direction, interpolation=self.interpolation) + self._shear_bboxes(results, magnitude, direction=direction) + # fill_val defaultly 0 for BitmapMasks and None for PolygonMasks. + self._shear_masks( + results, magnitude, direction=direction, interpolation=self.interpolation + ) + self._shear_seg( + results, + magnitude, + direction=direction, + interpolation=self.interpolation, + fill_val=self.seg_ignore_label, + ) + + def _shear_img( + self, results, magnitude, direction="horizontal", interpolation="bilinear" + ): + """Shear the image. + + Args: + results (dict): Result dict from loading pipeline. + magnitude (int | float): The magnitude used for shear. + direction (str): The direction for shear, either "horizontal" + or "vertical". + interpolation (str): Same as in :func:`mmcv.imshear`. + """ + for key in results.get("img_fields", ["img"]): + img = results[key] + img_sheared = mmcv.imshear( + img, + magnitude, + direction, + border_value=self.img_fill_val, + interpolation=interpolation, + ) + results[key] = img_sheared.astype(img.dtype) + + def _shear_bboxes(self, results, magnitude, direction="horizontal"): + """Shear the bboxes.""" + h, w, c = results["img_shape"] + if direction == "horizontal": + shear_matrix = np.stack([[1, magnitude], [0, 1]]).astype( + np.float32 + ) # [2, 2] + else: + shear_matrix = np.stack([[1, 0], [magnitude, 1]]).astype(np.float32) + for key in results.get("bbox_fields", []): + min_x, min_y, max_x, max_y = np.split( + results[key], results[key].shape[-1], axis=-1 + ) + coordinates = np.stack( + [[min_x, min_y], [max_x, min_y], [min_x, max_y], [max_x, max_y]] + ) # [4, 2, nb_box, 1] + coordinates = ( + coordinates[..., 0].transpose((2, 1, 0)).astype(np.float32) + ) # [nb_box, 2, 4] + new_coords = np.matmul( + shear_matrix[None, :, :], coordinates + ) # [nb_box, 2, 4] + min_x = np.min(new_coords[:, 0, :], axis=-1) + min_y = np.min(new_coords[:, 1, :], axis=-1) + max_x = np.max(new_coords[:, 0, :], axis=-1) + max_y = np.max(new_coords[:, 1, :], axis=-1) + min_x = np.clip(min_x, a_min=0, a_max=w) + min_y = np.clip(min_y, a_min=0, a_max=h) + max_x = np.clip(max_x, a_min=min_x, a_max=w) + max_y = np.clip(max_y, a_min=min_y, a_max=h) + results[key] = np.stack([min_x, min_y, max_x, max_y], axis=-1).astype( + results[key].dtype + ) + + def _shear_masks( + self, + results, + magnitude, + direction="horizontal", + fill_val=0, + interpolation="bilinear", + ): + """Shear the masks.""" + h, w, c = results["img_shape"] + for key in results.get("mask_fields", []): + masks = results[key] + results[key] = masks.shear( + (h, w), + magnitude, + direction, + border_value=fill_val, + interpolation=interpolation, + ) + + def _shear_seg( + self, + results, + magnitude, + direction="horizontal", + fill_val=255, + interpolation="bilinear", + ): + """Shear the segmentation maps.""" + for key in results.get("seg_fields", []): + seg = results[key] + results[key] = mmcv.imshear( + seg, + magnitude, + direction, + border_value=fill_val, + interpolation=interpolation, + ).astype(seg.dtype) + + def __repr__(self): + repr_str = super().__repr__() + return ("\n").join( + repr_str.split("\n")[:-1] + + [f"x_magnitude={self.x}", f"y_magnitude={self.y}"] + + repr_str.split("\n")[-1:] + ) + + +@PIPELINES.register_module() +class RandErase(GeometricAugmentation): + def __init__( + self, + n_iterations=None, + size=None, + squared: bool = 
True, + patches=None, + **kwargs, + ): + kwargs.update(min_size=None) + super().__init__(**kwargs) + self.n_iterations = n_iterations + self.size = size + self.squared = squared + self.patches = patches + + def get_magnitude(self, results): + magnitude = {} + if self.random_magnitude: + n_iterations = self._get_erase_cycle() + patches = [] + h, w, c = results["img_shape"] + for i in range(n_iterations): + # random sample patch size in the image + ph, pw = self._get_patch_size(h, w) + # random sample patch left top in the image + px, py = np.random.randint(0, w - pw), np.random.randint(0, h - ph) + patches.append([px, py, px + pw, py + ph]) + magnitude["patches"] = patches + else: + assert self.patches is not None + magnitude["patches"] = self.patches + + return magnitude + + def _get_erase_cycle(self): + if isinstance(self.n_iterations, int): + n_iterations = self.n_iterations + else: + assert ( + isinstance(self.n_iterations, (tuple, list)) + and len(self.n_iterations) == 2 + ) + n_iterations = np.random.randint(*self.n_iterations) + return n_iterations + + def _get_patch_size(self, h, w): + if isinstance(self.size, float): + assert 0 < self.size < 1 + return int(self.size * h), int(self.size * w) + else: + assert isinstance(self.size, (tuple, list)) + assert len(self.size) == 2 + assert 0 <= self.size[0] < 1 and 0 <= self.size[1] < 1 + w_ratio = np.random.random() * (self.size[1] - self.size[0]) + self.size[0] + h_ratio = w_ratio + + if not self.squared: + h_ratio = ( + np.random.random() * (self.size[1] - self.size[0]) + self.size[0] + ) + return int(h_ratio * h), int(w_ratio * w) + + def apply(self, results, patches: list): + for patch in patches: + self._erase_image(results, patch, fill_val=self.img_fill_val) + self._erase_mask(results, patch) + self._erase_seg(results, patch, fill_val=self.seg_ignore_label) + return results + + def _erase_image(self, results, patch, fill_val=128): + for key in results.get("img_fields", ["img"]): + tmp = results[key].copy() + x1, y1, x2, y2 = patch + tmp[y1:y2, x1:x2, :] = fill_val + results[key] = tmp + + def _erase_mask(self, results, patch, fill_val=0): + for key in results.get("mask_fields", []): + masks = results[key] + if isinstance(masks, PolygonMasks): + # convert mask to bitmask + masks = masks.to_bitmap() + x1, y1, x2, y2 = patch + tmp = masks.masks.copy() + tmp[:, y1:y2, x1:x2] = fill_val + masks = BitmapMasks(tmp, masks.height, masks.width) + results[key] = masks + + def _erase_seg(self, results, patch, fill_val=0): + for key in results.get("seg_fields", []): + seg = results[key].copy() + x1, y1, x2, y2 = patch + seg[y1:y2, x1:x2] = fill_val + results[key] = seg + + +@PIPELINES.register_module() +class RecomputeBox(object): + def __init__(self, record=False): + self.record = record + + def __call__(self, results): + if self.record: + if "aug_info" not in results: + results["aug_info"] = [] + results["aug_info"].append(dict(type="RecomputeBox")) + _, bbox2mask, _ = bbox2fields() + for key in results.get("bbox_fields", []): + mask_key = bbox2mask.get(key) + if mask_key in results: + masks = results[mask_key] + results[key] = self._recompute_bbox(masks) + return results + + def enable_record(self, mode: bool = True): + self.record = mode + + def _recompute_bbox(self, masks): + boxes = np.zeros(masks.masks.shape[0], 4, dtype=np.float32) + x_any = np.any(masks.masks, axis=1) + y_any = np.any(masks.masks, axis=2) + for idx in range(masks.masks.shape[0]): + x = np.where(x_any[idx, :])[0] + y = np.where(y_any[idx, :])[0] + if len(x) > 0 and len(y) 
> 0: + boxes[idx, :] = np.array( + [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=np.float32 + ) + return boxes + + +# TODO: Implement Augmentation Inside Box + + +@PIPELINES.register_module() +class RandResize(transforms.Resize): + def __init__(self, record=False, **kwargs): + super().__init__(**kwargs) + self.record = record + + def __call__(self, results): + results = super().__call__(results) + if self.record: + scale_factor = results["scale_factor"] + GTrans.apply(results, "scale", sx=scale_factor[0], sy=scale_factor[1]) + + if "aug_info" not in results: + results["aug_info"] = [] + new_h, new_w = results["img"].shape[:2] + results["aug_info"].append( + dict( + type=self.__class__.__name__, + record=False, + img_scale=(new_w, new_h), + keep_ratio=False, + bbox_clip_border=self.bbox_clip_border, + backend=self.backend, + ) + ) + return results + + def enable_record(self, mode: bool = True): + self.record = mode + + +@PIPELINES.register_module() +class RandFlip(transforms.RandomFlip): + def __init__(self, record=False, **kwargs): + super().__init__(**kwargs) + self.record = record + + def __call__(self, results): + results = super().__call__(results) + if self.record: + if "aug_info" not in results: + results["aug_info"] = [] + if results["flip"]: + GTrans.apply( + results, + "flip", + direction=results["flip_direction"], + shape=results["img_shape"][:2], + ) + results["aug_info"].append( + dict( + type=self.__class__.__name__, + record=False, + flip_ratio=1.0, + direction=results["flip_direction"], + ) + ) + else: + results["aug_info"].append( + dict( + type=self.__class__.__name__, + record=False, + flip_ratio=0.0, + direction="vertical", + ) + ) + return results + + def enable_record(self, mode: bool = True): + self.record = mode + + +@PIPELINES.register_module() +class MultiBranch(object): + def __init__(self, **transform_group): + self.transform_group = {k: BaseCompose(v) for k, v in transform_group.items()} + + def __call__(self, results): + multi_results = [] + for k, v in self.transform_group.items(): + res = v(copy.deepcopy(results)) + if res is None: + return None + # res["img_metas"]["tag"] = k + multi_results.append(res) + return multi_results + + +if __name__ == '__main__': + # RandResize(img_scale=[(1333, 400), (1333, 1200)], + # multiscale_mode="range", + # keep_ratio=True), + # RandFlip(flip_ratio=0.5), + img_trans = Sequential( + transforms=[ + SplitClip(), + OneOf( + transforms=[ + dict(type=k) + for k in [ + "Identity", + "AutoContrast", + "RandEqualize", + "RandSolarize", + "RandColor", + "RandContrast", + "RandBrightness", + "RandSharpness", + "RandPosterize", + ] + ] + ), + Composeclip() + ] + ) + img = np.random.randint(0,255,size=[4,2,2,3]) + img = np.uint8(img) + box = None + # pdb.set_trace() + new_img, _ = img_trans((img,box)) + print(new_img.shape) \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/data/spatial_transforms.py b/Downstream/Spatial-Temporal-Action-Localization/data/spatial_transforms.py new file mode 100644 index 0000000..3ccdd9f --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/data/spatial_transforms.py @@ -0,0 +1,494 @@ +""" +Adapted code from: + @inproceedings{hara3dcnns, + author={Kensho Hara and Hirokatsu Kataoka and Yutaka Satoh}, + title={Can Spatiotemporal 3D CNNs Retrace the History of 2D CNNs and ImageNet?}, + booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + pages={6546--6555}, + year={2018}, + }. 
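+
+The transforms below act on numpy images (and, optionally, imgaug bounding boxes) and
+expose randomize_parameters() so one random draw can be reused for every frame of a
+clip. A minimal usage sketch (``mean``, ``std``, ``frames`` and ``boxes`` are
+placeholders, not names defined in this file):
+
+    transform = Compose([
+        Scale(256),
+        RandomHorizontalFlip(0.5),
+        ToTensor(),
+        Normalize(mean, std),
+    ])
+    transform.randomize_parameters()
+    out = [transform(frame, bounding_boxes=boxes) for frame in frames]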
+""" + +import random +import numbers +import collections + +import numpy as np +import torch +from PIL import Image +try: + import accimage +except ImportError: + accimage = None +import imgaug.augmenters as iaa +import math + +from .RandomAugmentBBox import RandomAugmentBBox + + +class Compose(object): + """Compose several transforms together. + Args: + transforms (list of ``Transform`` objects): List of transforms to compose. + Example: + >>> Compose([ + >>> CenterCrop(10), + >>> ToTensor(), + >>> ]) + """ + + def __init__(self, transforms): + self.transforms = transforms + + def __call__(self, image, bounding_boxes=None, **kwargs): + """ + Args: + img (PIL.Image): Image to be transformed. + Returns: + PIL.Image: Transformed image. + """ + if bounding_boxes is not None: + for t in self.transforms: + image, bounding_boxes = t(image, bounding_boxes, **kwargs) + return image, bounding_boxes + else: + for t in self.transforms: + image = t(image) + return image + + def randomize_parameters(self, **kwargs): + for t in self.transforms: + t.randomize_parameters(**kwargs) + + # def set_k_for_bbox_affine_transform(self, **kwargs): + # self.transforms[] + + def __repr__(self): + return self.__class__.__name__ + '(' + repr(self.transforms) + ')' + + +class RandomAugment(RandomAugmentBBox): + def __repr__(self): + return '{self.__class__.__name__}(aug_type={self.aug_type}, magnitude={self.magnitude})'.format(self=self) + + +class ToTensor(object): + """Convert a ``PIL.Image`` or ``numpy.ndarray`` to tensor. + Convert a PIL.Image or numpy.ndarray (H x W x C) in the range + [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range + [0.0, 255.0 / norm_value]. + Args: + norm_value (float, optional): Normalization constant. + """ + + def __init__(self, norm_value=255.): + self.norm_value = norm_value + + def __call__(self, pic, bboxes=None, **kwargs): + """ + Args: + pic (PIL.Image or numpy.ndarray): Image to be converted to tensor. + Returns: + Tensor: Converted image. 
+ """ + if isinstance(pic, np.ndarray): + # handle numpy array + img = torch.from_numpy(pic.transpose((2, 0, 1))) + # backward compatibility + if bboxes is not None: + return img.float().div(self.norm_value), bboxes + else: + return img.float().div(self.norm_value) + + if accimage is not None and isinstance(pic, accimage.Image): + nppic = np.zeros( + [pic.channels, pic.height, pic.width], dtype=np.float32) + pic.copyto(nppic) + if bboxes is not None: + return torch.from_numpy(nppic), bboxes + else: + return torch.from_numpy(nppic) + # handle PIL Image + if pic.mode == 'I': + img = torch.from_numpy(np.array(pic, np.int32, copy=False)) + elif pic.mode == 'I;16': + img = torch.from_numpy(np.array(pic, np.int16, copy=False)) + else: + img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes())) + # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK + if pic.mode == 'YCbCr': + nchannel = 3 + elif pic.mode == 'I;16': + nchannel = 1 + else: + nchannel = len(pic.mode) + img = img.view(pic.size[1], pic.size[0], nchannel) + # put it from HWC to CHW format + # yikes, this transpose takes 80% of the loading time/CPU + img = img.transpose(0, 1).transpose(0, 2).contiguous() + if isinstance(img, torch.ByteTensor): + if bboxes is not None: + return img.float().div(self.norm_value), bboxes + else: + return img.float().div(self.norm_value) + else: + if bboxes is not None: + return img, bboxes + else: + return img + + def randomize_parameters(self, **kwargs): + pass + + def __repr__(self): + return '{self.__class__.__name__}(norm_value={self.norm_value})'.format(self=self) + + +class Normalize(object): + """Normalize an tensor image with mean and standard deviation. + Given mean: (R, G, B) and std: (R, G, B), + will normalize each channel of the torch.*Tensor, i.e. + channel = (channel - mean) / std. + Args: + mean (sequence): Sequence of means for R, G, B channels respecitvely. + std (sequence): Sequence of standard deviations for R, G, B channels + respecitvely. + """ + + def __init__(self, mean, std): + self.mean = mean + self.std = std + + def __call__(self, tensor, bboxes=None, **kwargs): + """ + Args: + tensor (Tensor): Tensor image of size (C, H, W) to be normalized. + Returns: + Tensor: Normalized image. + """ + for t, m, s in zip(tensor, self.mean, self.std): + t.sub_(m).div_(s) + if bboxes is not None: + return tensor, bboxes + else: + return tensor + + def randomize_parameters(self, **kwargs): + pass + + def __repr__(self): + return '{self.__class__.__name__}(mean={self.mean}, std={self.std})'.format(self=self) + + +class Scale(object): + """Rescale the input PIL.Image to the given size. + Args: + size (sequence or int): Desired output size. If size is a sequence like + (w, h), output size will be matched to this. If size is an int, + smaller edge of the image will be matched to this number. + i.e, if height > width, then image will be rescaled to + (size * height / width, size). + interpolation (int, optional): Desired interpolation. Default is + ``PIL.Image.BILINEAR``. + max_ratio (float, optional): If not None, denotes maximum allowed aspect + ratio after rescaling the input. + """ + + # TO-DO: implement max_ratio with imgaug? 
+ def __init__(self, resize, interpolation='linear', max_ratio=2.42, fixed_size=False): + assert isinstance(resize, + int) or (isinstance(resize, collections.Iterable) and + len(resize) == 2) + self.resize = resize + self.interpolation = interpolation + self.max_size = int(max_ratio*resize) + if fixed_size: + self._resize = iaa.Resize(self.resize, interpolation=interpolation) + else: + self._resize = iaa.Resize({"shorter-side": self.resize, "longer-side": "keep-aspect-ratio"}, interpolation=interpolation) + self._resize_max = iaa.Resize({"shorter-side": "keep-aspect-ratio", "longer-side": self.max_size}, interpolation=interpolation) + + def __call__(self, image, bounding_boxes=None, **kwargs): + """ + Args: + img (PIL.Image): Image to be scaled. + Returns: + PIL.Image: Rescaled image. + """ + img_aug, boudingbox_aug = self._resize_det(image=image, bounding_boxes=bounding_boxes) + + newh, neww, _ = img_aug.shape + + if max(newh, neww) > self.max_size: + img_aug, boudingbox_aug = self._resize_det_max(image=image, bounding_boxes=bounding_boxes) + + if bounding_boxes is not None: + return img_aug, boudingbox_aug + else: + return img_aug + + def randomize_parameters(self, **kwargs): + self._resize_det = self._resize.to_deterministic() + self._resize_det_max = self._resize_max.to_deterministic() + + def __repr__(self): + return '{self.__class__.__name__}(resize={self.resize}, interpolation={self.interpolation}, max_size={self.max_size})'.format(self=self) + + +# TO-DO: re-implement with imgaug +class CenterCrop(object): + """Crop the given PIL.Image at the center. + Args: + size (sequence or int): Desired output size of the crop. If size is an + int instead of sequence like (h, w), a square crop (size, size) is + made. + """ + + def __init__(self, size): + if isinstance(size, numbers.Number): + self.size = (int(size), int(size)) + else: + self.size = size + + def __call__(self, img): + """ + Args: + img (PIL.Image): Image to be cropped. + Returns: + PIL.Image: Cropped image. + """ + w, h = img.size + th, tw = self.size + x1 = int(round((w - tw) / 2.)) + y1 = int(round((h - th) / 2.)) + return img.crop((x1, y1, x1 + tw, y1 + th)) + + def randomize_parameters(self): + return [{'transform': 'CenterCrop', 'size': self.size}] + + def __repr__(self): + return '{self.__class__.__name__}(size={self.size})'.format(self=self) + + +# TO-DO: re-implement with imgaug +class CornerCrop(object): + """Crop the given PIL.Image at some corner or the center. + Args: + size (int): Desired output size of the square crop. + crop_position (str, optional): Designate the position to be cropped. + If is None, a random position will be selected from five choices. + """ + + def __init__(self, size, crop_position=None): + """ + Args: + img (PIL.Image): Image to be cropped. + Returns: + PIL.Image: Cropped image. 
+ """ + self.size = size + if crop_position is None: + self.randomize = True + else: + self.randomize = False + self.crop_position = crop_position + self.crop_positions = ['c', 'tl', 'tr', 'bl', 'br'] + + def __call__(self, img): + image_width = img.size[0] + image_height = img.size[1] + + if self.crop_position == 'c': + th, tw = (self.size, self.size) + x1 = int(round((image_width - tw) / 2.)) + y1 = int(round((image_height - th) / 2.)) + x2 = x1 + tw + y2 = y1 + th + elif self.crop_position == 'tl': + x1 = 0 + y1 = 0 + x2 = self.size + y2 = self.size + elif self.crop_position == 'tr': + x1 = image_width - self.size + y1 = 0 + x2 = image_width + y2 = self.size + elif self.crop_position == 'bl': + x1 = 0 + y1 = image_height - self.size + x2 = self.size + y2 = image_height + elif self.crop_position == 'br': + x1 = image_width - self.size + y1 = image_height - self.size + x2 = image_width + y2 = image_height + + img = img.crop((x1, y1, x2, y2)) + + return img + + def randomize_parameters(self, param=None): + if self.randomize: + self.crop_position = self.crop_positions[random.randint( + 0, + len(self.crop_positions) - 1)] + return [{'transform': 'CornerCrop', 'crop_position': self.crop_position, + 'size': self.size}] + + def __repr__(self): + return '{self.__class__.__name__}(size={self.size}, crop_position={self.crop_position})'.format(self=self) + + +class RandomHorizontalFlip(object): + """Horizontally flip the given PIL.Image randomly with a given probability. + Args: + p (float, optional): Probability of flipping. + """ + + def __init__(self, p=0.5): + self.prob = p + self._flip = iaa.Fliplr(self.prob) + + def __call__(self, image, bounding_boxes=None, **kwargs): + """ + Args: + img (PIL.Image): Image to be flipped. + Returns: + PIL.Image: Randomly flipped image. + """ + img_aug, bboxes_aug = self._flip_det(image=image, bounding_boxes=bounding_boxes) + if bounding_boxes is not None: + return img_aug, bboxes_aug + else: + return img_aug + + def randomize_parameters(self, **kwargs): + self._flip_det = self._flip.to_deterministic() + + def __repr__(self): + return '{self.__class__.__name__}(prob={self.prob})'.format(self=self) + + +# TO-DO: re-implement with imgaug +class ScaleJitteringRandomCrop(object): + """Randomly rescale the given PIL.Image and then take a random crop. + Args: + min_scale (int): Minimum scale for random rescaling. + max_scale (int): Maximum scale for random rescaling. + size (int): Desired output size of the square crop. + interpolation (int, optional): Desired interpolation. Default is + ``PIL.Image.BILINEAR``. + """ + + def __init__(self, min_size, max_size, size, interpolation='linear'): + self.min_size = min_size + self.max_size = max_size + self.size = size + self.interpolation = interpolation + self._resize = iaa.Resize({"shorter-side": (self.min_size, self.max_size), "longer-side": "keep-aspect-ratio"}, interpolation=interpolation) + self._crop = iaa.CropToFixedSize(width=self.size, height=self.size) + + def __call__(self, image, bounding_boxes=None, **kwargs): + """ + Args: + img (PIL.Image): Image to be scaled. + Returns: + PIL.Image: Rescaled image. 
+ """ + img_aug, boudingbox_aug = self._resize_det(image=image, bounding_boxes=bounding_boxes) + img_aug, boudingbox_aug = self._crop_det(image=img_aug, bounding_boxes=boudingbox_aug) + + if bounding_boxes is not None: + return img_aug, boudingbox_aug + else: + return img_aug + + def randomize_parameters(self, **kwargs): + self._resize_det = self._resize.to_deterministic() + self._crop_det = self._crop.to_deterministic() + + def __repr__(self): + return '{self.__class__.__name__}(min_size={self.min_size}, max_size={self.max_size}, size={self.size}, interpolation={self.interpolation})'.format(self=self) + + +class CropBBoxRescale(object): + """Crop region within the bbox and resize to fixed size. + Args: + min_scale (int): Minimum scale for random rescaling. + max_scale (int): Maximum scale for random rescaling. + size (int): Desired output size of the square crop. + interpolation (int, optional): Desired interpolation. Default is + ``PIL.Image.BILINEAR``. + """ + + def __init__(self, resize, bbox_scales=None, interpolation='linear'): + if bbox_scales is None: + bbox_scales = [1, 1.5] + self.size = resize + self.interpolation = interpolation + self._resize = iaa.Resize(resize, interpolation=interpolation) + self.scale_list = bbox_scales + + def __call__(self, image, bounding_boxes=None, **kwargs): + """ + Args: + img (PIL.Image): Image to be scaled. + Returns: + PIL.Image: Rescaled image. + """ + h, w, _ = image.shape + + bbox = bounding_boxes[0] + bbox = scale_box(bbox, self.scale) + bbox = clip_box_to_image(bbox, h, w) + x1, y1, x2, y2 = round_down(bbox[0]), round_down(bbox[1]), round_down(bbox[2]), round_down(bbox[3]) + # Crop region inside the bbox + img_cropped = image[y1:y2, x1:x2,:] + # Resize cropped image to fixed size + img_aug = self._resize_det(image=img_cropped) + + # if bounding_boxes is not None: + # return img_aug, boudingbox_aug + # else: + + return img_aug, bounding_boxes + + def randomize_parameters(self, **kwargs): + self.scale = random.uniform(self.scale_list[0], self.scale_list[1]) + self._resize_det = self._resize.to_deterministic() + + def __repr__(self): + return '{self.__class__.__name__}(size={self.size}, interpolation={self.interpolation})'.format(self=self) + + +def round_down(x): + return int(math.floor(x)) + + +def clip_box_to_image(box, height, width): + """Clip an box to an image with the given height and width.""" + box[[0, 2]] = np.minimum(width - 1., np.maximum(0., box[[0, 2]])) + box[[1, 3]] = np.minimum(height - 1., np.maximum(0., box[[1, 3]])) + return box + + +def scale_box(box, scale): + """ + box: (4,) [w1, h1, w2, h2] + """ + w1, h1, w2, h2 = box.x1, box.y1, box.x2, box.y2 + center_w = (w1 + w2) / 2 + center_h = (h1 + h2) / 2 + + half_w = center_w - w1 + half_h = center_h - h1 + + half_w = scale * half_w + half_h = scale * half_h + + new_box = np.array([center_w-half_w, center_h-half_h, + center_w+half_w, center_h+half_h]) + + return new_box \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/data/transforms.py b/Downstream/Spatial-Temporal-Action-Localization/data/transforms.py new file mode 100644 index 0000000..5a795fe --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/data/transforms.py @@ -0,0 +1,72 @@ + +from operator import is_ +from alphaction.dataset.transforms import video_transforms as T +# from rand_aug import * +from dataclasses import dataclass +import numpy as np +from data.RandomAugmentBBox import * +import cv2 + +cv2.setNumThreads(0) + + +@dataclass +class TransformsCfg: + 
MIN_SIZE_TRAIN: int = 256 + MAX_SIZE_TRAIN: int = 464 + MIN_SIZE_TEST: int = 256 + MAX_SIZE_TEST: int = 464 + PIXEL_MEAN = [122.7717, 115.9465, 102.9801] + PIXEL_STD = [57.375, 57.375, 57.375] + TO_BGR: bool = False + + FRAME_NUM: int = 16 # 16 + FRAME_SAMPLE_RATE: int = 4 # 4 + + COLOR_JITTER: bool = True + HUE_JITTER: float = 20.0 + SAT_JITTER: float = 0.1 + VAL_JITTER: float = 0.1 + + +def build_transforms(cfg=TransformsCfg(), is_train=True, args=None): + # build transforms for training of testing + if is_train: + min_size = cfg.MIN_SIZE_TRAIN + max_size = cfg.MAX_SIZE_TRAIN + color_jitter = cfg.COLOR_JITTER + flip_prob = 0.5 + else: + min_size = cfg.MIN_SIZE_TEST + max_size = cfg.MAX_SIZE_TEST + color_jitter = False + flip_prob = 0 + + frame_num = cfg.FRAME_NUM + sample_rate = cfg.FRAME_SAMPLE_RATE + + if color_jitter: + color_transform = T.ColorJitter( + cfg.HUE_JITTER, cfg.SAT_JITTER, cfg.VAL_JITTER + ) + else: + color_transform = T.Identity() + + to_bgr = cfg.TO_BGR + normalize_transform = T.Normalize( + mean=cfg.PIXEL_MEAN, std=cfg.PIXEL_STD, to_bgr=to_bgr + ) + transform = T.Compose( + [ + T.TemporalCrop(frame_num, sample_rate), + T.Resize(min_size, max_size), + T.RandomClip(is_train), + color_transform, + T.RandomHorizontalFlip(flip_prob), + T.ToTensor(), + normalize_transform, + ] + ) + + return transform + \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/datasets.py b/Downstream/Spatial-Temporal-Action-Localization/datasets.py new file mode 100644 index 0000000..90249f7 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/datasets.py @@ -0,0 +1,301 @@ +import os +from torchvision import transforms +from transforms import * +from masking_generator import TubeMaskingGenerator +from kinetics import VideoClsDataset, VideoMAE + +from data.ava import AVAVideoDataset,KineticsDataset,AKDataset +from data.transforms import TransformsCfg +import alphaction.config.paths_catalog as paths_catalog +from alphaction.dataset.collate_batch import batch_different_videos + + +class DataAugmentationForVideoMAE(object): + def __init__(self, args): + self.input_mean = [0.485, 0.456, 0.406] # IMAGENET_DEFAULT_MEAN + self.input_std = [0.229, 0.224, 0.225] # IMAGENET_DEFAULT_STD + normalize = GroupNormalize(self.input_mean, self.input_std) + self.train_augmentation = GroupMultiScaleCrop(args.input_size, [1, .875, .75, .66]) + self.transform = transforms.Compose([ + self.train_augmentation, + Stack(roll=False), + ToTorchFormatTensor(div=True), + normalize, + ]) + if args.mask_type == 'tube': + self.masked_position_generator = TubeMaskingGenerator( + args.window_size, args.mask_ratio + ) + + def __call__(self, images): + process_data , _ = self.transform(images) + return process_data, self.masked_position_generator() + + def __repr__(self): + repr = "(DataAugmentationForVideoMAE,\n" + repr += " transform = %s,\n" % str(self.transform) + repr += " Masked position generator = %s,\n" % str(self.masked_position_generator) + repr += ")" + return repr + + +def build_pretraining_dataset(args): + transform = DataAugmentationForVideoMAE(args) + dataset = VideoMAE( + root=None, + setting=args.data_path, + video_ext='mp4', + is_color=True, + modality='rgb', + new_length=args.num_frames, + new_step=args.sampling_rate, + transform=transform, + temporal_jitter=False, + video_loader=True, + use_decord=True, + lazy_init=False) + print("Data Aug = %s" % str(transform)) + return dataset + + +def build_dataset(is_train, test_mode, args): + if args.data_set == 
'Kinetics-400': + mode = None + anno_path = None + if is_train == True: + mode = 'train' + anno_path = os.path.join(args.data_path, 'train.csv') + elif test_mode == True: + mode = 'test' + anno_path = os.path.join(args.data_path, 'val.csv') + else: + mode = 'validation' + anno_path = os.path.join(args.data_path, 'test.csv') + + dataset = VideoClsDataset( + anno_path=anno_path, + data_path='/', + mode=mode, + clip_len=args.num_frames, + frame_sample_rate=args.sampling_rate, + num_segment=1, + test_num_segment=args.test_num_segment, + test_num_crop=args.test_num_crop, + num_crop=1 if not test_mode else 3, + keep_aspect_ratio=True, + crop_size=args.input_size, + short_side_size=args.short_side_size, + new_height=256, + new_width=320, + args=args) + nb_classes = 400 + + elif args.data_set == 'SSV2': + mode = None + anno_path = None + if is_train == True: + mode = 'train' + anno_path = os.path.join(args.data_path, 'train.csv') + elif test_mode == True: + mode = 'test' + anno_path = os.path.join(args.data_path, 'val.csv') + else: + mode = 'validation' + anno_path = os.path.join(args.data_path, 'test.csv') + + dataset = VideoClsDataset( + anno_path=anno_path, + data_path='/', + mode=mode, + clip_len=args.num_frames, + frame_sample_rate=args.sampling_rate, + num_segment=1, + test_num_segment=args.test_num_segment, + test_num_crop=args.test_num_crop, + num_crop=1 if not test_mode else 3, + keep_aspect_ratio=True, + crop_size=args.input_size, + short_side_size=args.short_side_size, + new_height=256, + new_width=320, + args=args) + nb_classes = 174 + + elif args.data_set == 'UCF101': + mode = None + anno_path = None + if is_train == True: + mode = 'train' + anno_path = os.path.join(args.data_path, 'train.csv') + elif test_mode == True: + mode = 'test' + anno_path = os.path.join(args.data_path, 'val.csv') + else: + mode = 'validation' + anno_path = os.path.join(args.data_path, 'test.csv') + + dataset = VideoClsDataset( + anno_path=anno_path, + data_path='/', + mode=mode, + clip_len=args.num_frames, + frame_sample_rate=args.sampling_rate, + num_segment=1, + test_num_segment=args.test_num_segment, + test_num_crop=args.test_num_crop, + num_crop=1 if not test_mode else 3, + keep_aspect_ratio=True, + crop_size=args.input_size, + short_side_size=args.short_side_size, + new_height=256, + new_width=320, + args=args) + nb_classes = 101 + + elif args.data_set == 'HMDB51': + mode = None + anno_path = None + if is_train == True: + mode = 'train' + anno_path = os.path.join(args.data_path, 'train.csv') + elif test_mode == True: + mode = 'test' + anno_path = os.path.join(args.data_path, 'val.csv') + else: + mode = 'validation' + anno_path = os.path.join(args.data_path, 'test.csv') + + dataset = VideoClsDataset( + anno_path=anno_path, + data_path='/', + mode=mode, + clip_len=args.num_frames, + frame_sample_rate=args.sampling_rate, + num_segment=1, + test_num_segment=args.test_num_segment, + test_num_crop=args.test_num_crop, + num_crop=1 if not test_mode else 3, + keep_aspect_ratio=True, + crop_size=args.input_size, + short_side_size=args.short_side_size, + new_height=256, + new_width=320, + args=args) + nb_classes = 51 + else: + raise NotImplementedError() + assert nb_classes == args.nb_classes + print("Number of the class = %d" % args.nb_classes) + + return dataset, nb_classes + + +class BatchCollator(object): + """ + From a list of samples from the dataset, + returns the batched objectimages and targets. 
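+    Each batch is returned as a (video_data, boxes, video_ids) tuple, with the clips
+    collated by batch_different_videos using the configured size_divisible.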
+ This should be passed to the DataLoader + """ + def __init__(self, size_divisible=0): + self.divisible = size_divisible + self.size_divisible = self.divisible + + def __call__(self, batch): + transposed_batch = list(zip(*batch)) + video_data = batch_different_videos(transposed_batch[0], self.size_divisible) + boxes = transposed_batch[1] + video_ids = transposed_batch[2] + return video_data, boxes, video_ids + + +def build_ava_dataset(is_train, transforms): + input_filename = 'ava_video_train_v2.2' if is_train else 'ava_video_val_v2.2' + assert input_filename + dataset_catalog = paths_catalog.DatasetCatalog + data = dataset_catalog.get(input_filename) + ava_args = data["args"] + ava_args["remove_clips_without_annotations"] = is_train + ava_args["frame_span"] = TransformsCfg.FRAME_NUM * TransformsCfg.FRAME_SAMPLE_RATE # 64 + if not is_train: + ava_args["box_thresh"] = 0.8 # + ava_args["action_thresh"] = 0. # + else: + ava_args["box_file"] = None + + ava_args["transforms"] = transforms + dataset = AVAVideoDataset(**ava_args) + return dataset + +def build_kinetics_dataset(is_train,transforms): + input_filename = 'ava_video_train_v2.2' if is_train else 'ava_video_val_v2.2' + kinetics_args = {} + if is_train: + kinetics_args['kinetics_annfile'] = "/mnt/cache/xingsen/xingsen2/kinetics_train_v1.0.json" + kinetics_args['box_file'] = None + kinetics_args['transforms'] = transforms + kinetics_args['remove_clips_without_annotations'] = True + kinetics_args['frame_span'] = TransformsCfg.FRAME_NUM * TransformsCfg.FRAME_SAMPLE_RATE # 64 + else: + kinetics_args['kinetics_annfile'] = "/mnt/cache/xingsen/xingsen2/kinetics_val_v1.0.json" + kinetics_args['box_file'] = '/mnt/cache/xingsen/xingsen2/kinetics_person_box.json' + kinetics_args['box_thresh'] = 0.8 + kinetics_args['action_thresh'] = 0. 
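+        # unlike training, validation relies on precomputed person boxes
+        # (kinetics_person_box.json); by their names, box_thresh / action_thresh act as
+        # score thresholds on those detections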
+ kinetics_args['transforms'] = transforms + kinetics_args['remove_clips_without_annotations'] = True + kinetics_args['frame_span'] = TransformsCfg.FRAME_NUM * TransformsCfg.FRAME_SAMPLE_RATE # 64 + kinetics_args['eval_file_paths'] = { + "csv_gt_file" : '/mnt/cache/xingsen/xingsen2/kinetics_val_gt_v1.0.csv', + "labelmap_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_action_list_v2.2_for_activitynet_2019.pbtxt", + "exclusion_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_val_excluded_timestamps_v2.2.csv", + } + dataset = KineticsDataset(**kinetics_args) + return dataset + + +def build_ak_dataset(is_train,transforms): + input_filename = "ak_train" if is_train else "ak_val" + assert input_filename + dataset_catalog = paths_catalog.DatasetCatalog + data_args = dataset_catalog.get(input_filename) + if is_train: + data_args['video_root'] = '/mnt/cache/xingsen/ava_dataset/AVA/clips/trainval/' + data_args['ava_annfile'] = "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_train_v2.2_min.json" + data_args['kinetics_annfile'] = "/mnt/cache/xingsen/xingsen2/kinetics_train_v1.0.json" + data_args['transforms'] = transforms + data_args['remove_clips_without_annotations'] = True + data_args['frame_span'] = TransformsCfg.FRAME_NUM * TransformsCfg.FRAME_SAMPLE_RATE # 64 + data_args["ava_eval_file_path"] = { + "csv_gt_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_train_v2.2.csv", + "labelmap_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_action_list_v2.2_for_activitynet_2019.pbtxt", + "exclusion_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_train_excluded_timestamps_v2.2.csv", + } + else: + data_args['video_root'] = '/mnt/cache/xingsen/ava_dataset/AVA/clips/trainval/' + data_args['ava_annfile'] = "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_val_v2.2_min.json" + data_args['kinetics_annfile'] = "/mnt/cache/xingsen/xingsen2/kinetics_val_v1.0.json" + data_args['box_thresh'] = 0.8 + data_args['action_thresh'] = 0. 
+ data_args['transforms'] = transforms + data_args['remove_clips_without_annotations'] = True + data_args['kinetics_box_file'] = '/mnt/cache/xingsen/xingsen2/kinetics_person_box.json' + data_args['ava_box_file'] = '/mnt/cache/xingsen/ava_dataset/AVA/boxes/ava_val_det_person_bbox.json' + data_args['frame_span'] = TransformsCfg.FRAME_NUM * TransformsCfg.FRAME_SAMPLE_RATE # 64 + data_args['eval_file_paths'] = { + "csv_gt_file":'/mnt/cache/xingsen/xingsen2/ak_val_gt.csv', + "labelmap_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_action_list_v2.2_for_activitynet_2019.pbtxt", + "exclusion_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_val_excluded_timestamps_v2.2.csv", + } + data_args["ava_eval_file_path"] = { + "csv_gt_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_val_v2.2.csv", + "labelmap_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_action_list_v2.2_for_activitynet_2019.pbtxt", + "exclusion_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_val_excluded_timestamps_v2.2.csv", + } + data_args['kinetics_eval_file_path'] = { + "csv_gt_file" : '/mnt/cache/xingsen/xingsen2/kinetics_val_gt_v1.0.csv', + "labelmap_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_action_list_v2.2_for_activitynet_2019.pbtxt", + "exclusion_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_val_excluded_timestamps_v2.2.csv", + } + + + dataset = AKDataset(**data_args) + return dataset diff --git a/Downstream/Spatial-Temporal-Action-Localization/engine_for_finetuning.py b/Downstream/Spatial-Temporal-Action-Localization/engine_for_finetuning.py new file mode 100644 index 0000000..c2d5434 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/engine_for_finetuning.py @@ -0,0 +1,364 @@ +from itertools import count +import os +import numpy as np +import math +import sys +import time +import datetime +import logging +from typing import Iterable, Optional +import torch +import torch.nn as nn + +import torch.nn.functional as F + +from timm.data import Mixup +from timm.utils import accuracy, ModelEma +import utils +from alphaction.modeling.utils import cat +from alphaction.structures.bounding_box import BoxList +from data.ava_eval import do_ava_evaluation +import pdb + + +def train_class_batch(model, samples, boxes): + outputs = model(samples, boxes) + labels = cat([proposal.get_field("labels") for proposal in boxes], dim=0) # [n,80] + assert outputs.shape[1] == labels.shape[1], \ + "The shape of tensor class logits doesn't match the label tensor." 
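For intuition about the loss computed just below: AVA-style action detection is multi-label, so each RoI gets an independent per-class BCE, and because batch_size there is the number of RoIs (outputs.shape[0]), scaling the element-wise mean by it is equivalent to summing the class-averaged BCE over RoIs. A small self-contained check with illustrative shapes (3 RoIs, 80 classes):

import torch
import torch.nn.functional as F

logits = torch.randn(3, 80)                    # class logits for 3 RoIs
labels = torch.randint(0, 2, (3, 80)).float()  # multi-hot action labels

scaled_mean = F.binary_cross_entropy_with_logits(logits, labels, reduction='mean') * logits.shape[0]
per_roi_sum = F.binary_cross_entropy_with_logits(logits, labels, reduction='none').mean(dim=1).sum()
assert torch.allclose(scaled_mean, per_roi_sum)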
+ # loss = criterion(outputs, target) + batch_size = outputs.shape[0] + loss = F.binary_cross_entropy_with_logits(outputs, labels.to(dtype=torch.float32), reduction='mean') + loss = loss * batch_size + + return loss, outputs + + +def get_loss_scale_for_deepspeed(model): + optimizer = model.optimizer + return optimizer.loss_scale if hasattr(optimizer, "loss_scale") else optimizer.cur_scale + + +def train_one_epoch(model: torch.nn.Module, + data_loader: Iterable, optimizer: torch.optim.Optimizer, + device: torch.device, epoch: int, loss_scaler, max_norm: float = 0, + model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None, log_writer=None, + start_steps=None, lr_schedule_values=None, wd_schedule_values=None, + num_training_steps_per_epoch=None, update_freq=None): + model.train(True) + metric_logger = utils.MetricLogger(delimiter=" ") + metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) + metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) + header = 'Epoch: [{}]'.format(epoch) + print_freq = 10 + + if loss_scaler is None: + model.zero_grad() + model.micro_steps = 0 + else: + optimizer.zero_grad() + + for data_iter_step, (samples, boxes, _) in enumerate(metric_logger.log_every(data_loader, print_freq, header)): + step = data_iter_step // update_freq + if step >= num_training_steps_per_epoch: + continue + it = start_steps + step # global training iteration + # Update LR & WD for the first acc + if lr_schedule_values is not None or wd_schedule_values is not None and data_iter_step % update_freq == 0: + for i, param_group in enumerate(optimizer.param_groups): + if lr_schedule_values is not None: + param_group["lr"] = lr_schedule_values[it] * param_group["lr_scale"] + if wd_schedule_values is not None and param_group["weight_decay"] > 0: + param_group["weight_decay"] = wd_schedule_values[it] + + samples = samples.to(device, non_blocking=True) + boxes = [box.to(device=device) for box in boxes] # boxlist + # targets = targets.to(device, non_blocking=True) + + # if mixup_fn is not None: + # samples, targets = mixup_fn(samples, targets) + + if loss_scaler is None: + samples = samples.half() + loss, _ = train_class_batch( + model, samples, boxes) + else: + with torch.cuda.amp.autocast(): + loss, _ = train_class_batch( + model, samples, boxes) + + loss_value = loss.item() + + if not math.isfinite(loss_value): + print("Loss is {}, stopping training".format(loss_value)) + sys.exit(1) + + if loss_scaler is None: + loss /= update_freq + model.backward(loss) + model.step() + + if (data_iter_step + 1) % update_freq == 0: + # model.zero_grad() + # Deepspeed will call step() & model.zero_grad() automatic + if model_ema is not None: + model_ema.update(model) + grad_norm = None + loss_scale_value = get_loss_scale_for_deepspeed(model) + else: + # this attribute is added by timm on one optimizer (adahessian) + is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order + loss /= update_freq + grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm, + parameters=model.parameters(), create_graph=is_second_order, + update_grad=(data_iter_step + 1) % update_freq == 0) + if (data_iter_step + 1) % update_freq == 0: + optimizer.zero_grad() + if model_ema is not None: + model_ema.update(model) + loss_scale_value = loss_scaler.state_dict()["scale"] + + torch.cuda.synchronize() + + # if mixup_fn is None: + # class_acc = (output.max(-1)[-1] == targets).float().mean() + # else: + # class_acc = None + 
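The update logic above implements gradient accumulation: the loss is divided by update_freq and the optimizer (or DeepSpeed engine) only steps every update_freq iterations. A stripped-down sketch of just that pattern, with a toy model and no mixed precision or DeepSpeed:

import torch
import torch.nn.functional as F

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
update_freq = 4

optimizer.zero_grad()
for step in range(16):
    x, y = torch.randn(8, 10), torch.randn(8, 2)
    loss = F.mse_loss(model(x), y) / update_freq  # scale so accumulated gradients average correctly
    loss.backward()                               # gradients accumulate across iterations
    if (step + 1) % update_freq == 0:
        optimizer.step()
        optimizer.zero_grad()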
metric_logger.update(loss=loss_value) + # metric_logger.update(class_acc=class_acc) + metric_logger.update(loss_scale=loss_scale_value) + min_lr = 10. + max_lr = 0. + for group in optimizer.param_groups: + min_lr = min(min_lr, group["lr"]) + max_lr = max(max_lr, group["lr"]) + + metric_logger.update(lr=max_lr) + metric_logger.update(min_lr=min_lr) + weight_decay_value = None + for group in optimizer.param_groups: + if group["weight_decay"] > 0: + weight_decay_value = group["weight_decay"] + metric_logger.update(weight_decay=weight_decay_value) + metric_logger.update(grad_norm=grad_norm) + + if log_writer is not None: + log_writer.update(loss=loss_value, head="loss") + # log_writer.update(class_acc=class_acc, head="loss") + log_writer.update(loss_scale=loss_scale_value, head="opt") + log_writer.update(lr=max_lr, head="opt") + log_writer.update(min_lr=min_lr, head="opt") + log_writer.update(weight_decay=weight_decay_value, head="opt") + log_writer.update(grad_norm=grad_norm, head="opt") + + log_writer.set_step() + + # gather the stats from all processes + metric_logger.synchronize_between_processes() + print("Averaged stats:", metric_logger) + return {k: meter.global_avg for k, meter in metric_logger.meters.items()} + + +class PostProcessor(nn.Module): + + def forward(self, class_logits, boxes): + # boxes should be (#detections,4) + # prob should be calculated in different way. + class_logits = torch.sigmoid(class_logits) # [n,80] + # 给action_prob乘以box分数 + box_scores = cat([box.get_field("scores") for box in boxes], dim=0) + box_scores = box_scores.reshape(class_logits.shape[0], 1) # [B,1] + action_prob = class_logits * box_scores + + image_shapes = [box.size for box in boxes] + boxes_per_image = [len(box) for box in boxes] + box_tensors = [a.bbox for a in boxes] + + action_prob = action_prob.split(boxes_per_image, dim=0) # [rois,80]->[bs,per_roi,80] + + results = [] + for prob, boxes_per_image, image_shape in zip( + action_prob, box_tensors, image_shapes + ): + boxlist = self.prepare_boxlist(boxes_per_image, prob, image_shape) + results.append(boxlist) + return results + + def prepare_boxlist(self, boxes, scores, image_shape): + boxlist = BoxList(boxes, image_shape, mode="xyxy") + boxlist.add_field("scores", scores) + return boxlist + + +@torch.no_grad() +def validation_one_epoch(data_loader, model, device, output_dir, epoch, log_writer=None): + if not utils.is_main_process(): + return + + metric_logger = utils.MetricLogger(delimiter=" ") + header = 'Val:' + + # switch to evaluation mode + model.eval() + + logging.info("Start evaluation on ava_v2.2 dataset({} videos).".format(data_loader.num_samples)) + start_time = time.time() + + cpu_device = torch.device("cpu") + results_dict = {} + postprocess = PostProcessor() + for batch in metric_logger.log_every(data_loader, 10, header): + # pdb.set_trace() + videos = batch[0] + boxes = batch[1] + video_ids = batch[2] + + videos = videos.to(device, non_blocking=True) + boxes = [box.to(device=device) for box in boxes] # boxlist + # target = target.to(device, non_blocking=True) + + # compute output + with torch.cuda.amp.autocast(): + output = model(videos, boxes) # [n,80] + output = postprocess(output, boxes) + output = [o.to(cpu_device) for o in output] + results_dict.update( + {video_id: result for video_id, result in zip(video_ids, output)} + ) + # pdb.set_trace() + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=total_time)) + logging.info( + "Total inference time: {}".format(total_time_str) + ) + + # 
convert the results dict, indexed by video id, into a list ordered by index
+    video_ids = list(sorted(results_dict.keys()))
+    if len(video_ids) != video_ids[-1] + 1:
+        logging.warning(
+            "Number of videos that were gathered from multiple processes is not "
+            "a contiguous set. Some videos might be missing from the evaluation"
+        )
+    # convert to a list
+    predictions = [results_dict[i] for i in video_ids]
+
+    logging.info("Performing ava evaluation")
+
+    output_folder = os.path.join(output_dir, "inference")
+    os.makedirs(output_folder, exist_ok=True)
+
+    eval_res = do_ava_evaluation(
+        dataset=data_loader.dataset,
+        predictions=predictions,
+        output_folder=output_folder,
+    )
+
+    if log_writer is not None:
+        eval_res, _ = eval_res
+        total_mAP = eval_res['PascalBoxes_Precision/mAP@0.5IOU']
+        log_writer.update(map=total_mAP, head="perf", step=epoch)
+
+# The functions below are not used in this pipeline.
+@torch.no_grad()
+def final_test(data_loader, model, device, file):
+    criterion = torch.nn.CrossEntropyLoss()
+
+    metric_logger = utils.MetricLogger(delimiter="  ")
+    header = 'Test:'
+
+    # switch to evaluation mode
+    model.eval()
+    final_result = []
+
+    for batch in metric_logger.log_every(data_loader, 10, header):
+        videos = batch[0]
+        target = batch[1]
+        ids = batch[2]
+        chunk_nb = batch[3]
+        split_nb = batch[4]
+        videos = videos.to(device, non_blocking=True)
+        target = target.to(device, non_blocking=True)
+
+        # compute output
+        with torch.cuda.amp.autocast():
+            output = model(videos)
+            loss = criterion(output, target)
+
+        for i in range(output.size(0)):
+            string = "{} {} {} {} {}\n".format(ids[i], \
+                str(output.data[i].cpu().numpy().tolist()), \
+                str(int(target[i].cpu().numpy())), \
+                str(int(chunk_nb[i].cpu().numpy())), \
+                str(int(split_nb[i].cpu().numpy())))
+            final_result.append(string)
+
+        acc1, acc5 = accuracy(output, target, topk=(1, 5))
+
+        batch_size = videos.shape[0]
+        metric_logger.update(loss=loss.item())
+        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
+        metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
+
+    if not os.path.exists(file):
+        os.mknod(file)
+    with open(file, 'w') as f:
+        f.write("{}, {}\n".format(acc1, acc5))
+        for line in final_result:
+            f.write(line)
+    # gather the stats from all processes
+    metric_logger.synchronize_between_processes()
+    print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
+          .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
+
+    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
+
+
+# Merge per-rank prediction files ("0.txt", "1.txt", ...), average the multi-view logits for each
+# video, and compute video-level top-1/top-5 accuracy.
+def merge(eval_path, num_tasks):
+    dict_feats = {}
+    dict_label = {}
+    dict_pos = {}
+    print("Reading individual output files")
+
+    for x in range(num_tasks):
+        file = os.path.join(eval_path, str(x) + '.txt')
+        with open(file, 'r') as f:
+            lines = f.readlines()[1:]
+        for line in lines:
+            line = line.strip()
+            name = line.split('[')[0]
+            label = line.split(']')[1].split(' ')[1]
+            chunk_nb = line.split(']')[1].split(' ')[2]
+            split_nb = line.split(']')[1].split(' ')[3]
+            data = np.fromstring(line.split('[')[1].split(']')[0], dtype=float, sep=',')
+            if name not in dict_feats:
+                dict_feats[name] = []
+                dict_label[name] = 0
+                dict_pos[name] = []
+            if chunk_nb + split_nb in dict_pos[name]:
+                continue
+            dict_feats[name].append(data)
+            dict_pos[name].append(chunk_nb + split_nb)
+            dict_label[name] = label
+    print("Computing final results")
+
+    input_lst = []
+    print(len(dict_feats))
+    for i, item in enumerate(dict_feats):
+        input_lst.append([i, item, dict_feats[item], dict_label[item]])
+    from multiprocessing import Pool
+    p = 
Pool(64) + ans = p.map(compute_video, input_lst) + top1 = [x[1] for x in ans] + top5 = [x[2] for x in ans] + pred = [x[0] for x in ans] + label = [x[3] for x in ans] + final_top1 ,final_top5 = np.mean(top1), np.mean(top5) + return final_top1*100 ,final_top5*100 + +def compute_video(lst): + i, video_id, data, label = lst + feat = [x for x in data] + feat = np.mean(feat, axis=0) + pred = np.argmax(feat) + top1 = (int(pred) == int(label)) * 1.0 + top5 = (int(label) in np.argsort(-feat)[:5]) * 1.0 + return [pred, top1, top5, int(label)] diff --git a/Downstream/Spatial-Temporal-Action-Localization/engine_for_pretraining.py b/Downstream/Spatial-Temporal-Action-Localization/engine_for_pretraining.py new file mode 100644 index 0000000..006b860 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/engine_for_pretraining.py @@ -0,0 +1,105 @@ +import math +import sys +from typing import Iterable +import torch +import torch.nn as nn +import utils +from einops import rearrange +from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD + +def train_one_epoch(model: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, + device: torch.device, epoch: int, loss_scaler, max_norm: float = 0, patch_size: int = 16, + normlize_target: bool = True, log_writer=None, lr_scheduler=None, start_steps=None, + lr_schedule_values=None, wd_schedule_values=None): + model.train() + metric_logger = utils.MetricLogger(delimiter=" ") + metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) + metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) + header = 'Epoch: [{}]'.format(epoch) + print_freq = 10 + + loss_func = nn.MSELoss() + + for step, batch in enumerate(metric_logger.log_every(data_loader, print_freq, header)): + # assign learning rate & weight decay for each step + it = start_steps + step # global training iteration + if lr_schedule_values is not None or wd_schedule_values is not None: + for i, param_group in enumerate(optimizer.param_groups): + if lr_schedule_values is not None: + param_group["lr"] = lr_schedule_values[it] * param_group["lr_scale"] + if wd_schedule_values is not None and param_group["weight_decay"] > 0: + param_group["weight_decay"] = wd_schedule_values[it] + + videos, bool_masked_pos = batch + videos = videos.to(device, non_blocking=True) + bool_masked_pos = bool_masked_pos.to(device, non_blocking=True).flatten(1).to(torch.bool) + + with torch.no_grad(): + # calculate the predict label + mean = torch.as_tensor(IMAGENET_DEFAULT_MEAN).to(device)[None, :, None, None, None] + std = torch.as_tensor(IMAGENET_DEFAULT_STD).to(device)[None, :, None, None, None] + unnorm_videos = videos * std + mean # in [0, 1] + + if normlize_target: + videos_squeeze = rearrange(unnorm_videos, 'b c (t p0) (h p1) (w p2) -> b (t h w) (p0 p1 p2) c', p0=2, p1=patch_size, p2=patch_size) + videos_norm = (videos_squeeze - videos_squeeze.mean(dim=-2, keepdim=True) + ) / (videos_squeeze.var(dim=-2, unbiased=True, keepdim=True).sqrt() + 1e-6) + # we find that the mean is about 0.48 and standard deviation is about 0.08. 
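To make the normalized-target construction here concrete: with the default patch_size of 16 and the hard-coded temporal tubelet of 2 frames, a 16-frame 224x224 clip yields 8*14*14 = 1568 tokens, each a (2*16*16, 3) block that is normalized per token before being flattened just below. A quick shape check using only torch and einops (values are random):

import torch
from einops import rearrange

videos = torch.rand(1, 3, 16, 224, 224)  # B, C, T, H, W, already in [0, 1]
patches = rearrange(videos, 'b c (t p0) (h p1) (w p2) -> b (t h w) (p0 p1 p2) c',
                    p0=2, p1=16, p2=16)
print(patches.shape)                      # torch.Size([1, 1568, 512, 3])

normed = (patches - patches.mean(dim=-2, keepdim=True)) / (
    patches.var(dim=-2, unbiased=True, keepdim=True).sqrt() + 1e-6)
print(rearrange(normed, 'b n p c -> b n (p c)').shape)  # torch.Size([1, 1568, 1536])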
+ videos_patch = rearrange(videos_norm, 'b n p c -> b n (p c)') + else: + videos_patch = rearrange(unnorm_videos, 'b c (t p0) (h p1) (w p2) -> b (t h w) (p0 p1 p2 c)', p0=2, p1=patch_size, p2=patch_size) + + B, _, C = videos_patch.shape + labels = videos_patch[bool_masked_pos].reshape(B, -1, C) + + with torch.cuda.amp.autocast(): + outputs = model(videos, bool_masked_pos) + loss = loss_func(input=outputs, target=labels) + + loss_value = loss.item() + + if not math.isfinite(loss_value): + print("Loss is {}, stopping training".format(loss_value)) + sys.exit(1) + + optimizer.zero_grad() + # this attribute is added by timm on one optimizer (adahessian) + is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order + grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm, + parameters=model.parameters(), create_graph=is_second_order) + loss_scale_value = loss_scaler.state_dict()["scale"] + + torch.cuda.synchronize() + + metric_logger.update(loss=loss_value) + metric_logger.update(loss_scale=loss_scale_value) + min_lr = 10. + max_lr = 0. + for group in optimizer.param_groups: + min_lr = min(min_lr, group["lr"]) + max_lr = max(max_lr, group["lr"]) + + metric_logger.update(lr=max_lr) + metric_logger.update(min_lr=min_lr) + weight_decay_value = None + for group in optimizer.param_groups: + if group["weight_decay"] > 0: + weight_decay_value = group["weight_decay"] + metric_logger.update(weight_decay=weight_decay_value) + metric_logger.update(grad_norm=grad_norm) + + if log_writer is not None: + log_writer.update(loss=loss_value, head="loss") + log_writer.update(loss_scale=loss_scale_value, head="opt") + log_writer.update(lr=max_lr, head="opt") + log_writer.update(min_lr=min_lr, head="opt") + log_writer.update(weight_decay=weight_decay_value, head="opt") + log_writer.update(grad_norm=grad_norm, head="opt") + log_writer.set_step() + + if lr_scheduler is not None: + lr_scheduler.step_update(start_steps + step) + # gather the stats from all processes + metric_logger.synchronize_between_processes() + print("Averaged stats:", metric_logger) + return {k: meter.global_avg for k, meter in metric_logger.meters.items()} diff --git a/Downstream/Spatial-Temporal-Action-Localization/figs/videomae.png b/Downstream/Spatial-Temporal-Action-Localization/figs/videomae.png new file mode 100644 index 0000000..a598216 Binary files /dev/null and b/Downstream/Spatial-Temporal-Action-Localization/figs/videomae.png differ diff --git a/Downstream/Spatial-Temporal-Action-Localization/functional.py b/Downstream/Spatial-Temporal-Action-Localization/functional.py new file mode 100644 index 0000000..8e12e28 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/functional.py @@ -0,0 +1,89 @@ +import numbers +import cv2 +import numpy as np +import PIL +import torch + + +def _is_tensor_clip(clip): + return torch.is_tensor(clip) and clip.ndimension() == 4 + + +def crop_clip(clip, min_h, min_w, h, w): + if isinstance(clip[0], np.ndarray): + cropped = [img[min_h:min_h + h, min_w:min_w + w, :] for img in clip] + + elif isinstance(clip[0], PIL.Image.Image): + cropped = [ + img.crop((min_w, min_h, min_w + w, min_h + h)) for img in clip + ] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + return cropped + + +def resize_clip(clip, size, interpolation='bilinear'): + if isinstance(clip[0], np.ndarray): + if isinstance(size, numbers.Number): + im_h, im_w, im_c = clip[0].shape + # Min spatial dim already matches minimal size + if (im_w 
<= im_h and im_w == size) or (im_h <= im_w + and im_h == size): + return clip + new_h, new_w = get_resize_sizes(im_h, im_w, size) + size = (new_w, new_h) + else: + size = size[0], size[1] + if interpolation == 'bilinear': + np_inter = cv2.INTER_LINEAR + else: + np_inter = cv2.INTER_NEAREST + scaled = [ + cv2.resize(img, size, interpolation=np_inter) for img in clip + ] + elif isinstance(clip[0], PIL.Image.Image): + if isinstance(size, numbers.Number): + im_w, im_h = clip[0].size + # Min spatial dim already matches minimal size + if (im_w <= im_h and im_w == size) or (im_h <= im_w + and im_h == size): + return clip + new_h, new_w = get_resize_sizes(im_h, im_w, size) + size = (new_w, new_h) + else: + size = size[1], size[0] + if interpolation == 'bilinear': + pil_inter = PIL.Image.BILINEAR + else: + pil_inter = PIL.Image.NEAREST + scaled = [img.resize(size, pil_inter) for img in clip] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + return scaled + + +def get_resize_sizes(im_h, im_w, size): + if im_w < im_h: + ow = size + oh = int(size * im_h / im_w) + else: + oh = size + ow = int(size * im_w / im_h) + return oh, ow + + +def normalize(clip, mean, std, inplace=False): + if not _is_tensor_clip(clip): + raise TypeError('tensor is not a torch clip.') + + if not inplace: + clip = clip.clone() + + dtype = clip.dtype + mean = torch.as_tensor(mean, dtype=dtype, device=clip.device) + std = torch.as_tensor(std, dtype=dtype, device=clip.device) + clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None]) + + return clip diff --git a/Downstream/Spatial-Temporal-Action-Localization/kinetics.py b/Downstream/Spatial-Temporal-Action-Localization/kinetics.py new file mode 100644 index 0000000..0a83c50 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/kinetics.py @@ -0,0 +1,556 @@ +import os +import numpy as np +from numpy.lib.function_base import disp +import torch +import decord +from PIL import Image +from torchvision import transforms +from random_erasing import RandomErasing +import warnings +from decord import VideoReader, cpu +from torch.utils.data import Dataset +import video_transforms as video_transforms +import volume_transforms as volume_transforms + +class VideoClsDataset(Dataset): + """Load your own video classification dataset.""" + + def __init__(self, anno_path, data_path, mode='train', clip_len=8, + frame_sample_rate=2, crop_size=224, short_side_size=256, + new_height=256, new_width=340, keep_aspect_ratio=True, + num_segment=1, num_crop=1, test_num_segment=10, test_num_crop=3,args=None): + self.anno_path = anno_path + self.data_path = data_path + self.mode = mode + self.clip_len = clip_len + self.frame_sample_rate = frame_sample_rate + self.crop_size = crop_size + self.short_side_size = short_side_size + self.new_height = new_height + self.new_width = new_width + self.keep_aspect_ratio = keep_aspect_ratio + self.num_segment = num_segment + self.test_num_segment = test_num_segment + self.num_crop = num_crop + self.test_num_crop = test_num_crop + self.args = args + self.aug = False + self.rand_erase = False + if self.mode in ['train']: + self.aug = True + if self.args.reprob > 0: + self.rand_erase = True + if VideoReader is None: + raise ImportError("Unable to import `decord` which is required to read videos.") + + import pandas as pd + cleaned = pd.read_csv(self.anno_path, header=None, delimiter=' ') + self.dataset_samples = list(cleaned.values[:, 0]) + self.label_array = list(cleaned.values[:, 1]) + 
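The read_csv call above implies a header-less, space-delimited annotation list with one "video_path label" pair per line. A tiny self-contained illustration; the paths and labels below are made up:

import io
import pandas as pd

toy_list = io.StringIO(
    "/data/kinetics/clip_0001.mp4 7\n"
    "/data/kinetics/clip_0002.mp4 23\n"
)
cleaned = pd.read_csv(toy_list, header=None, delimiter=' ')
print(list(cleaned.values[:, 0]))  # ['/data/kinetics/clip_0001.mp4', '/data/kinetics/clip_0002.mp4']
print(list(cleaned.values[:, 1]))  # [7, 23]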
+ if (mode == 'train'): + pass + + elif (mode == 'validation'): + self.data_transform = video_transforms.Compose([ + video_transforms.Resize(self.short_side_size, interpolation='bilinear'), + video_transforms.CenterCrop(size=(self.crop_size, self.crop_size)), + volume_transforms.ClipToTensor(), + video_transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + ]) + elif mode == 'test': + self.data_resize = video_transforms.Compose([ + video_transforms.Resize(size=(short_side_size), interpolation='bilinear') + ]) + self.data_transform = video_transforms.Compose([ + volume_transforms.ClipToTensor(), + video_transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + ]) + self.test_seg = [] + self.test_dataset = [] + self.test_label_array = [] + for ck in range(self.test_num_segment): + for cp in range(self.test_num_crop): + for idx in range(len(self.label_array)): + sample_label = self.label_array[idx] + self.test_label_array.append(sample_label) + self.test_dataset.append(self.dataset_samples[idx]) + self.test_seg.append((ck, cp)) + + def __getitem__(self, index): + if self.mode == 'train': + args = self.args + scale_t = 1 + + sample = self.dataset_samples[index] + buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t) # T H W C + if len(buffer) == 0: + while len(buffer) == 0: + warnings.warn("video {} not correctly loaded during training".format(sample)) + index = np.random.randint(self.__len__()) + sample = self.dataset_samples[index] + buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t) + + if args.num_sample > 1: + frame_list = [] + label_list = [] + index_list = [] + for _ in range(args.num_sample): + new_frames = self._aug_frame(buffer, args) + label = self.label_array[index] + frame_list.append(new_frames) + label_list.append(label) + index_list.append(index) + return frame_list, label_list, index_list, {} + else: + buffer = self._aug_frame(buffer, args) + + return buffer, self.label_array[index], index, {} + + elif self.mode == 'validation': + sample = self.dataset_samples[index] + buffer = self.loadvideo_decord(sample) + if len(buffer) == 0: + while len(buffer) == 0: + warnings.warn("video {} not correctly loaded during validation".format(sample)) + index = np.random.randint(self.__len__()) + sample = self.dataset_samples[index] + buffer = self.loadvideo_decord(sample) + buffer = self.data_transform(buffer) + return buffer, self.label_array[index], sample.split("/")[-1].split(".")[0] + + elif self.mode == 'test': + sample = self.test_dataset[index] + chunk_nb, split_nb = self.test_seg[index] + buffer = self.loadvideo_decord(sample) + + while len(buffer) == 0: + warnings.warn("video {}, temporal {}, spatial {} not found during testing".format(\ + str(self.test_dataset[index]), chunk_nb, split_nb)) + index = np.random.randint(self.__len__()) + sample = self.test_dataset[index] + chunk_nb, split_nb = self.test_seg[index] + buffer = self.loadvideo_decord(sample) + + buffer = self.data_resize(buffer) + if isinstance(buffer, list): + buffer = np.stack(buffer, 0) + + spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \ + / (self.test_num_crop - 1) + temporal_step = max(1.0 * (buffer.shape[0] - self.clip_len) \ + / (self.test_num_segment - 1), 0) + temporal_start = int(chunk_nb * temporal_step) + spatial_start = int(split_nb * spatial_step) + if buffer.shape[1] >= buffer.shape[2]: + buffer = buffer[temporal_start:temporal_start + self.clip_len, \ + spatial_start:spatial_start + 
self.short_side_size, :, :] + else: + buffer = buffer[temporal_start:temporal_start + self.clip_len, \ + :, spatial_start:spatial_start + self.short_side_size, :] + + buffer = self.data_transform(buffer) + return buffer, self.test_label_array[index], sample.split("/")[-1].split(".")[0], \ + chunk_nb, split_nb + else: + raise NameError('mode {} unkown'.format(self.mode)) + + def _aug_frame( + self, + buffer, + args, + ): + + aug_transform = video_transforms.create_random_augment( + input_size=(self.crop_size, self.crop_size), + auto_augment=args.aa, + interpolation=args.train_interpolation, + ) + + buffer = [ + transforms.ToPILImage()(frame) for frame in buffer + ] + + buffer = aug_transform(buffer) + + buffer = [transforms.ToTensor()(img) for img in buffer] + buffer = torch.stack(buffer) # T C H W + buffer = buffer.permute(0, 2, 3, 1) # T H W C + + # T H W C + buffer = tensor_normalize( + buffer, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225] + ) + # T H W C -> C T H W. + buffer = buffer.permute(3, 0, 1, 2) + # Perform data augmentation. + scl, asp = ( + [0.08, 1.0], + [0.75, 1.3333], + ) + + buffer = spatial_sampling( + buffer, + spatial_idx=-1, + min_scale=256, + max_scale=320, + crop_size=self.crop_size, + random_horizontal_flip=False if args.data_set == 'SSV2' else True , + inverse_uniform_sampling=False, + aspect_ratio=asp, + scale=scl, + motion_shift=False + ) + + if self.rand_erase: + erase_transform = RandomErasing( + args.reprob, + mode=args.remode, + max_count=args.recount, + num_splits=args.recount, + device="cpu", + ) + buffer = buffer.permute(1, 0, 2, 3) + buffer = erase_transform(buffer) + buffer = buffer.permute(1, 0, 2, 3) + + return buffer + + + def loadvideo_decord(self, sample, sample_rate_scale=1): + """Load video content using Decord""" + fname = sample + + if not (os.path.exists(fname)): + return [] + + # avoid hanging issue + if os.path.getsize(fname) < 1 * 1024: + print('SKIP: ', fname, " - ", os.path.getsize(fname)) + return [] + try: + if self.keep_aspect_ratio: + vr = VideoReader(fname, num_threads=1, ctx=cpu(0)) + else: + vr = VideoReader(fname, width=self.new_width, height=self.new_height, + num_threads=1, ctx=cpu(0)) + except: + print("video cannot be loaded by decord: ", fname) + return [] + + if self.mode == 'test': + all_index = [x for x in range(0, len(vr), self.frame_sample_rate)] + while len(all_index) < self.clip_len: + all_index.append(all_index[-1]) + vr.seek(0) + buffer = vr.get_batch(all_index).asnumpy() + return buffer + + # handle temporal segments + converted_len = int(self.clip_len * self.frame_sample_rate) + seg_len = len(vr) // self.num_segment + + all_index = [] + for i in range(self.num_segment): + if seg_len <= converted_len: + index = np.linspace(0, seg_len, num=seg_len // self.frame_sample_rate) + index = np.concatenate((index, np.ones(self.clip_len - seg_len // self.frame_sample_rate) * seg_len)) + index = np.clip(index, 0, seg_len - 1).astype(np.int64) + else: + end_idx = np.random.randint(converted_len, seg_len) + str_idx = end_idx - converted_len + index = np.linspace(str_idx, end_idx, num=self.clip_len) + index = np.clip(index, str_idx, end_idx - 1).astype(np.int64) + index = index + i*seg_len + all_index.extend(list(index)) + + all_index = all_index[::int(sample_rate_scale)] + vr.seek(0) + buffer = vr.get_batch(all_index).asnumpy() + return buffer + + def __len__(self): + if self.mode != 'test': + return len(self.dataset_samples) + else: + return len(self.test_dataset) + + +def spatial_sampling( + frames, + spatial_idx=-1, + 
min_scale=256, + max_scale=320, + crop_size=224, + random_horizontal_flip=True, + inverse_uniform_sampling=False, + aspect_ratio=None, + scale=None, + motion_shift=False, +): + """ + Perform spatial sampling on the given video frames. If spatial_idx is + -1, perform random scale, random crop, and random flip on the given + frames. If spatial_idx is 0, 1, or 2, perform spatial uniform sampling + with the given spatial_idx. + Args: + frames (tensor): frames of images sampled from the video. The + dimension is `num frames` x `height` x `width` x `channel`. + spatial_idx (int): if -1, perform random spatial sampling. If 0, 1, + or 2, perform left, center, right crop if width is larger than + height, and perform top, center, buttom crop if height is larger + than width. + min_scale (int): the minimal size of scaling. + max_scale (int): the maximal size of scaling. + crop_size (int): the size of height and width used to crop the + frames. + inverse_uniform_sampling (bool): if True, sample uniformly in + [1 / max_scale, 1 / min_scale] and take a reciprocal to get the + scale. If False, take a uniform sample from [min_scale, + max_scale]. + aspect_ratio (list): Aspect ratio range for resizing. + scale (list): Scale range for resizing. + motion_shift (bool): Whether to apply motion shift for resizing. + Returns: + frames (tensor): spatially sampled frames. + """ + assert spatial_idx in [-1, 0, 1, 2] + if spatial_idx == -1: + if aspect_ratio is None and scale is None: + frames, _ = video_transforms.random_short_side_scale_jitter( + images=frames, + min_size=min_scale, + max_size=max_scale, + inverse_uniform_sampling=inverse_uniform_sampling, + ) + frames, _ = video_transforms.random_crop(frames, crop_size) + else: + transform_func = ( + video_transforms.random_resized_crop_with_shift + if motion_shift + else video_transforms.random_resized_crop + ) + frames = transform_func( + images=frames, + target_height=crop_size, + target_width=crop_size, + scale=scale, + ratio=aspect_ratio, + ) + if random_horizontal_flip: + frames, _ = video_transforms.horizontal_flip(0.5, frames) + else: + # The testing is deterministic and no jitter should be performed. + # min_scale, max_scale, and crop_size are expect to be the same. + assert len({min_scale, max_scale, crop_size}) == 1 + frames, _ = video_transforms.random_short_side_scale_jitter( + frames, min_scale, max_scale + ) + frames, _ = video_transforms.uniform_crop(frames, crop_size, spatial_idx) + return frames + + +def tensor_normalize(tensor, mean, std): + """ + Normalize a given tensor by subtracting the mean and dividing the std. + Args: + tensor (tensor): tensor to normalize. + mean (tensor or list): mean value to subtract. + std (tensor or list): std to divide. + """ + if tensor.dtype == torch.uint8: + tensor = tensor.float() + tensor = tensor / 255.0 + if type(mean) == list: + mean = torch.tensor(mean) + if type(std) == list: + std = torch.tensor(std) + tensor = tensor - mean + tensor = tensor / std + return tensor + + +class VideoMAE(torch.utils.data.Dataset): + """Load your own video classification dataset. + Parameters + ---------- + root : str, required. + Path to the root folder storing the dataset. + setting : str, required. + A text file describing the dataset, each line per video sample. + There are three items in each line: (1) video path; (2) video length and (3) video label. + train : bool, default True. + Whether to load the training or validation set. + test_mode : bool, default False. + Whether to perform evaluation on the test set. 
+ Usually there is three-crop or ten-crop evaluation strategy involved. + name_pattern : str, default None. + The naming pattern of the decoded video frames. + For example, img_00012.jpg. + video_ext : str, default 'mp4'. + If video_loader is set to True, please specify the video format accordinly. + is_color : bool, default True. + Whether the loaded image is color or grayscale. + modality : str, default 'rgb'. + Input modalities, we support only rgb video frames for now. + Will add support for rgb difference image and optical flow image later. + num_segments : int, default 1. + Number of segments to evenly divide the video into clips. + A useful technique to obtain global video-level information. + Limin Wang, etal, Temporal Segment Networks: Towards Good Practices for Deep Action Recognition, ECCV 2016. + num_crop : int, default 1. + Number of crops for each image. default is 1. + Common choices are three crops and ten crops during evaluation. + new_length : int, default 1. + The length of input video clip. Default is a single image, but it can be multiple video frames. + For example, new_length=16 means we will extract a video clip of consecutive 16 frames. + new_step : int, default 1. + Temporal sampling rate. For example, new_step=1 means we will extract a video clip of consecutive frames. + new_step=2 means we will extract a video clip of every other frame. + temporal_jitter : bool, default False. + Whether to temporally jitter if new_step > 1. + video_loader : bool, default False. + Whether to use video loader to load data. + use_decord : bool, default True. + Whether to use Decord video loader to load data. Otherwise use mmcv video loader. + transform : function, default None. + A function that takes data and label and transforms them. + data_aug : str, default 'v1'. + Different types of data augmentation auto. Supports v1, v2, v3 and v4. + lazy_init : bool, default False. + If set to True, build a dataset instance without loading any dataset. + """ + def __init__(self, + root, + setting, + train=True, + test_mode=False, + name_pattern='img_%05d.jpg', + video_ext='mp4', + is_color=True, + modality='rgb', + num_segments=1, + num_crop=1, + new_length=1, + new_step=1, + transform=None, + temporal_jitter=False, + video_loader=False, + use_decord=False, + lazy_init=False): + + super(VideoMAE, self).__init__() + self.root = root + self.setting = setting + self.train = train + self.test_mode = test_mode + self.is_color = is_color + self.modality = modality + self.num_segments = num_segments + self.num_crop = num_crop + self.new_length = new_length + self.new_step = new_step + self.skip_length = self.new_length * self.new_step + self.temporal_jitter = temporal_jitter + self.name_pattern = name_pattern + self.video_loader = video_loader + self.video_ext = video_ext + self.use_decord = use_decord + self.transform = transform + self.lazy_init = lazy_init + + + if not self.lazy_init: + self.clips = self._make_dataset(root, setting) + if len(self.clips) == 0: + raise(RuntimeError("Found 0 video clips in subfolders of: " + root + "\n" + "Check your data directory (opt.data-dir).")) + + def __getitem__(self, index): + + directory, target = self.clips[index] + if self.video_loader: + if '.' in directory.split('/')[-1]: + # data in the "setting" file already have extension, e.g., demo.mp4 + video_name = directory + else: + # data in the "setting" file do not have extension, e.g., demo + # So we need to provide extension (i.e., .mp4) to complete the file name. 
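Further down in __getitem__, the transformed clip comes back as a stacked (T*C, H, W) tensor and is reshaped into channel-first video layout before being returned. A minimal shape illustration assuming new_length = 16 and 224x224 frames:

import torch

new_length = 16
process_data = torch.rand(new_length * 3, 224, 224)  # T*C, H, W, as produced by the transform
video = process_data.view((new_length, 3) + process_data.size()[-2:]).transpose(0, 1)
print(video.shape)  # torch.Size([3, 16, 224, 224]) -> C, T, H, W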
+ video_name = '{}.{}'.format(directory, self.video_ext) + + decord_vr = decord.VideoReader(video_name, num_threads=1) + duration = len(decord_vr) + + segment_indices, skip_offsets = self._sample_train_indices(duration) + + images = self._video_TSN_decord_batch_loader(directory, decord_vr, duration, segment_indices, skip_offsets) + + process_data, mask = self.transform((images, None)) # T*C,H,W + process_data = process_data.view((self.new_length, 3) + process_data.size()[-2:]).transpose(0,1) # T*C,H,W -> T,C,H,W -> C,T,H,W + + return (process_data, mask) + + def __len__(self): + return len(self.clips) + + def _make_dataset(self, directory, setting): + if not os.path.exists(setting): + raise(RuntimeError("Setting file %s doesn't exist. Check opt.train-list and opt.val-list. " % (setting))) + clips = [] + with open(setting) as split_f: + data = split_f.readlines() + for line in data: + line_info = line.split(' ') + # line format: video_path, video_duration, video_label + if len(line_info) < 2: + raise(RuntimeError('Video input format is not correct, missing one or more element. %s' % line)) + clip_path = os.path.join(line_info[0]) + target = int(line_info[1]) + item = (clip_path, target) + clips.append(item) + return clips + + def _sample_train_indices(self, num_frames): + average_duration = (num_frames - self.skip_length + 1) // self.num_segments + if average_duration > 0: + offsets = np.multiply(list(range(self.num_segments)), + average_duration) + offsets = offsets + np.random.randint(average_duration, + size=self.num_segments) + elif num_frames > max(self.num_segments, self.skip_length): + offsets = np.sort(np.random.randint( + num_frames - self.skip_length + 1, + size=self.num_segments)) + else: + offsets = np.zeros((self.num_segments,)) + + if self.temporal_jitter: + skip_offsets = np.random.randint( + self.new_step, size=self.skip_length // self.new_step) + else: + skip_offsets = np.zeros( + self.skip_length // self.new_step, dtype=int) + return offsets + 1, skip_offsets + + + def _video_TSN_decord_batch_loader(self, directory, video_reader, duration, indices, skip_offsets): + sampled_list = [] + frame_id_list = [] + for seg_ind in indices: + offset = int(seg_ind) + for i, _ in enumerate(range(0, self.skip_length, self.new_step)): + if offset + skip_offsets[i] <= duration: + frame_id = offset + skip_offsets[i] - 1 + else: + frame_id = offset - 1 + frame_id_list.append(frame_id) + if offset + self.new_step < duration: + offset += self.new_step + try: + video_data = video_reader.get_batch(frame_id_list).asnumpy() + sampled_list = [Image.fromarray(video_data[vid, :, :, :]).convert('RGB') for vid, _ in enumerate(frame_id_list)] + except: + raise RuntimeError('Error occured in reading frames {} from video {} of duration {}.'.format(frame_id_list, directory, duration)) + return sampled_list diff --git a/Downstream/Spatial-Temporal-Action-Localization/masking_generator.py b/Downstream/Spatial-Temporal-Action-Localization/masking_generator.py new file mode 100644 index 0000000..982f903 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/masking_generator.py @@ -0,0 +1,24 @@ +import numpy as np + +class TubeMaskingGenerator: + def __init__(self, input_size, mask_ratio): + self.frames, self.height, self.width = input_size + self.num_patches_per_frame = self.height * self.width + self.total_patches = self.frames * self.num_patches_per_frame + self.num_masks_per_frame = int(mask_ratio * self.num_patches_per_frame) + self.total_masks = self.frames * self.num_masks_per_frame + + def 
__repr__(self): + repr_str = "Maks: total patches {}, mask patches {}".format( + self.total_patches, self.total_masks + ) + return repr_str + + def __call__(self): + mask_per_frame = np.hstack([ + np.zeros(self.num_patches_per_frame - self.num_masks_per_frame), + np.ones(self.num_masks_per_frame), + ]) + np.random.shuffle(mask_per_frame) + mask = np.tile(mask_per_frame, (self.frames,1)).flatten() + return mask diff --git a/Downstream/Spatial-Temporal-Action-Localization/modeling_finetune.py b/Downstream/Spatial-Temporal-Action-Localization/modeling_finetune.py new file mode 100644 index 0000000..a3f0ccf --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/modeling_finetune.py @@ -0,0 +1,592 @@ +from functools import partial +import numpy as np +from typing import Tuple +import torch +import torch.nn as nn +import torch.nn.functional as F +from timm.models.layers import drop_path, to_2tuple, trunc_normal_ +from timm.models.registry import register_model +from dataclasses import dataclass +from alphaction.modeling.poolers import make_3d_pooler +import math +import pdb + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 400, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', + 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), + **kwargs + } + + +class HR2O_NL(nn.Module): + def __init__(self, hidden_dim=512, kernel_size=3, mlp_1x1=False): + super(HR2O_NL, self).__init__() + + self.hidden_dim = hidden_dim + + padding = kernel_size // 2 + self.conv_q = nn.Conv2d(hidden_dim, hidden_dim, kernel_size, padding=padding, bias=False) + self.conv_k = nn.Conv2d(hidden_dim, hidden_dim, kernel_size, padding=padding, bias=False) + self.conv_v = nn.Conv2d(hidden_dim, hidden_dim, kernel_size, padding=padding, bias=False) + + self.conv = nn.Conv2d( + hidden_dim, hidden_dim, + 1 if mlp_1x1 else kernel_size, + padding=0 if mlp_1x1 else padding, + bias=False + ) + self.norm = nn.GroupNorm(1, hidden_dim, affine=True) + self.dp = nn.Dropout(0.2) + + def forward(self, x): + query = self.conv_q(x).unsqueeze(1) + key = self.conv_k(x).unsqueeze(0) + att = (query * key).sum(2) / (self.hidden_dim ** 0.5) + att = nn.Softmax(dim=1)(att) + value = self.conv_v(x) + virt_feats = (att.unsqueeze(2) * value).sum(1) + + virt_feats = self.norm(virt_feats) + virt_feats = nn.functional.relu(virt_feats) + virt_feats = self.conv(virt_feats) + virt_feats = self.dp(virt_feats) + + x = x + virt_feats + return x + + +class ACARHead(nn.Module): + def __init__(self, num_classes=60, dropout=0., bias=False, + reduce_dim=1024, hidden_dim=512, downsample='max2x2', depth=2, + kernel_size=3, mlp_1x1=False): + super(ACARHead, self).__init__() + + # actor-context feature encoder + self.conv1 = nn.Conv2d(reduce_dim * 2, hidden_dim, 1, bias=False) + self.conv2 = nn.Conv2d(hidden_dim, hidden_dim, 3, bias=False) + + # down-sampling before HR2O + assert downsample in ['none', 'max2x2'] + if downsample == 'none': + self.downsample = nn.Identity() + elif downsample == 'max2x2': + self.downsample = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + # high-order relation reasoning operator (HR2O_NL) + layers = [] + for _ in range(depth): + layers.append(HR2O_NL(hidden_dim, kernel_size, mlp_1x1)) + self.hr2o = nn.Sequential(*layers) + + # classification + self.gap = nn.AdaptiveAvgPool2d(1) + self.fc1 = nn.Linear(reduce_dim, hidden_dim, bias=False) + self.fc2 = nn.Linear(hidden_dim * 2, num_classes, bias=bias) + + if dropout > 0: + self.dp = nn.Dropout(dropout) + else: + self.dp = 
None + + # data: features, rois, num_rois, roi_ids, sizes_before_padding + # returns: outputs + def forward(self, roi_feats, roi_ids, img_features): + """ + roi_feats: [num_rois, emb_dim] + roi_ids: [num_rois] + img_features: [bs,emb_dim,t,h,w] + """ + high_order_feats = [] + cur_roi_id = 0 + # pdb.set_trace() + for idx in range(img_features.shape[0]): # iterate over mini-batch + n_rois = roi_ids[idx] + if n_rois == 0: + continue + + # eff_h, eff_w = math.ceil(h * sizes_before_padding[idx][1]), math.ceil(w * sizes_before_padding[idx][0]) + bg_feats = img_features[idx] + bg_feats = bg_feats.unsqueeze(0).repeat((n_rois, 1, 1, 1)) + actor_feats = roi_feats[cur_roi_id:cur_roi_id+roi_ids[idx]] + cur_roi_id += n_rois + tiled_actor_feats = actor_feats.unsqueeze(2).unsqueeze(2).expand_as(bg_feats) + interact_feats = torch.cat([bg_feats, tiled_actor_feats], dim=1) + + interact_feats = self.conv1(interact_feats) + interact_feats = nn.functional.relu(interact_feats) + interact_feats = self.conv2(interact_feats) + interact_feats = nn.functional.relu(interact_feats) + + interact_feats = self.downsample(interact_feats) + + interact_feats = self.hr2o(interact_feats) + interact_feats = self.gap(interact_feats) + high_order_feats.append(interact_feats) + + high_order_feats = torch.cat(high_order_feats, dim=0).view(np.sum(np.array(roi_ids)), -1) + + outputs = self.fc1(roi_feats) + outputs = nn.functional.relu(outputs) + outputs = torch.cat([outputs, high_order_feats], dim=1) + + if self.dp is not None: + outputs = self.dp(outputs) + outputs = self.fc2(outputs) + + return outputs + + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + """ + + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + def extra_repr(self) -> str: + return 'p={}'.format(self.drop_prob) + + +class Mlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + # x = self.drop(x) + # commit this for the orignal BERT implement + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + def __init__( + self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., + proj_drop=0., attn_head_dim=None): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + if attn_head_dim is not None: + head_dim = attn_head_dim + all_head_dim = head_dim * self.num_heads + self.scale = qk_scale or head_dim ** -0.5 + + self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) + self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) + else: + self.q_bias = None + self.v_bias = None + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(all_head_dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv_bias = None + if self.q_bias is not None: + qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias)) + # qkv = self.qkv(x).reshape(B, N, 3, 
self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm, + attn_head_dim=None): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, + attn_drop=attn_drop, proj_drop=drop, attn_head_dim=attn_head_dim) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + if init_values > 0: + self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) + self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) + else: + self.gamma_1, self.gamma_2 = None, None + + def forward(self, x): + if self.gamma_1 is None: + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + else: + x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x))) + x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) + return x + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, num_frames=16, tubelet_size=2): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + self.tubelet_size = int(tubelet_size) + num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) * ( + num_frames // self.tubelet_size) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + self.proj = nn.Conv3d(in_channels=in_chans, out_channels=embed_dim, + kernel_size=(self.tubelet_size, patch_size[0], patch_size[1]), + stride=(self.tubelet_size, patch_size[0], patch_size[1])) + + def forward(self, x, **kwargs): + # B, C, T, H, W = x.shape + # FIXME look at relaxing size constraints + # assert H == self.img_size[0] and W == self.img_size[1], \ + # f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." 
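For reference on the projection below: it is a single Conv3d whose kernel and stride both equal (tubelet_size, patch_size, patch_size), so with the defaults above (embed_dim=768, tubelet_size=2, patch_size=16) a 16-frame 224x224 clip becomes an 8x14x14 grid of 768-d tokens; the flatten/transpose is applied later in forward_features. A quick shape check:

import torch
import torch.nn as nn

proj = nn.Conv3d(3, 768, kernel_size=(2, 16, 16), stride=(2, 16, 16))
clip = torch.rand(1, 3, 16, 224, 224)            # B, C, T, H, W
tokens = proj(clip)
print(tokens.shape)                              # torch.Size([1, 768, 8, 14, 14])
print(tokens.flatten(2).transpose(1, 2).shape)   # torch.Size([1, 1568, 768]) = (T/2)*(H/16)*(W/16) tokens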
+ # x = self.proj(x).flatten(2).transpose(1, 2) + x = self.proj(x) + return x + + +# sin-cos position encoding +# https://github.com/jadore801120/attention-is-all-you-need-pytorch/blob/master/transformer/Models.py#L31 +def get_sinusoid_encoding_table(n_position, d_hid): + ''' Sinusoid position encoding table ''' + + # TODO: make it with torch instead of numpy + def get_position_angle_vec(position): + return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)] + + sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)]) + sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i + sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 + + return torch.FloatTensor(sinusoid_table).unsqueeze(0) + + +@dataclass +class ROIPoolingCfg: + POOLER_RESOLUTION: int = 7 + POOLER_SCALE: float = 0.0625 + POOLER_SAMPLING_RATIO: int = 0 + POOLER_TYPE: str = 'align3d' + MEAN_BEFORE_POOLER: bool = True + + +class VisionTransformer(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__(self, + img_size=224, + patch_size=16, + in_chans=3, + num_classes=80, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer=nn.LayerNorm, + init_values=0., + use_learnable_pos_emb=False, + init_scale=0., + all_frames=16, + tubelet_size=2, + head_type='linear', + use_mean_pooling=True): + super().__init__() + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + self.tubelet_size = tubelet_size + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, num_frames=all_frames, + tubelet_size=self.tubelet_size) + num_patches = self.patch_embed.num_patches # 8x14x14 + self.grid_size = [img_size//patch_size, img_size//patch_size] # [14,14] + self.head_type = head_type + if use_learnable_pos_emb: + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + else: + # sine-cosine positional embeddings is on the way + self.pos_embed = get_sinusoid_encoding_table(num_patches, embed_dim) + + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + init_values=init_values) + for i in range(depth)]) + + # self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim) + # self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None + self.norm = norm_layer(embed_dim) # 这一项是预训练权重中没有的 + self.fc_norm = None + if head_type == 'acar': + self.head = ACARHead(num_classes=num_classes, hidden_dim=embed_dim, reduce_dim=embed_dim) if num_classes > 0 else nn.Identity() + trunc_normal_(self.head.fc2.weight, std=.02) + self.head.fc2.weight.data.mul_(init_scale) + # self.head.fc2.bias.data.mul_(init_scale) + else: + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + trunc_normal_(self.head.weight, std=.02) + self.head.weight.data.mul_(init_scale) + self.head.bias.data.mul_(init_scale) + + # rois setting + self.head_cfg = ROIPoolingCfg() + self.pooler = make_3d_pooler(self.head_cfg) + resolution = 
self.head_cfg.POOLER_RESOLUTION + self.max_pooler = nn.MaxPool2d((resolution, resolution)) + + self.test_ext = (0.1, 0.05) + self.proposal_per_clip = 100 + + if use_learnable_pos_emb: + trunc_normal_(self.pos_embed, std=.02) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def get_num_layers(self): + return len(self.blocks) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x, proposals): + x = self.patch_embed(x) + B, width, t, h, w = x.size() + x = x.flatten(2).transpose(1, 2) + + # B, _, _ = x.size() + if self.pos_embed is not None: # 预测阶段插值 + # positional_embedding=[1 8*14*14 768]->[1 8*16*29 768] + pos_embed = self.pos_embed.reshape(t, -1, width) + pos_embed = interpolate_pos_embed_online( + pos_embed, self.grid_size, [h, w], 0).reshape(1, -1, width) + x = x + pos_embed.expand(B, -1, -1).type_as(x).to(x.device).clone().detach() + x = self.pos_drop(x) + + for blk in self.blocks: + x = blk(x) + + x = self.norm(x) # [b thw=8x14x14 c=768] + # if self.fc_norm is not None: # default + # return self.fc_norm(x.mean(1)) + # else: # cls_token + # return x[:, 0] + # rois + x = x.reshape(B, t, h, w, -1).permute(0, 4, 1, 2, 3) # [b c t h w] + x = x.mean(dim=2, keepdim=False) # [b c h w] + rois = self.pooler(x, proposals) # [n c 7 7] + rois = self.max_pooler(rois).view(rois.size(0), -1) # [n c] + return rois, x + + def sample_box(self, boxes): + proposals = [] + num_proposals = self.proposal_per_clip + for boxes_per_image in boxes: + num_boxes = len(boxes_per_image) + + if num_boxes > num_proposals: + choice_inds = torch.randperm(num_boxes)[:num_proposals] + proposals_per_image = boxes_per_image[choice_inds] + else: + proposals_per_image = boxes_per_image + proposals_per_image = proposals_per_image.random_aug(0.2, 0.1, 0.1, 0.05) + proposals.append(proposals_per_image) + return proposals + + + def forward(self, x, boxes): + if self.training: + proposals = self.sample_box(boxes) # 暂不考虑训练时限制action数量 + else: + proposals = [box.extend(self.test_ext) for box in boxes] + rois, x = self.forward_features(x, proposals) + if self.head_type == 'acar': + roi_ids = [len(i) for i in proposals] + rois = self.head(rois,roi_ids,x) + else: + rois = self.head(rois) + return rois + + +@register_model +def vit_small_patch16_224(pretrained=False, **kwargs): + model = VisionTransformer( + patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_base_patch16_224(pretrained=False, **kwargs): + model = VisionTransformer( + patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_base_patch16_384(pretrained=False, **kwargs): + model = VisionTransformer( + img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True, + 
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_large_patch16_224(pretrained=False, **kwargs): + model = VisionTransformer( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_large_patch16_384(pretrained=False, **kwargs): + model = VisionTransformer( + img_size=384, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_large_patch16_512(pretrained=False, **kwargs): + model = VisionTransformer( + img_size=512, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model.default_cfg = _cfg() + return model + +@register_model +def vit_huge_patch16_224(pretrained=False, **kwargs): + model = VisionTransformer( + patch_size=16, embed_dim=1280, depth=32, num_heads=16, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model.default_cfg = _cfg() + return model + +@register_model +def vit_giant_patch14_224(pretrained=False, **kwargs): + model = VisionTransformer(patch_size=14, + embed_dim=1408, + depth=40, + num_heads=16, + mlp_ratio=48 / 11, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs) + model.default_cfg = _cfg() + return model + + +def interpolate_pos_embed_online( + pos_embed, orig_size: Tuple[int], new_size: Tuple[int], num_extra_tokens: int +): + extra_tokens = pos_embed[:, :num_extra_tokens] + pos_tokens = pos_embed[:, num_extra_tokens:] + embedding_size = pos_tokens.shape[-1] + pos_tokens = pos_tokens.reshape( + -1, orig_size[0], orig_size[1], embedding_size + ).permute(0, 3, 1, 2) + pos_tokens = torch.nn.functional.interpolate( + pos_tokens, size=new_size, mode="bicubic", align_corners=False, + ) + pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) + new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) + return new_pos_embed + + +if __name__ == '__main__': + # test forward + # create proposal + from alphaction.structures.bounding_box import BoxList + + im_w, im_h = 464, 256 + n = 2 + xy = torch.zeros([n, 2]) + w = torch.rand([n, 1]) * 464 + h = torch.rand([n, 1]) * 256 + boxes = torch.cat([xy, w, h], dim=1) + boxes_tensor = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4) # guard against no boxes + + boxes = BoxList(boxes_tensor, (im_w, im_h), mode="xywh").convert("xyxy") + # print(boxes.bbox) + + bs = 2 + t = 16 + proposals = [boxes, boxes] # bs=2 + x = torch.rand([bs, 3, t, im_h, im_w]) + + visual_transformer = vit_base_patch16_224(head_type='acar') + print(visual_transformer) + + rois = visual_transformer(x, proposals) + print(rois.shape) # [4,num_classes] + + # pos_embed = get_sinusoid_encoding_table(8*14*14, 384) + # print(pos_embed.shape) diff --git a/Downstream/Spatial-Temporal-Action-Localization/modeling_pretrain.py b/Downstream/Spatial-Temporal-Action-Localization/modeling_pretrain.py new file mode 100644 index 0000000..fbafb94 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/modeling_pretrain.py @@ -0,0 +1,343 @@ +import math +import torch +import torch.nn as nn +import torch.nn.functional as F + +from functools import partial + +from modeling_finetune import Block, _cfg, PatchEmbed, 
get_sinusoid_encoding_table +from timm.models.registry import register_model +from timm.models.layers import trunc_normal_ as __call_trunc_normal_ + + +def trunc_normal_(tensor, mean=0., std=1.): + __call_trunc_normal_(tensor, mean=mean, std=std, a=-std, b=std) + + +__all__ = [ + 'pretrain_videomae_base_patch16_224', + 'pretrain_videomae_large_patch16_224', +] + + +class PretrainVisionTransformerEncoder(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., + drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, tubelet_size=2, + use_learnable_pos_emb=False): + super().__init__() + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, tubelet_size=tubelet_size) + num_patches = self.patch_embed.num_patches + + # TODO: Add the cls token + if use_learnable_pos_emb: + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + else: + # sine-cosine positional embeddings + self.pos_embed = get_sinusoid_encoding_table(num_patches, embed_dim) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + init_values=init_values) + for i in range(depth)]) + self.norm = norm_layer(embed_dim) + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + if use_learnable_pos_emb: + trunc_normal_(self.pos_embed, std=.02) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def get_num_layers(self): + return len(self.blocks) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x, mask): + _, _, T, _, _ = x.shape + x = self.patch_embed(x) + + x = x + self.pos_embed.type_as(x).to(x.device).clone().detach() + + B, _, C = x.shape + x_vis = x[~mask].reshape(B, -1, C) # ~mask means visible + + for blk in self.blocks: + x_vis = blk(x_vis) + + x_vis = self.norm(x_vis) + return x_vis + + def forward(self, x, mask): + x = self.forward_features(x, mask) + x = self.head(x) + return x + + +class PretrainVisionTransformerDecoder(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__(self, patch_size=16, num_classes=768, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., + drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, num_patches=196, tubelet_size=2 + ): + super().__init__() + self.num_classes 
= num_classes + assert num_classes == 3 * tubelet_size * patch_size ** 2 + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + self.patch_size = patch_size + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + init_values=init_values) + for i in range(depth)]) + self.norm = norm_layer(embed_dim) + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def get_num_layers(self): + return len(self.blocks) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward(self, x, return_token_num): + for blk in self.blocks: + x = blk(x) + + if return_token_num > 0: + x = self.head(self.norm(x[:, -return_token_num:])) # only return the mask tokens predict pixels + else: + x = self.head(self.norm(x)) + + return x + + +class PretrainVisionTransformer(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__(self, + img_size=224, + patch_size=16, + encoder_in_chans=3, + encoder_num_classes=0, + encoder_embed_dim=768, + encoder_depth=12, + encoder_num_heads=12, + decoder_num_classes=1536, # decoder_num_classes=768, + decoder_embed_dim=512, + decoder_depth=8, + decoder_num_heads=8, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer=nn.LayerNorm, + init_values=0., + use_learnable_pos_emb=False, + tubelet_size=2, + num_classes=0, # avoid the error from create_fn in timm + in_chans=0, # avoid the error from create_fn in timm + ): + super().__init__() + self.encoder = PretrainVisionTransformerEncoder( + img_size=img_size, + patch_size=patch_size, + in_chans=encoder_in_chans, + num_classes=encoder_num_classes, + embed_dim=encoder_embed_dim, + depth=encoder_depth, + num_heads=encoder_num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=drop_path_rate, + norm_layer=norm_layer, + init_values=init_values, + tubelet_size=tubelet_size, + use_learnable_pos_emb=use_learnable_pos_emb) + + self.decoder = PretrainVisionTransformerDecoder( + patch_size=patch_size, + num_patches=self.encoder.patch_embed.num_patches, + num_classes=decoder_num_classes, + embed_dim=decoder_embed_dim, + depth=decoder_depth, + num_heads=decoder_num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=drop_path_rate, + norm_layer=norm_layer, + init_values=init_values, + tubelet_size=tubelet_size) + + self.encoder_to_decoder = nn.Linear(encoder_embed_dim, decoder_embed_dim, bias=False) + + self.mask_token = 
nn.Parameter(torch.zeros(1, 1, decoder_embed_dim)) + + self.pos_embed = get_sinusoid_encoding_table(self.encoder.patch_embed.num_patches, decoder_embed_dim) + + trunc_normal_(self.mask_token, std=.02) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def get_num_layers(self): + return len(self.blocks) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token', 'mask_token'} + + def forward(self, x, mask): + _, _, T, _, _ = x.shape + x_vis = self.encoder(x, mask) # [B, N_vis, C_e] + x_vis = self.encoder_to_decoder(x_vis) # [B, N_vis, C_d] + B, N, C = x_vis.shape + # we don't unshuffle the correct visible token order, + # but shuffle the pos embedding accorddingly. + expand_pos_embed = self.pos_embed.expand(B, -1, -1).type_as(x).to(x.device).clone().detach() + pos_emd_vis = expand_pos_embed[~mask].reshape(B, -1, C) + pos_emd_mask = expand_pos_embed[mask].reshape(B, -1, C) + x_full = torch.cat([x_vis + pos_emd_vis, self.mask_token + pos_emd_mask], dim=1) # [B, N, C_d] + x = self.decoder(x_full, pos_emd_mask.shape[1]) # [B, N_mask, 3 * 16 * 16] + + return x + + +@register_model +def pretrain_mae_small_patch16_224(pretrained=False, **kwargs): + model = PretrainVisionTransformer( + img_size=224, + patch_size=16, + encoder_embed_dim=384, + encoder_depth=12, + encoder_num_heads=6, + encoder_num_classes=0, + decoder_num_classes=1536, + decoder_embed_dim=192, + decoder_num_heads=3, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs) + model.default_cfg = _cfg() + if pretrained: + checkpoint = torch.load( + kwargs["init_ckpt"], map_location="cpu" + ) + model.load_state_dict(checkpoint["model"]) + return model + + +@register_model +def pretrain_videomae_base_patch16_224(pretrained=False, **kwargs): + model = PretrainVisionTransformer( + img_size=224, + patch_size=16, + encoder_embed_dim=768, + encoder_depth=12, + encoder_num_heads=12, + encoder_num_classes=0, + decoder_num_classes=1536, + decoder_embed_dim=384, + decoder_num_heads=6, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs) + model.default_cfg = _cfg() + if pretrained: + checkpoint = torch.load( + kwargs["init_ckpt"], map_location="cpu" + ) + model.load_state_dict(checkpoint["model"]) + return model + + +@register_model +def pretrain_videomae_large_patch16_224(pretrained=False, **kwargs): + model = PretrainVisionTransformer( + img_size=224, + patch_size=16, + encoder_embed_dim=1024, + encoder_depth=24, + encoder_num_heads=16, + encoder_num_classes=0, + decoder_num_classes=1536, + decoder_embed_dim=512, + decoder_num_heads=8, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs) + model.default_cfg = _cfg() + if pretrained: + checkpoint = torch.load( + kwargs["init_ckpt"], map_location="cpu" + ) + model.load_state_dict(checkpoint["model"]) + return model \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/optim_factory.py b/Downstream/Spatial-Temporal-Action-Localization/optim_factory.py new file mode 100644 index 0000000..8ba4f7e --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/optim_factory.py @@ -0,0 +1,190 @@ +import torch +from torch import optim as optim + +from timm.optim.adafactor import Adafactor +from 
timm.optim.adahessian import Adahessian
+from timm.optim.adamp import AdamP
+from timm.optim.lookahead import Lookahead
+from timm.optim.nadam import Nadam
+from timm.optim.novograd import NovoGrad
+from timm.optim.nvnovograd import NvNovoGrad
+from timm.optim.radam import RAdam
+from timm.optim.rmsprop_tf import RMSpropTF
+from timm.optim.sgdp import SGDP
+
+import json
+
+try:
+    from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD
+    has_apex = True
+except ImportError:
+    has_apex = False
+
+
+def get_num_layer_for_vit(var_name, num_max_layer):
+    if var_name in ("cls_token", "mask_token", "pos_embed"):
+        return 0
+    elif var_name.startswith("patch_embed") or var_name.startswith("encoder.patch_embed"):
+        return 0
+    elif var_name.startswith("rel_pos_bias"):
+        return num_max_layer - 1
+    elif var_name.startswith("blocks") or var_name.startswith("encoder.blocks"):
+        if var_name.startswith("encoder.blocks"):
+            var_name = var_name[8:]
+        layer_id = int(var_name.split('.')[1])
+        return layer_id + 1
+    else:
+        return num_max_layer - 1
+
+
+class LayerDecayValueAssigner(object):
+    def __init__(self, values):
+        self.values = values
+
+    def get_scale(self, layer_id):
+        return self.values[layer_id]
+
+    def get_layer_id(self, var_name):
+        return get_num_layer_for_vit(var_name, len(self.values))
+
+
+def get_parameter_groups(model, weight_decay=1e-5, skip_list=(), get_num_layer=None, get_layer_scale=None,
+                         lr_scale=1.0):
+    parameter_group_names = {}
+    parameter_group_vars = {}
+
+    # split the lr scale here: the encoder gets one learning rate, all other parameters get another
+    # layer_decay: requires get_num_layer and get_layer_scale to be passed in
+    for name, param in model.named_parameters():
+        if not param.requires_grad:
+            continue  # frozen weights
+        if (len(param.shape) == 1 or name.endswith(".bias") or name in skip_list) and name.startswith('encoder.'):
+            group_name = "no_decay_encoder"
+            this_weight_decay = 0.
+            scale = 1.0
+        elif len(param.shape) == 1 or name.endswith(".bias") or name in skip_list:
+            group_name = "no_decay_others"
+            this_weight_decay = 0.
+            scale = lr_scale
+        elif name.startswith('encoder.'):
+            group_name = "decay_encoder"
+            this_weight_decay = weight_decay
+            scale = 1.0
+        else:
+            group_name = "decay_others"
+            this_weight_decay = weight_decay
+            scale = lr_scale
+
+        if get_num_layer is not None:
+            layer_id = get_num_layer(name)
+            group_name = "layer_%d_%s" % (layer_id, group_name)
+        else:
+            layer_id = None
+
+        if group_name not in parameter_group_names:
+            if get_layer_scale is not None:
+                scale = get_layer_scale(layer_id) * scale
+
+            parameter_group_names[group_name] = {
+                "weight_decay": this_weight_decay,
+                "params": [],
+                "lr_scale": scale
+            }
+            parameter_group_vars[group_name] = {
+                "weight_decay": this_weight_decay,
+                "params": [],
+                "lr_scale": scale
+            }
+
+        parameter_group_vars[group_name]["params"].append(param)
+        parameter_group_names[group_name]["params"].append(name)
+    print("Param groups = %s" % json.dumps(parameter_group_names, indent=2))
+    return list(parameter_group_vars.values())
+
+
+def create_optimizer(args, model, get_num_layer=None, get_layer_scale=None, filter_bias_and_bn=True, skip_list=None,
+                     lr_scale=1.0):
+    opt_lower = args.opt.lower()
+    weight_decay = args.weight_decay
+    if weight_decay and filter_bias_and_bn:
+        skip = {}
+        if skip_list is not None:
+            skip = skip_list
+        elif hasattr(model, 'no_weight_decay'):
+            skip = model.no_weight_decay()
+        parameters = get_parameter_groups(model, weight_decay, skip, get_num_layer, get_layer_scale, lr_scale=lr_scale)
+        weight_decay = 0.
+    else:
+        parameters = model.parameters()
+
+    if 'fused' in opt_lower:
+        assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'
+
+    opt_args = dict(lr=args.lr, weight_decay=weight_decay)
+    if hasattr(args, 'opt_eps') and args.opt_eps is not None:
+        opt_args['eps'] = args.opt_eps
+    if hasattr(args, 'opt_betas') and args.opt_betas is not None:
+        opt_args['betas'] = args.opt_betas
+
+    print("optimizer settings:", opt_args)
+
+    opt_split = opt_lower.split('_')
+    opt_lower = opt_split[-1]
+    if opt_lower == 'sgd' or opt_lower == 'nesterov':
+        opt_args.pop('eps', None)
+        optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
+    elif opt_lower == 'momentum':
+        opt_args.pop('eps', None)
+        optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
+    elif opt_lower == 'adam':
+        optimizer = optim.Adam(parameters, **opt_args)
+    elif opt_lower == 'adamw':
+        optimizer = optim.AdamW(parameters, **opt_args)
+    elif opt_lower == 'nadam':
+        optimizer = Nadam(parameters, **opt_args)
+    elif opt_lower == 'radam':
+        optimizer = RAdam(parameters, **opt_args)
+    elif opt_lower == 'adamp':
+        optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
+    elif opt_lower == 'sgdp':
+        optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args)
+    elif opt_lower == 'adadelta':
+        optimizer = optim.Adadelta(parameters, **opt_args)
+    elif opt_lower == 'adafactor':
+        if not args.lr:
+            opt_args['lr'] = None
+        optimizer = Adafactor(parameters, **opt_args)
+    elif opt_lower == 'adahessian':
+        optimizer = Adahessian(parameters, **opt_args)
+    elif opt_lower == 'rmsprop':
+        optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
+    elif opt_lower == 'rmsproptf':
+        optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
+    elif opt_lower == 'novograd':
+        optimizer = NovoGrad(parameters, **opt_args)
+    elif opt_lower == 'nvnovograd':
+        optimizer = NvNovoGrad(parameters, **opt_args)
+    elif opt_lower == 'fusedsgd':
+        opt_args.pop('eps', None)
+        optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
+    elif opt_lower == 'fusedmomentum':
+        opt_args.pop('eps', None)
+        optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
+    elif opt_lower == 'fusedadam':
+        optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
+    elif opt_lower == 'fusedadamw':
+        optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
+    elif opt_lower == 'fusedlamb':
+        optimizer = FusedLAMB(parameters, **opt_args)
+    elif opt_lower == 'fusednovograd':
+        opt_args.setdefault('betas', (0.95, 0.98))
+        optimizer = FusedNovoGrad(parameters, **opt_args)
+    else:
+        # unknown optimizer name
+        raise ValueError("Invalid optimizer: %s" % opt_lower)
+
+    if len(opt_split) > 1:
+        if opt_split[0] == 'lookahead':
+            optimizer = Lookahead(optimizer)
+
+    return optimizer
diff --git a/Downstream/Spatial-Temporal-Action-Localization/rand_augment.py b/Downstream/Spatial-Temporal-Action-Localization/rand_augment.py
new file mode 100644
index 0000000..37c57d1
--- /dev/null
+++ b/Downstream/Spatial-Temporal-Action-Localization/rand_augment.py
@@ -0,0 +1,531 @@
+"""
+This implementation is based on
+https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/auto_augment.py
+published under an Apache License 2.0.
+ +COMMENT FROM ORIGINAL: +AutoAugment, RandAugment, and AugMix for PyTorch +This code implements the searched ImageNet policies with various tweaks and +improvements and does not include any of the search code. AA and RA +Implementation adapted from: + https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py +AugMix adapted from: + https://github.com/google-research/augmix +Papers: + AutoAugment: Learning Augmentation Policies from Data + https://arxiv.org/abs/1805.09501 + Learning Data Augmentation Strategies for Object Detection + https://arxiv.org/abs/1906.11172 + RandAugment: Practical automated data augmentation... + https://arxiv.org/abs/1909.13719 + AugMix: A Simple Data Processing Method to Improve Robustness and + Uncertainty https://arxiv.org/abs/1912.02781 + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import math +import numpy as np +import random +import re +import PIL +from PIL import Image, ImageEnhance, ImageOps + +_PIL_VER = tuple([int(x) for x in PIL.__version__.split(".")[:2]]) + +_FILL = (128, 128, 128) + +# This signifies the max integer that the controller RNN could predict for the +# augmentation scheme. +_MAX_LEVEL = 10.0 + +_HPARAMS_DEFAULT = { + "translate_const": 250, + "img_mean": _FILL, +} + +_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC) + + +def _interpolation(kwargs): + interpolation = kwargs.pop("resample", Image.BILINEAR) + if isinstance(interpolation, (list, tuple)): + return random.choice(interpolation) + else: + return interpolation + + +def _check_args_tf(kwargs): + if "fillcolor" in kwargs and _PIL_VER < (5, 0): + kwargs.pop("fillcolor") + kwargs["resample"] = _interpolation(kwargs) + + +def shear_x(img, factor, **kwargs): + _check_args_tf(kwargs) + return img.transform( + img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs + ) + + +def shear_y(img, factor, **kwargs): + _check_args_tf(kwargs) + return img.transform( + img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs + ) + + +def translate_x_rel(img, pct, **kwargs): + pixels = pct * img.size[0] + _check_args_tf(kwargs) + return img.transform( + img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs + ) + + +def translate_y_rel(img, pct, **kwargs): + pixels = pct * img.size[1] + _check_args_tf(kwargs) + return img.transform( + img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs + ) + + +def translate_x_abs(img, pixels, **kwargs): + _check_args_tf(kwargs) + return img.transform( + img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs + ) + + +def translate_y_abs(img, pixels, **kwargs): + _check_args_tf(kwargs) + return img.transform( + img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs + ) + + +def rotate(img, degrees, **kwargs): + _check_args_tf(kwargs) + if _PIL_VER >= (5, 2): + return img.rotate(degrees, **kwargs) + elif _PIL_VER >= (5, 0): + w, h = img.size + post_trans = (0, 0) + rotn_center = (w / 2.0, h / 2.0) + angle = -math.radians(degrees) + matrix = [ + round(math.cos(angle), 15), + round(math.sin(angle), 15), + 0.0, + round(-math.sin(angle), 15), + round(math.cos(angle), 15), + 0.0, + ] + + def transform(x, y, matrix): + (a, b, c, d, e, f) = matrix + return a * x + b * y + c, d * x + e * y + f + + matrix[2], matrix[5] = transform( + -rotn_center[0] - post_trans[0], + -rotn_center[1] - post_trans[1], + matrix, + ) + matrix[2] += rotn_center[0] + matrix[5] += rotn_center[1] + return img.transform(img.size, Image.AFFINE, matrix, **kwargs) + else: + return img.rotate(degrees, resample=kwargs["resample"]) + 
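+# Note: PIL's Image.transform(size, Image.AFFINE, (a, b, c, d, e, f)) expects the *inverse*
+# mapping, i.e. each output pixel (x, y) is sampled from the input location
+# (a*x + b*y + c, d*x + e*y + f). So shear_x(img, factor) above samples from (x + factor * y, y),
+# and the random sign flip applied by _randomly_negate() further below makes the shear/translate
+# direction symmetric in practice. A quick illustrative check (not part of the original file;
+# "frame.jpg" is a placeholder path):
+#   img = Image.open("frame.jpg")
+#   out = shear_x(img, 0.3, resample=Image.BILINEAR, fillcolor=_FILL)
+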
+ +def auto_contrast(img, **__): + return ImageOps.autocontrast(img) + + +def invert(img, **__): + return ImageOps.invert(img) + + +def equalize(img, **__): + return ImageOps.equalize(img) + + +def solarize(img, thresh, **__): + return ImageOps.solarize(img, thresh) + + +def solarize_add(img, add, thresh=128, **__): + lut = [] + for i in range(256): + if i < thresh: + lut.append(min(255, i + add)) + else: + lut.append(i) + if img.mode in ("L", "RGB"): + if img.mode == "RGB" and len(lut) == 256: + lut = lut + lut + lut + return img.point(lut) + else: + return img + + +def posterize(img, bits_to_keep, **__): + if bits_to_keep >= 8: + return img + return ImageOps.posterize(img, bits_to_keep) + + +def contrast(img, factor, **__): + return ImageEnhance.Contrast(img).enhance(factor) + + +def color(img, factor, **__): + return ImageEnhance.Color(img).enhance(factor) + + +def brightness(img, factor, **__): + return ImageEnhance.Brightness(img).enhance(factor) + + +def sharpness(img, factor, **__): + return ImageEnhance.Sharpness(img).enhance(factor) + + +def _randomly_negate(v): + """With 50% prob, negate the value""" + return -v if random.random() > 0.5 else v + + +def _rotate_level_to_arg(level, _hparams): + # range [-30, 30] + level = (level / _MAX_LEVEL) * 30.0 + level = _randomly_negate(level) + return (level,) + + +def _enhance_level_to_arg(level, _hparams): + # range [0.1, 1.9] + return ((level / _MAX_LEVEL) * 1.8 + 0.1,) + + +def _enhance_increasing_level_to_arg(level, _hparams): + # the 'no change' level is 1.0, moving away from that towards 0. or 2.0 increases the enhancement blend + # range [0.1, 1.9] + level = (level / _MAX_LEVEL) * 0.9 + level = 1.0 + _randomly_negate(level) + return (level,) + + +def _shear_level_to_arg(level, _hparams): + # range [-0.3, 0.3] + level = (level / _MAX_LEVEL) * 0.3 + level = _randomly_negate(level) + return (level,) + + +def _translate_abs_level_to_arg(level, hparams): + translate_const = hparams["translate_const"] + level = (level / _MAX_LEVEL) * float(translate_const) + level = _randomly_negate(level) + return (level,) + + +def _translate_rel_level_to_arg(level, hparams): + # default range [-0.45, 0.45] + translate_pct = hparams.get("translate_pct", 0.45) + level = (level / _MAX_LEVEL) * translate_pct + level = _randomly_negate(level) + return (level,) + + +def _posterize_level_to_arg(level, _hparams): + # As per Tensorflow TPU EfficientNet impl + # range [0, 4], 'keep 0 up to 4 MSB of original image' + # intensity/severity of augmentation decreases with level + return (int((level / _MAX_LEVEL) * 4),) + + +def _posterize_increasing_level_to_arg(level, hparams): + # As per Tensorflow models research and UDA impl + # range [4, 0], 'keep 4 down to 0 MSB of original image', + # intensity/severity of augmentation increases with level + return (4 - _posterize_level_to_arg(level, hparams)[0],) + + +def _posterize_original_level_to_arg(level, _hparams): + # As per original AutoAugment paper description + # range [4, 8], 'keep 4 up to 8 MSB of image' + # intensity/severity of augmentation decreases with level + return (int((level / _MAX_LEVEL) * 4) + 4,) + + +def _solarize_level_to_arg(level, _hparams): + # range [0, 256] + # intensity/severity of augmentation decreases with level + return (int((level / _MAX_LEVEL) * 256),) + + +def _solarize_increasing_level_to_arg(level, _hparams): + # range [0, 256] + # intensity/severity of augmentation increases with level + return (256 - _solarize_level_to_arg(level, _hparams)[0],) + + +def 
_solarize_add_level_to_arg(level, _hparams): + # range [0, 110] + return (int((level / _MAX_LEVEL) * 110),) + + +LEVEL_TO_ARG = { + "AutoContrast": None, + "Equalize": None, + "Invert": None, + "Rotate": _rotate_level_to_arg, + # There are several variations of the posterize level scaling in various Tensorflow/Google repositories/papers + "Posterize": _posterize_level_to_arg, + "PosterizeIncreasing": _posterize_increasing_level_to_arg, + "PosterizeOriginal": _posterize_original_level_to_arg, + "Solarize": _solarize_level_to_arg, + "SolarizeIncreasing": _solarize_increasing_level_to_arg, + "SolarizeAdd": _solarize_add_level_to_arg, + "Color": _enhance_level_to_arg, + "ColorIncreasing": _enhance_increasing_level_to_arg, + "Contrast": _enhance_level_to_arg, + "ContrastIncreasing": _enhance_increasing_level_to_arg, + "Brightness": _enhance_level_to_arg, + "BrightnessIncreasing": _enhance_increasing_level_to_arg, + "Sharpness": _enhance_level_to_arg, + "SharpnessIncreasing": _enhance_increasing_level_to_arg, + "ShearX": _shear_level_to_arg, + "ShearY": _shear_level_to_arg, + "TranslateX": _translate_abs_level_to_arg, + "TranslateY": _translate_abs_level_to_arg, + "TranslateXRel": _translate_rel_level_to_arg, + "TranslateYRel": _translate_rel_level_to_arg, +} + + +NAME_TO_OP = { + "AutoContrast": auto_contrast, + "Equalize": equalize, + "Invert": invert, + "Rotate": rotate, + "Posterize": posterize, + "PosterizeIncreasing": posterize, + "PosterizeOriginal": posterize, + "Solarize": solarize, + "SolarizeIncreasing": solarize, + "SolarizeAdd": solarize_add, + "Color": color, + "ColorIncreasing": color, + "Contrast": contrast, + "ContrastIncreasing": contrast, + "Brightness": brightness, + "BrightnessIncreasing": brightness, + "Sharpness": sharpness, + "SharpnessIncreasing": sharpness, + "ShearX": shear_x, + "ShearY": shear_y, + "TranslateX": translate_x_abs, + "TranslateY": translate_y_abs, + "TranslateXRel": translate_x_rel, + "TranslateYRel": translate_y_rel, +} + + +class AugmentOp: + """ + Apply for video. + """ + + def __init__(self, name, prob=0.5, magnitude=10, hparams=None): + hparams = hparams or _HPARAMS_DEFAULT + self.aug_fn = NAME_TO_OP[name] + self.level_fn = LEVEL_TO_ARG[name] + self.prob = prob + self.magnitude = magnitude + self.hparams = hparams.copy() + self.kwargs = { + "fillcolor": hparams["img_mean"] + if "img_mean" in hparams + else _FILL, + "resample": hparams["interpolation"] + if "interpolation" in hparams + else _RANDOM_INTERPOLATION, + } + + # If magnitude_std is > 0, we introduce some randomness + # in the usually fixed policy and sample magnitude from a normal distribution + # with mean `magnitude` and std-dev of `magnitude_std`. + # NOTE This is my own hack, being tested, not in papers or reference impls. 
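+        # magnitude_std is read from hparams; when it is > 0, __call__ below re-samples the
+        # per-op magnitude via random.gauss(magnitude, magnitude_std) and clips the result
+        # back into [0, _MAX_LEVEL] before converting it into op arguments.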
+ self.magnitude_std = self.hparams.get("magnitude_std", 0) + + def __call__(self, img_list): + if self.prob < 1.0 and random.random() > self.prob: + return img_list + magnitude = self.magnitude + if self.magnitude_std and self.magnitude_std > 0: + magnitude = random.gauss(magnitude, self.magnitude_std) + magnitude = min(_MAX_LEVEL, max(0, magnitude)) # clip to valid range + level_args = ( + self.level_fn(magnitude, self.hparams) + if self.level_fn is not None + else () + ) + + if isinstance(img_list, list): + return [ + self.aug_fn(img, *level_args, **self.kwargs) for img in img_list + ] + else: + return self.aug_fn(img_list, *level_args, **self.kwargs) + + +_RAND_TRANSFORMS = [ + "AutoContrast", + "Equalize", + "Invert", + "Rotate", + "Posterize", + "Solarize", + "SolarizeAdd", + "Color", + "Contrast", + "Brightness", + "Sharpness", + "ShearX", + "ShearY", + "TranslateXRel", + "TranslateYRel", +] + + +_RAND_INCREASING_TRANSFORMS = [ + "AutoContrast", + "Equalize", + "Invert", + "Rotate", + "PosterizeIncreasing", + "SolarizeIncreasing", + "SolarizeAdd", + "ColorIncreasing", + "ContrastIncreasing", + "BrightnessIncreasing", + "SharpnessIncreasing", + "ShearX", + "ShearY", + "TranslateXRel", + "TranslateYRel", +] + + +# These experimental weights are based loosely on the relative improvements mentioned in paper. +# They may not result in increased performance, but could likely be tuned to so. +_RAND_CHOICE_WEIGHTS_0 = { + "Rotate": 0.3, + "ShearX": 0.2, + "ShearY": 0.2, + "TranslateXRel": 0.1, + "TranslateYRel": 0.1, + "Color": 0.025, + "Sharpness": 0.025, + "AutoContrast": 0.025, + "Solarize": 0.005, + "SolarizeAdd": 0.005, + "Contrast": 0.005, + "Brightness": 0.005, + "Equalize": 0.005, + "Posterize": 0, + "Invert": 0, +} + + +def _select_rand_weights(weight_idx=0, transforms=None): + transforms = transforms or _RAND_TRANSFORMS + assert weight_idx == 0 # only one set of weights currently + rand_weights = _RAND_CHOICE_WEIGHTS_0 + probs = [rand_weights[k] for k in transforms] + probs /= np.sum(probs) + return probs + + +def rand_augment_ops(magnitude=10, hparams=None, transforms=None): + hparams = hparams or _HPARAMS_DEFAULT + transforms = transforms or _RAND_TRANSFORMS + return [ + AugmentOp(name, prob=0.5, magnitude=magnitude, hparams=hparams) + for name in transforms + ] + + +class RandAugment: + def __init__(self, ops, num_layers=2, choice_weights=None): + self.ops = ops + self.num_layers = num_layers + self.choice_weights = choice_weights + + def __call__(self, img): + # no replacement when using weighted choice + ops = np.random.choice( + self.ops, + self.num_layers, + replace=self.choice_weights is None, + p=self.choice_weights, + ) + for op in ops: + img = op(img) + return img + + +def rand_augment_transform(config_str, hparams): + """ + RandAugment: Practical automated data augmentation... - https://arxiv.org/abs/1909.13719 + + Create a RandAugment transform + :param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by + dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). 
The remaining + sections, not order sepecific determine + 'm' - integer magnitude of rand augment + 'n' - integer num layers (number of transform ops selected per image) + 'w' - integer probabiliy weight index (index of a set of weights to influence choice of op) + 'mstd' - float std deviation of magnitude noise applied + 'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0) + Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5 + 'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default magnitude of 10 and num_layers 2 + :param hparams: Other hparams (kwargs) for the RandAugmentation scheme + :return: A PyTorch compatible Transform + """ + magnitude = _MAX_LEVEL # default to _MAX_LEVEL for magnitude (currently 10) + num_layers = 2 # default to 2 ops per image + weight_idx = None # default to no probability weights for op choice + transforms = _RAND_TRANSFORMS + config = config_str.split("-") + assert config[0] == "rand" + config = config[1:] + for c in config: + cs = re.split(r"(\d.*)", c) + if len(cs) < 2: + continue + key, val = cs[:2] + if key == "mstd": + # noise param injected via hparams for now + hparams.setdefault("magnitude_std", float(val)) + elif key == "inc": + if bool(val): + transforms = _RAND_INCREASING_TRANSFORMS + elif key == "m": + magnitude = int(val) + elif key == "n": + num_layers = int(val) + elif key == "w": + weight_idx = int(val) + else: + assert NotImplementedError + ra_ops = rand_augment_ops( + magnitude=magnitude, hparams=hparams, transforms=transforms + ) + choice_weights = ( + None if weight_idx is None else _select_rand_weights(weight_idx) + ) + return RandAugment(ra_ops, num_layers, choice_weights=choice_weights) diff --git a/Downstream/Spatial-Temporal-Action-Localization/random_erasing.py b/Downstream/Spatial-Temporal-Action-Localization/random_erasing.py new file mode 100644 index 0000000..b46547b --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/random_erasing.py @@ -0,0 +1,173 @@ +""" +This implementation is based on +https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/random_erasing.py +pulished under an Apache License 2.0. +""" +import math +import random +import torch + + +def _get_pixels( + per_pixel, rand_color, patch_size, dtype=torch.float32, device="cuda" +): + # NOTE I've seen CUDA illegal memory access errors being caused by the normal_() + # paths, flip the order so normal is run on CPU if this becomes a problem + # Issue has been fixed in master https://github.com/pytorch/pytorch/issues/19508 + if per_pixel: + return torch.empty(patch_size, dtype=dtype, device=device).normal_() + elif rand_color: + return torch.empty( + (patch_size[0], 1, 1), dtype=dtype, device=device + ).normal_() + else: + return torch.zeros((patch_size[0], 1, 1), dtype=dtype, device=device) + + +class RandomErasing: + """Randomly selects a rectangle region in an image and erases its pixels. + 'Random Erasing Data Augmentation' by Zhong et al. + See https://arxiv.org/pdf/1708.04896.pdf + This variant of RandomErasing is intended to be applied to either a batch + or single image tensor after it has been normalized by dataset mean and std. + Args: + probability: Probability that the Random Erasing operation will be performed. + min_area: Minimum percentage of erased area wrt input image area. + max_area: Maximum percentage of erased area wrt input image area. + min_aspect: Minimum aspect ratio of erased area. 
+ mode: pixel color mode, one of 'const', 'rand', or 'pixel' + 'const' - erase block is constant color of 0 for all channels + 'rand' - erase block is same per-channel random (normal) color + 'pixel' - erase block is per-pixel random (normal) color + max_count: maximum number of erasing blocks per image, area per box is scaled by count. + per-image count is randomly chosen between 1 and this value. + """ + + def __init__( + self, + probability=0.5, + min_area=0.02, + max_area=1 / 3, + min_aspect=0.3, + max_aspect=None, + mode="const", + min_count=1, + max_count=None, + num_splits=0, + device="cuda", + cube=True, + ): + self.probability = probability + self.min_area = min_area + self.max_area = max_area + max_aspect = max_aspect or 1 / min_aspect + self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect)) + self.min_count = min_count + self.max_count = max_count or min_count + self.num_splits = num_splits + mode = mode.lower() + self.rand_color = False + self.per_pixel = False + self.cube = cube + if mode == "rand": + self.rand_color = True # per block random normal + elif mode == "pixel": + self.per_pixel = True # per pixel random normal + else: + assert not mode or mode == "const" + self.device = device + + def _erase(self, img, chan, img_h, img_w, dtype): + if random.random() > self.probability: + return + area = img_h * img_w + count = ( + self.min_count + if self.min_count == self.max_count + else random.randint(self.min_count, self.max_count) + ) + for _ in range(count): + for _ in range(10): + target_area = ( + random.uniform(self.min_area, self.max_area) * area / count + ) + aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio)) + h = int(round(math.sqrt(target_area * aspect_ratio))) + w = int(round(math.sqrt(target_area / aspect_ratio))) + if w < img_w and h < img_h: + top = random.randint(0, img_h - h) + left = random.randint(0, img_w - w) + img[:, top : top + h, left : left + w] = _get_pixels( + self.per_pixel, + self.rand_color, + (chan, h, w), + dtype=dtype, + device=self.device, + ) + break + + def _erase_cube( + self, + img, + batch_start, + batch_size, + chan, + img_h, + img_w, + dtype, + ): + if random.random() > self.probability: + return + area = img_h * img_w + count = ( + self.min_count + if self.min_count == self.max_count + else random.randint(self.min_count, self.max_count) + ) + for _ in range(count): + for _ in range(100): + target_area = ( + random.uniform(self.min_area, self.max_area) * area / count + ) + aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio)) + h = int(round(math.sqrt(target_area * aspect_ratio))) + w = int(round(math.sqrt(target_area / aspect_ratio))) + if w < img_w and h < img_h: + top = random.randint(0, img_h - h) + left = random.randint(0, img_w - w) + for i in range(batch_start, batch_size): + img_instance = img[i] + img_instance[ + :, top : top + h, left : left + w + ] = _get_pixels( + self.per_pixel, + self.rand_color, + (chan, h, w), + dtype=dtype, + device=self.device, + ) + break + + def __call__(self, input): + if len(input.size()) == 3: + self._erase(input, *input.size(), input.dtype) + else: + batch_size, chan, img_h, img_w = input.size() + # skip first slice of batch if num_splits is set (for clean portion of samples) + batch_start = ( + batch_size // self.num_splits if self.num_splits > 1 else 0 + ) + if self.cube: + self._erase_cube( + input, + batch_start, + batch_size, + chan, + img_h, + img_w, + input.dtype, + ) + else: + for i in range(batch_start, batch_size): + self._erase(input[i], chan, img_h, 
img_w, input.dtype) + return input diff --git a/Downstream/Spatial-Temporal-Action-Localization/run_class_finetuning.py b/Downstream/Spatial-Temporal-Action-Localization/run_class_finetuning.py new file mode 100644 index 0000000..3b7c24d --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/run_class_finetuning.py @@ -0,0 +1,505 @@ +import argparse +import datetime +from operator import is_ +import numpy as np +import time +import torch +import torch.backends.cudnn as cudnn + +import json +import os +from functools import partial +from pathlib import Path +from collections import OrderedDict + +from timm.data.mixup import Mixup +from timm.models import create_model +from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy +from timm.utils import ModelEma +from optim_factory import create_optimizer, get_parameter_groups, LayerDecayValueAssigner + +from datasets import build_ava_dataset, BatchCollator, build_kinetics_dataset, build_ak_dataset +from engine_for_finetuning import train_one_epoch, validation_one_epoch, final_test, merge +from utils import NativeScalerWithGradNormCount as NativeScaler +from utils import multiple_samples_collate +import utils +import modeling_finetune +from data.transforms import build_transforms + + +def get_args(): + parser = argparse.ArgumentParser('VideoMAE fine-tuning and evaluation script for video classification', + add_help=False) + parser.add_argument('--batch_size', default=64, type=int) # + parser.add_argument('--epochs', default=30, type=int) # + parser.add_argument('--update_freq', default=1, type=int) + parser.add_argument('--save_ckpt_freq', default=100, type=int) # + parser.add_argument('--val_freq', default=2, type=int) # + parser.add_argument('--enable_randaug', action='store_true', default=False) # + parser.add_argument('--aug_type',default='strong', type=str, choices=['strong','default','default_w_cutout','default_wo_affine']) # + # Model parameters + parser.add_argument('--model', default='vit_base_patch16_224', type=str, metavar='MODEL', + help='Name of model to train') + parser.add_argument('--tubelet_size', type=int, default=2) + parser.add_argument('--input_size', default=224, type=int, + help='videos input size') + + parser.add_argument('--drop', type=float, default=0.0, metavar='PCT', + help='Dropout rate (default: 0.)') + parser.add_argument('--attn_drop_rate', type=float, default=0.0, metavar='PCT', + help='Attention dropout rate (default: 0.)') + parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT', + help='Drop path rate (default: 0.1)') + + parser.add_argument('--disable_eval_during_finetuning', action='store_true', default=False) + parser.add_argument('--model_ema', action='store_true', default=False) + parser.add_argument('--model_ema_decay', type=float, default=0.9999, help='') + parser.add_argument('--model_ema_force_cpu', action='store_true', default=False, help='') + parser.add_argument('--head_type',type=str, default='linear', choices=['linear', 'acar']) + + # Optimizer parameters + parser.add_argument('--accumulation_step',type=int,default=1) + parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER', + help='Optimizer (default: "adamw"') + parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON', + help='Optimizer Epsilon (default: 1e-8)') + parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA', + help='Optimizer Betas (default: None, use opt default)') + parser.add_argument('--clip_grad', type=float, 
default=None, metavar='NORM', + help='Clip gradient norm (default: None, no clipping)') + parser.add_argument('--momentum', type=float, default=0.9, metavar='M', + help='SGD momentum (default: 0.9)') + parser.add_argument('--weight_decay', type=float, default=0.05, + help='weight decay (default: 0.05)') + parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the + weight decay. We use a cosine schedule for WD and using a larger decay by + the end of training improves performance for ViTs.""") + + parser.add_argument('--lr', type=float, default=1e-3, metavar='LR', + help='learning rate (default: 1e-3)') + parser.add_argument('--layer_decay', type=float, default=0.75) + + parser.add_argument('--warmup_lr', type=float, default=1e-6, metavar='LR', + help='warmup learning rate (default: 1e-6)') + parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR', + help='lower lr bound for cyclic schedulers that hit 0 (1e-5)') + + parser.add_argument('--warmup_epochs', type=int, default=2, metavar='N', # + help='epochs to warmup LR, if scheduler supports') + parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N', + help='num of steps to warmup LR, will overload warmup_epochs if set > 0') + + # Augmentation parameters + parser.add_argument('--color_jitter', type=float, default=0.4, metavar='PCT', # no use + help='Color jitter factor (default: 0.4)') + parser.add_argument('--num_sample', type=int, default=2, # 1 + help='Repeated_aug (default: 2)') + parser.add_argument('--aa', type=str, default='rand-m7-n4-mstd0.5-inc1', metavar='NAME', # no use + help='Use AutoAugment policy. "v0" or "original". " + "(default: rand-m7-n4-mstd0.5-inc1)'), + parser.add_argument('--smoothing', type=float, default=0.1, # 0. 
+ help='Label smoothing (default: 0.1)') + parser.add_argument('--train_interpolation', type=str, default='bicubic', # + help='Training interpolation (random, bilinear, bicubic default: "bicubic")') + + # Evaluation parameters, no use + parser.add_argument('--crop_pct', type=float, default=None) + parser.add_argument('--short_side_size', type=int, default=224) + parser.add_argument('--test_num_segment', type=int, default=5) + parser.add_argument('--test_num_crop', type=int, default=3) + + # Random Erase params, no use + parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT', + help='Random erase prob (default: 0.25)') + parser.add_argument('--remode', type=str, default='pixel', + help='Random erase mode (default: "pixel")') + parser.add_argument('--recount', type=int, default=1, + help='Random erase count (default: 1)') + parser.add_argument('--resplit', action='store_true', default=False, + help='Do not random erase first (clean) augmentation split') + + # Mixup params, no use + parser.add_argument('--mixup', type=float, default=0.8, + help='mixup alpha, mixup enabled if > 0.') + parser.add_argument('--cutmix', type=float, default=1.0, + help='cutmix alpha, cutmix enabled if > 0.') + parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None, + help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)') + parser.add_argument('--mixup_prob', type=float, default=1.0, + help='Probability of performing mixup or cutmix when either/both is enabled') + parser.add_argument('--mixup_switch_prob', type=float, default=0.5, + help='Probability of switching to cutmix when both mixup and cutmix enabled') + parser.add_argument('--mixup_mode', type=str, default='batch', + help='How to apply mixup/cutmix params. 
Per "batch", "pair", or "elem"') + + # Finetuning params + parser.add_argument('--finetune', default='', help='finetune from checkpoint') + parser.add_argument('--model_key', default='model|module', type=str) + parser.add_argument('--model_prefix', default='', type=str) + parser.add_argument('--init_scale', default=0.001, type=float) # head init + parser.add_argument('--use_mean_pooling', action='store_true') # cls_token + parser.set_defaults(use_mean_pooling=True) + parser.add_argument('--use_cls', action='store_false', dest='use_mean_pooling') + + # Dataset parameters + parser.add_argument('--data_path', default='/path/to/list_kinetics-400', type=str, + help='dataset path') # + parser.add_argument('--eval_data_path', default=None, type=str, # + help='dataset path for evaluation') + parser.add_argument('--nb_classes', default=80, type=int, # + help='number of the classification types') + parser.add_argument('--imagenet_default_mean_and_std', default=True, action='store_true') + parser.add_argument('--num_segments', type=int, default=1) + parser.add_argument('--num_frames', type=int, default=16) + parser.add_argument('--sampling_rate', type=int, default=4) + parser.add_argument('--data_set', default='ava', + choices=['ava','ava-kinetics'], + type=str, help='dataset') # + parser.add_argument('--output_dir', default='', # + help='path where to save, empty for no saving') + parser.add_argument('--log_dir', default=None, # + help='path where to tensorboard log') + parser.add_argument('--device', default='cuda', + help='device to use for training / testing') + parser.add_argument('--seed', default=0, type=int) + parser.add_argument('--resume', default='', + help='resume from checkpoint') + parser.add_argument('--auto_resume', action='store_true') + parser.add_argument('--no_auto_resume', action='store_false', dest='auto_resume') + parser.set_defaults(auto_resume=True) + + parser.add_argument('--save_ckpt', action='store_true') + parser.add_argument('--no_save_ckpt', action='store_false', dest='save_ckpt') + parser.set_defaults(save_ckpt=True) + + parser.add_argument('--start_epoch', default=0, type=int, metavar='N', + help='start epoch') + parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') + parser.add_argument('--dist_eval', action='store_true', default=False, # + help='Enabling distributed evaluation') + parser.add_argument('--num_workers', default=10, type=int) + parser.add_argument('--pin_mem', action='store_true', + help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') + parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem') + parser.set_defaults(pin_mem=True) + + # distributed training parameters + parser.add_argument('--world_size', default=1, type=int, + help='number of distributed processes') + parser.add_argument('--local_rank', default=-1, type=int) + parser.add_argument('--dist_on_itp', action='store_true') + parser.add_argument('--dist_url', default='env://', + help='url used to set up distributed training') + + parser.add_argument('--eval_kinetics',action='store_true',default=False) + parser.add_argument('--enable_deepspeed', action='store_true', default=False) + + known_args, _ = parser.parse_known_args() + + if known_args.enable_deepspeed: + try: + import deepspeed + from deepspeed import DeepSpeedConfig + parser = deepspeed.add_config_arguments(parser) + ds_init = deepspeed.initialize + except: + print("Please 'pip install deepspeed'") + exit(0) + else: + ds_init = None + + return 
parser.parse_args(), ds_init + + +def main(args, ds_init): + utils.init_distributed_mode(args) + + if ds_init is not None: + utils.create_ds_config(args) + + print(args) + + device = torch.device(args.device) + + # fix the seed for reproducibility + seed = args.seed + utils.get_rank() + torch.manual_seed(seed) + np.random.seed(seed) + + cudnn.benchmark = True + + transform_train = build_transforms(is_train=True,args=args) + transform_val = build_transforms(is_train=False,args=args) + + if args.data_set == 'ava': + dataset_train = build_ava_dataset(is_train=True, transforms=transform_train) + else: + dataset_train = build_ak_dataset(is_train=True, transforms=transform_train) + + if args.eval_kinetics: + dataset_val = build_kinetics_dataset(is_train=False, transforms=transform_val) + else: + dataset_val = build_ava_dataset(is_train=False, transforms=transform_val) + + num_tasks = utils.get_world_size() + global_rank = utils.get_rank() + sampler_train = torch.utils.data.DistributedSampler( + dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True + ) + print("Sampler_train = %s" % str(sampler_train)) + if args.dist_eval: + if len(dataset_val) % num_tasks != 0: + print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. ' + 'This will slightly alter validation results as extra duplicate entries are added to achieve ' + 'equal num of samples per-process.') + sampler_val = torch.utils.data.DistributedSampler( + dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False) + # sampler_test = torch.utils.data.DistributedSampler( + # dataset_test, num_replicas=num_tasks, rank=global_rank, shuffle=False) + else: # + sampler_val = torch.utils.data.SequentialSampler(dataset_val) + + if global_rank == 0 and args.log_dir is not None: + os.makedirs(args.log_dir, exist_ok=True) + log_writer = utils.TensorboardLogger(log_dir=args.log_dir) + else: + log_writer = None + + collate_func = BatchCollator(size_divisible=16) + data_loader_train = torch.utils.data.DataLoader( + dataset_train, sampler=sampler_train, + batch_size=args.batch_size, # 8 + num_workers=args.num_workers, + pin_memory=args.pin_mem, + drop_last=True, + collate_fn=collate_func, + ) + data_loader_train.num_samples = len(dataset_train) + + if dataset_val is not None: + data_loader_val = torch.utils.data.DataLoader( + dataset_val, sampler=sampler_val, + batch_size=4, + num_workers=args.num_workers, + pin_memory=args.pin_mem, + drop_last=False, + collate_fn=collate_func, + ) + data_loader_val.num_samples = len(dataset_val) + else: + data_loader_val = None + + mixup_fn = None + + model = create_model( + args.model, + pretrained=False, + num_classes=args.nb_classes, + all_frames=args.num_frames * args.num_segments, + tubelet_size=args.tubelet_size, + drop_rate=args.drop, + drop_path_rate=args.drop_path, + attn_drop_rate=args.attn_drop_rate, + drop_block_rate=None, + use_mean_pooling=args.use_mean_pooling, + init_scale=args.init_scale, + head_type=args.head_type + ) + + patch_size = model.patch_embed.patch_size # 16 + print("Patch size = %s" % str(patch_size)) + # no use + args.window_size = (args.num_frames // 2, args.input_size // patch_size[0], args.input_size // patch_size[1]) + args.patch_size = patch_size + + if args.finetune: + if args.finetune.startswith('https'): + checkpoint = torch.hub.load_state_dict_from_url( + args.finetune, map_location='cpu', check_hash=True) + else: + checkpoint = torch.load(args.finetune, map_location='cpu') + + print("Load ckpt from %s" % 
args.finetune) + checkpoint_model = None + for model_key in args.model_key.split('|'): + if model_key in checkpoint: + checkpoint_model = checkpoint[model_key] + print("Load state_dict by model_key = %s" % model_key) + break + if checkpoint_model is None: + checkpoint_model = checkpoint + state_dict = model.state_dict() + try: + for k in ['head.weight', 'head.bias']: # + if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape: + print(f"Removing key {k} from pretrained checkpoint") + del checkpoint_model[k] + except KeyError: + del checkpoint_model['head.weight'] + del checkpoint_model['head.bias'] + all_keys = list(checkpoint_model.keys()) + new_dict = OrderedDict() + for key in all_keys: + if key.startswith('backbone.'): + new_dict[key[9:]] = checkpoint_model[key] + elif key.startswith('encoder.'): + new_dict[key[8:]] = checkpoint_model[key] + else: + new_dict[key] = checkpoint_model[key] + checkpoint_model = new_dict + + # interpolate position embedding + if 'pos_embed' in checkpoint_model: + pos_embed_checkpoint = checkpoint_model['pos_embed'] + embedding_size = pos_embed_checkpoint.shape[-1] # channel dim + num_patches = model.patch_embed.num_patches # + num_extra_tokens = model.pos_embed.shape[-2] - num_patches # 0/1 + + # height (== width) for the checkpoint position embedding + orig_size = int(((pos_embed_checkpoint.shape[-2] - num_extra_tokens) // ( + args.num_frames // model.patch_embed.tubelet_size)) ** 0.5) + # height (== width) for the new position embedding + new_size = int((num_patches // (args.num_frames // model.patch_embed.tubelet_size)) ** 0.5) + # class_token and dist_token are kept unchanged + if orig_size != new_size: + print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size)) + extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] + # only the position tokens are interpolated + pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] + # B, L, C -> BT, H, W, C -> BT, C, H, W + pos_tokens = pos_tokens.reshape(-1, args.num_frames // model.patch_embed.tubelet_size, orig_size, + orig_size, embedding_size) + pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) + pos_tokens = torch.nn.functional.interpolate( + pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False) + # BT, C, H, W -> BT, H, W, C -> B, T, H, W, C + pos_tokens = pos_tokens.permute(0, 2, 3, 1).reshape(-1, + args.num_frames // model.patch_embed.tubelet_size, + new_size, new_size, embedding_size) + pos_tokens = pos_tokens.flatten(1, 3) # B, L, C + new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) + checkpoint_model['pos_embed'] = new_pos_embed + + utils.load_state_dict(model, checkpoint_model, prefix=args.model_prefix) + + model.to(device) + + model_ema = None + + model_without_ddp = model + n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) + + print("Model = %s" % str(model_without_ddp)) + print('number of params:', n_parameters) + + total_batch_size = args.batch_size * args.update_freq * utils.get_world_size() + num_training_steps_per_epoch = len(dataset_train) // total_batch_size + args.lr = args.lr * total_batch_size / 256 + args.min_lr = args.min_lr * total_batch_size / 256 + args.warmup_lr = args.warmup_lr * total_batch_size / 256 + print("LR = %.8f" % args.lr) + print("Batch size = %d" % total_batch_size) + print("Update frequent = %d" % args.update_freq) + print("Number of training examples = %d" % len(dataset_train)) + print("Number of training 
steps per epoch = %d" % num_training_steps_per_epoch)
+
+    num_layers = model_without_ddp.get_num_layers()
+    if args.layer_decay < 1.0:
+        assigner = LayerDecayValueAssigner(
+            list(args.layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2)))
+    else:
+        assigner = None
+
+    if assigner is not None:
+        print("Assigned values = %s" % str(assigner.values))
+
+    skip_weight_decay_list = model.no_weight_decay()
+    print("Skip weight decay list: ", skip_weight_decay_list)
+
+    if args.enable_deepspeed:
+        loss_scaler = None
+        optimizer_params = get_parameter_groups(
+            model, args.weight_decay, skip_weight_decay_list,
+            assigner.get_layer_id if assigner is not None else None,
+            assigner.get_scale if assigner is not None else None)
+        model, optimizer, _, _ = ds_init(
+            args=args, model=model, model_parameters=optimizer_params, dist_init_required=not args.distributed,
+        )
+
+        print("model.gradient_accumulation_steps() = %d" % model.gradient_accumulation_steps())
+        assert model.gradient_accumulation_steps() == args.update_freq
+    else:
+        if args.distributed:
+            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
+            model_without_ddp = model.module
+
+        optimizer = create_optimizer(
+            args, model_without_ddp, skip_list=skip_weight_decay_list,
+            get_num_layer=assigner.get_layer_id if assigner is not None else None,
+            get_layer_scale=assigner.get_scale if assigner is not None else None)
+        loss_scaler = NativeScaler()
+
+    print("Use step level LR scheduler!")
+    lr_schedule_values = utils.cosine_scheduler(
+        args.lr, args.min_lr, args.epochs, num_training_steps_per_epoch,
+        warmup_epochs=args.warmup_epochs, warmup_steps=args.warmup_steps,
+    )
+    if args.weight_decay_end is None:
+        args.weight_decay_end = args.weight_decay
+    wd_schedule_values = utils.cosine_scheduler(
+        args.weight_decay, args.weight_decay_end, args.epochs, num_training_steps_per_epoch)
+    print("Max WD = %.7f, Min WD = %.7f" % (max(wd_schedule_values), min(wd_schedule_values)))
+
+    utils.auto_load_model(
+        args=args, model=model, model_without_ddp=model_without_ddp,
+        optimizer=optimizer, loss_scaler=loss_scaler, model_ema=model_ema)
+
+    if args.eval:
+        validation_one_epoch(data_loader_val, model, device, args.output_dir, args.start_epoch, log_writer)
+        exit(0)
+
+    print(f"Start training for {args.epochs} epochs")
+    start_time = time.time()
+    for epoch in range(args.start_epoch, args.epochs):
+        if args.distributed:
+            data_loader_train.sampler.set_epoch(epoch)
+        if log_writer is not None:
+            log_writer.set_step(epoch * num_training_steps_per_epoch * args.update_freq)
+        train_stats = train_one_epoch(
+            model, data_loader_train, optimizer,
+            device, epoch, loss_scaler, args.clip_grad, model_ema, mixup_fn,
+            log_writer=log_writer, start_steps=epoch * num_training_steps_per_epoch,
+            lr_schedule_values=lr_schedule_values, wd_schedule_values=wd_schedule_values,
+            num_training_steps_per_epoch=num_training_steps_per_epoch, update_freq=args.update_freq,
+        )
+        # 1. save ckpt
+        if args.output_dir and args.save_ckpt:
+            if (epoch + 1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs:
+                utils.save_model(
+                    args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
+                    loss_scaler=loss_scaler, epoch=epoch, model_ema=model_ema)
+        # 2.
log + log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, + 'epoch': epoch, + 'n_parameters': n_parameters} + + if args.output_dir and utils.is_main_process(): + if log_writer is not None: + log_writer.flush() + with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f: + f.write(json.dumps(log_stats) + "\n") + # 3. eval + if data_loader_val is not None and ( + (epoch + 1) % args.val_freq == 0 or epoch + 1 == args.epochs): + validation_one_epoch(data_loader_val, model, device, args.output_dir, epoch, log_writer) + torch.distributed.barrier() + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('Training time {}'.format(total_time_str)) + + +if __name__ == '__main__': + opts, ds_init = get_args() + if opts.output_dir: + Path(opts.output_dir).mkdir(parents=True, exist_ok=True) + main(opts, ds_init) diff --git a/Downstream/Spatial-Temporal-Action-Localization/run_mae_pretraining.py b/Downstream/Spatial-Temporal-Action-Localization/run_mae_pretraining.py new file mode 100644 index 0000000..748eed6 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/run_mae_pretraining.py @@ -0,0 +1,260 @@ +import argparse +import datetime +import numpy as np +import time +import torch +import torch.backends.cudnn as cudnn +import json +import os +from pathlib import Path +from timm.models import create_model +from optim_factory import create_optimizer +from datasets import build_pretraining_dataset +from engine_for_pretraining import train_one_epoch +from utils import NativeScalerWithGradNormCount as NativeScaler +import utils +import modeling_pretrain + + +def get_args(): + parser = argparse.ArgumentParser('VideoMAE pre-training script', add_help=False) + parser.add_argument('--batch_size', default=64, type=int) + parser.add_argument('--epochs', default=800, type=int) + parser.add_argument('--save_ckpt_freq', default=50, type=int) + + # Model parameters + parser.add_argument('--model', default='pretrain_videomae_base_patch16_224', type=str, metavar='MODEL', + help='Name of model to train') + + parser.add_argument('--decoder_depth', default=4, type=int, + help='depth of decoder') + + parser.add_argument('--mask_type', default='tube', choices=['random', 'tube'], + type=str, help='masked strategy of video tokens/patches') + + parser.add_argument('--mask_ratio', default=0.75, type=float, + help='ratio of the visual tokens/patches need be masked') + + parser.add_argument('--input_size', default=224, type=int, + help='videos input size for backbone') + + parser.add_argument('--drop_path', type=float, default=0.0, metavar='PCT', + help='Drop path rate (default: 0.1)') + + parser.add_argument('--normlize_target', default=True, type=bool, + help='normalized the target patch pixels') + + # Optimizer parameters + parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER', + help='Optimizer (default: "adamw"') + parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON', + help='Optimizer Epsilon (default: 1e-8)') + parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA', + help='Optimizer Betas (default: None, use opt default)') + parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM', + help='Clip gradient norm (default: None, no clipping)') + parser.add_argument('--momentum', type=float, default=0.9, metavar='M', + help='SGD momentum (default: 0.9)') + parser.add_argument('--weight_decay', type=float, default=0.05, 
+ help='weight decay (default: 0.05)') + parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the + weight decay. We use a cosine schedule for WD. + (Set the same value with args.weight_decay to keep weight decay no change)""") + + parser.add_argument('--lr', type=float, default=1.5e-4, metavar='LR', + help='learning rate (default: 1.5e-4)') + parser.add_argument('--warmup_lr', type=float, default=1e-6, metavar='LR', + help='warmup learning rate (default: 1e-6)') + parser.add_argument('--min_lr', type=float, default=1e-5, metavar='LR', + help='lower lr bound for cyclic schedulers that hit 0 (1e-5)') + + parser.add_argument('--warmup_epochs', type=int, default=40, metavar='N', + help='epochs to warmup LR, if scheduler supports') + parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N', + help='epochs to warmup LR, if scheduler supports') + + # Augmentation parameters + parser.add_argument('--color_jitter', type=float, default=0.0, metavar='PCT', + help='Color jitter factor (default: 0.4)') + parser.add_argument('--train_interpolation', type=str, default='bicubic', + help='Training interpolation (random, bilinear, bicubic default: "bicubic")') + + # Dataset parameters + parser.add_argument('--data_path', default='/path/to/list_kinetics-400', type=str, + help='dataset path') + parser.add_argument('--imagenet_default_mean_and_std', default=True, action='store_true') + parser.add_argument('--num_frames', type=int, default=16) + parser.add_argument('--sampling_rate', type=int, default=4) + parser.add_argument('--output_dir', default='', + help='path where to save, empty for no saving') + parser.add_argument('--log_dir', default=None, + help='path where to tensorboard log') + parser.add_argument('--device', default='cuda', + help='device to use for training / testing') + parser.add_argument('--seed', default=0, type=int) + parser.add_argument('--resume', default='', help='resume from checkpoint') + parser.add_argument('--auto_resume', action='store_true') + parser.add_argument('--no_auto_resume', action='store_false', dest='auto_resume') + parser.set_defaults(auto_resume=True) + + parser.add_argument('--start_epoch', default=0, type=int, metavar='N', + help='start epoch') + parser.add_argument('--num_workers', default=10, type=int) + parser.add_argument('--pin_mem', action='store_true', + help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') + parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem', + help='') + parser.set_defaults(pin_mem=True) + + # distributed training parameters + parser.add_argument('--world_size', default=1, type=int, + help='number of distributed processes') + parser.add_argument('--local_rank', default=-1, type=int) + parser.add_argument('--dist_on_itp', action='store_true') + parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training') + + return parser.parse_args() + + +def get_model(args): + print(f"Creating model: {args.model}") + model = create_model( + args.model, + pretrained=False, + drop_path_rate=args.drop_path, + drop_block_rate=None, + decoder_depth=args.decoder_depth + ) + return model + + +def main(args): + utils.init_distributed_mode(args) + + print(args) + + device = torch.device(args.device) + + # fix the seed for reproducibility + seed = args.seed + utils.get_rank() + torch.manual_seed(seed) + np.random.seed(seed) + + cudnn.benchmark = True + + model = get_model(args) + patch_size = 
model.encoder.patch_embed.patch_size + print("Patch size = %s" % str(patch_size)) + args.window_size = (args.num_frames // 2, args.input_size // patch_size[0], args.input_size // patch_size[1]) + args.patch_size = patch_size + + # get dataset + dataset_train = build_pretraining_dataset(args) + + num_tasks = utils.get_world_size() + global_rank = utils.get_rank() + sampler_rank = global_rank + num_training_steps_per_epoch = len(dataset_train) // args.batch_size // num_tasks + + sampler_train = torch.utils.data.DistributedSampler( + dataset_train, num_replicas=num_tasks, rank=sampler_rank, shuffle=True + ) + print("Sampler_train = %s" % str(sampler_train)) + + if global_rank == 0 and args.log_dir is not None: + os.makedirs(args.log_dir, exist_ok=True) + log_writer = utils.TensorboardLogger(log_dir=args.log_dir) + else: + log_writer = None + + data_loader_train = torch.utils.data.DataLoader( + dataset_train, sampler=sampler_train, + batch_size=args.batch_size, + num_workers=args.num_workers, + pin_memory=args.pin_mem, + drop_last=True, + worker_init_fn=utils.seed_worker + ) + + model.to(device) + model_without_ddp = model + n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) + + print("Model = %s" % str(model_without_ddp)) + print('number of params: {} M'.format(n_parameters / 1e6)) + + total_batch_size = args.batch_size * utils.get_world_size() + + args.lr = args.lr * total_batch_size / 256 + args.min_lr = args.min_lr * total_batch_size / 256 + args.warmup_lr = args.warmup_lr * total_batch_size / 256 + print("LR = %.8f" % args.lr) + print("Batch size = %d" % total_batch_size) + print("Number of training steps = %d" % num_training_steps_per_epoch) + print("Number of training examples per epoch = %d" % (total_batch_size * num_training_steps_per_epoch)) + + if args.distributed: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True) + model_without_ddp = model.module + + optimizer = create_optimizer( + args, model_without_ddp) + loss_scaler = NativeScaler() + + print("Use step level LR & WD scheduler!") + lr_schedule_values = utils.cosine_scheduler( + args.lr, args.min_lr, args.epochs, num_training_steps_per_epoch, + warmup_epochs=args.warmup_epochs, warmup_steps=args.warmup_steps, + ) + if args.weight_decay_end is None: + args.weight_decay_end = args.weight_decay + wd_schedule_values = utils.cosine_scheduler( + args.weight_decay, args.weight_decay_end, args.epochs, num_training_steps_per_epoch) + print("Max WD = %.7f, Min WD = %.7f" % (max(wd_schedule_values), min(wd_schedule_values))) + + # resume + utils.auto_load_model( + args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler) + torch.cuda.empty_cache() + print(f"Start training for {args.epochs} epochs") + start_time = time.time() + for epoch in range(args.start_epoch, args.epochs): + if args.distributed: + data_loader_train.sampler.set_epoch(epoch) + if log_writer is not None: + log_writer.set_step(epoch * num_training_steps_per_epoch) + train_stats = train_one_epoch( + model, data_loader_train, + optimizer, device, epoch, loss_scaler, + args.clip_grad, log_writer=log_writer, + start_steps=epoch * num_training_steps_per_epoch, + lr_schedule_values=lr_schedule_values, + wd_schedule_values=wd_schedule_values, + patch_size=patch_size[0], + normlize_target=args.normlize_target, + ) + if args.output_dir: + if (epoch + 1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs: + utils.save_model( + args=args, 
model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, + loss_scaler=loss_scaler, epoch=epoch) + + log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, + 'epoch': epoch, 'n_parameters': n_parameters} + + if args.output_dir and utils.is_main_process(): + if log_writer is not None: + log_writer.flush() + with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f: + f.write(json.dumps(log_stats) + "\n") + + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('Training time {}'.format(total_time_str)) + + +if __name__ == '__main__': + opts = get_args() + if opts.output_dir: + Path(opts.output_dir).mkdir(parents=True, exist_ok=True) + main(opts) diff --git a/Downstream/Spatial-Temporal-Action-Localization/run_videomae_vis.py b/Downstream/Spatial-Temporal-Action-Localization/run_videomae_vis.py new file mode 100644 index 0000000..b109762 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/run_videomae_vis.py @@ -0,0 +1,183 @@ +# -*- coding: utf-8 -*- +import argparse +import numpy as np +import torch +import torch.backends.cudnn as cudnn +from PIL import Image +from pathlib import Path +from timm.models import create_model +from datasets import DataAugmentationForVideoMAE +from torchvision.transforms import ToPILImage +from einops import rearrange +from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from decord import VideoReader, cpu +from torchvision import transforms +from transforms import * +from masking_generator import TubeMaskingGenerator + +class DataAugmentationForVideoMAE(object): + def __init__(self, args): + self.input_mean = [0.485, 0.456, 0.406] # IMAGENET_DEFAULT_MEAN + self.input_std = [0.229, 0.224, 0.225] # IMAGENET_DEFAULT_STD + normalize = GroupNormalize(self.input_mean, self.input_std) + self.train_augmentation = GroupCenterCrop(args.input_size) + self.transform = transforms.Compose([ + self.train_augmentation, + Stack(roll=False), + ToTorchFormatTensor(div=True), + normalize, + ]) + if args.mask_type == 'tube': + self.masked_position_generator = TubeMaskingGenerator( + args.window_size, args.mask_ratio + ) + + def __call__(self, images): + process_data , _ = self.transform(images) + return process_data, self.masked_position_generator() + + def __repr__(self): + repr = "(DataAugmentationForVideoMAE,\n" + repr += " transform = %s,\n" % str(self.transform) + repr += " Masked position generator = %s,\n" % str(self.masked_position_generator) + repr += ")" + return repr + +def get_args(): + parser = argparse.ArgumentParser('VideoMAE visualization reconstruction script', add_help=False) + parser.add_argument('img_path', type=str, help='input video path') + parser.add_argument('save_path', type=str, help='save video path') + parser.add_argument('model_path', type=str, help='checkpoint path of model') + parser.add_argument('--mask_type', default='random', choices=['random', 'tube'], + type=str, help='masked strategy of video tokens/patches') + parser.add_argument('--num_frames', type=int, default= 16) + parser.add_argument('--sampling_rate', type=int, default= 4) + parser.add_argument('--decoder_depth', default=4, type=int, + help='depth of decoder') + parser.add_argument('--input_size', default=224, type=int, + help='videos input size for backbone') + parser.add_argument('--device', default='cuda:0', + help='device to use for training / testing') + parser.add_argument('--imagenet_default_mean_and_std', default=True, 
action='store_true') + parser.add_argument('--mask_ratio', default=0.75, type=float, + help='ratio of the visual tokens/patches need be masked') + # Model parameters + parser.add_argument('--model', default='pretrain_videomae_base_patch16_224', type=str, metavar='MODEL', + help='Name of model to vis') + parser.add_argument('--drop_path', type=float, default=0.0, metavar='PCT', + help='Drop path rate (default: 0.1)') + + return parser.parse_args() + + +def get_model(args): + print(f"Creating model: {args.model}") + model = create_model( + args.model, + pretrained=False, + drop_path_rate=args.drop_path, + drop_block_rate=None, + decoder_depth=args.decoder_depth + ) + + return model + + +def main(args): + print(args) + + device = torch.device(args.device) + cudnn.benchmark = True + + model = get_model(args) + patch_size = model.encoder.patch_embed.patch_size + print("Patch size = %s" % str(patch_size)) + args.window_size = (args.num_frames // 2, args.input_size // patch_size[0], args.input_size // patch_size[1]) + args.patch_size = patch_size + + model.to(device) + checkpoint = torch.load(args.model_path, map_location='cpu') + model.load_state_dict(checkpoint['model']) + model.eval() + + if args.save_path: + Path(args.save_path).mkdir(parents=True, exist_ok=True) + + with open(args.img_path, 'rb') as f: + vr = VideoReader(f, ctx=cpu(0)) + duration = len(vr) + new_length = 1 + new_step = 1 + skip_length = new_length * new_step + # frame_id_list = [1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61] + + + tmp = np.arange(0,32, 2) + 60 + frame_id_list = tmp.tolist() + # average_duration = (duration - skip_length + 1) // args.num_frames + # if average_duration > 0: + # frame_id_list = np.multiply(list(range(args.num_frames)), + # average_duration) + # frame_id_list = frame_id_list + np.random.randint(average_duration, + # size=args.num_frames) + + video_data = vr.get_batch(frame_id_list).asnumpy() + print(video_data.shape) + img = [Image.fromarray(video_data[vid, :, :, :]).convert('RGB') for vid, _ in enumerate(frame_id_list)] + + transforms = DataAugmentationForVideoMAE(args) + img, bool_masked_pos = transforms((img, None)) # T*C,H,W + # print(img.shape) + img = img.view((args.num_frames , 3) + img.size()[-2:]).transpose(0,1) # T*C,H,W -> T,C,H,W -> C,T,H,W + # img = img.view(( -1 , args.num_frames) + img.size()[-2:]) + bool_masked_pos = torch.from_numpy(bool_masked_pos) + + with torch.no_grad(): + # img = img[None, :] + # bool_masked_pos = bool_masked_pos[None, :] + img = img.unsqueeze(0) + print(img.shape) + bool_masked_pos = bool_masked_pos.unsqueeze(0) + + img = img.to(device, non_blocking=True) + bool_masked_pos = bool_masked_pos.to(device, non_blocking=True).flatten(1).to(torch.bool) + outputs = model(img, bool_masked_pos) + + #save original video + mean = torch.as_tensor(IMAGENET_DEFAULT_MEAN).to(device)[None, :, None, None, None] + std = torch.as_tensor(IMAGENET_DEFAULT_STD).to(device)[None, :, None, None, None] + ori_img = img * std + mean # in [0, 1] + imgs = [ToPILImage()(ori_img[0,:,vid,:,:].cpu()) for vid, _ in enumerate(frame_id_list) ] + for id, im in enumerate(imgs): + im.save(f"{args.save_path}/ori_img{id}.jpg") + + img_squeeze = rearrange(ori_img, 'b c (t p0) (h p1) (w p2) -> b (t h w) (p0 p1 p2) c', p0=2, p1=patch_size[0], p2=patch_size[0]) + img_norm = (img_squeeze - img_squeeze.mean(dim=-2, keepdim=True)) / (img_squeeze.var(dim=-2, unbiased=True, keepdim=True).sqrt() + 1e-6) + img_patch = rearrange(img_norm, 'b n p c -> b n (p c)') + img_patch[bool_masked_pos] = 
outputs + + #make mask + mask = torch.ones_like(img_patch) + mask[bool_masked_pos] = 0 + mask = rearrange(mask, 'b n (p c) -> b n p c', c=3) + mask = rearrange(mask, 'b (t h w) (p0 p1 p2) c -> b c (t p0) (h p1) (w p2) ', p0=2, p1=patch_size[0], p2=patch_size[1], h=14, w=14) + + #save reconstruction video + rec_img = rearrange(img_patch, 'b n (p c) -> b n p c', c=3) + # Notice: To visualize the reconstruction video, we add the predict and the original mean and var of each patch. + rec_img = rec_img * (img_squeeze.var(dim=-2, unbiased=True, keepdim=True).sqrt() + 1e-6) + img_squeeze.mean(dim=-2, keepdim=True) + rec_img = rearrange(rec_img, 'b (t h w) (p0 p1 p2) c -> b c (t p0) (h p1) (w p2)', p0=2, p1=patch_size[0], p2=patch_size[1], h=14, w=14) + imgs = [ ToPILImage()(rec_img[0, :, vid, :, :].cpu().clamp(0,0.996)) for vid, _ in enumerate(frame_id_list) ] + + for id, im in enumerate(imgs): + im.save(f"{args.save_path}/rec_img{id}.jpg") + + #save masked video + img_mask = rec_img * mask + imgs = [ToPILImage()(img_mask[0, :, vid, :, :].cpu()) for vid, _ in enumerate(frame_id_list)] + for id, im in enumerate(imgs): + im.save(f"{args.save_path}/mask_img{id}.jpg") + +if __name__ == '__main__': + opts = get_args() + main(opts) diff --git a/Downstream/Spatial-Temporal-Action-Localization/start.sh b/Downstream/Spatial-Temporal-Action-Localization/start.sh new file mode 100644 index 0000000..c6ab577 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/start.sh @@ -0,0 +1 @@ +sbatch -N 1 --gres=gpu:1 --qos=gpugpu akeval.sh \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/train.sh b/Downstream/Spatial-Temporal-Action-Localization/train.sh new file mode 100644 index 0000000..a0f708a --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/train.sh @@ -0,0 +1,42 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 +export PATH=/mnt/lustre/share/gcc/gcc-5.3.0/bin/:$PATH +export LD_LIBRARY_PATH=/mnt/lustre/share/gcc/gmp-4.3.2/lib:/mnt/lustre/share/gcc/mpfr-2.4.2/lib:/mnt/lustre/share/gcc/mpc-0.8.1/lib:$LD_LIBRARY_PATH +export PATH=/mnt/cache/share/cuda-11.3/bin:$PATH +export PATH=/mnt/cache/xingsen/.local/bin:$PATH +export LD_LIBRARY_PATH=/mnt/cache/share/cuda-11.3/lib64:$LD_LIBRARY_PATH +MODEL_PATH='/mnt/cache/share_data/huangbingkun.vendor/model/vit_l_hybrid_pt_800e_k700_ft.pth' +OUTPUT_DIR='/mnt/lustre/xingsen/videoMAE_ckp/k700_2' +PARTITION=${PARTITION:-"video"} +GPUS=${GPUS:-32} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-12} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} +srun -p ${PARTITION} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + ${SRUN_ARGS} \ + python -u run_class_finetuning.py \ + --model vit_large_patch16_224 \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 8 \ + --num_sample 1 \ + --input_size 224 \ + --save_ckpt_freq 1 \ + --num_frames 16 \ + --sampling_rate 4 \ + --opt adamw \ + --lr 0.00015 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --epochs 30 \ + --data_set "ava" \ + --val_freq 30 \ + --drop_path 0.2 \ + --enable_deepspeed \ + ${PY_ARGS} diff --git a/Downstream/Spatial-Temporal-Action-Localization/transforms.py b/Downstream/Spatial-Temporal-Action-Localization/transforms.py new file mode 100644 index 0000000..f16a737 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/transforms.py @@ -0,0 +1,206 @@ +import 
torch +import torchvision.transforms.functional as F +import warnings +import random +import numpy as np +import torchvision +from PIL import Image, ImageOps +import numbers + + +class GroupRandomCrop(object): + def __init__(self, size): + if isinstance(size, numbers.Number): + self.size = (int(size), int(size)) + else: + self.size = size + + def __call__(self, img_tuple): + img_group, label = img_tuple + + w, h = img_group[0].size + th, tw = self.size + + out_images = list() + + x1 = random.randint(0, w - tw) + y1 = random.randint(0, h - th) + + for img in img_group: + assert(img.size[0] == w and img.size[1] == h) + if w == tw and h == th: + out_images.append(img) + else: + out_images.append(img.crop((x1, y1, x1 + tw, y1 + th))) + + return (out_images, label) + + +class GroupCenterCrop(object): + def __init__(self, size): + self.worker = torchvision.transforms.CenterCrop(size) + + def __call__(self, img_tuple): + img_group, label = img_tuple + return ([self.worker(img) for img in img_group], label) + + +class GroupNormalize(object): + def __init__(self, mean, std): + self.mean = mean + self.std = std + + def __call__(self, tensor_tuple): + tensor, label = tensor_tuple + rep_mean = self.mean * (tensor.size()[0]//len(self.mean)) + rep_std = self.std * (tensor.size()[0]//len(self.std)) + + # TODO: make efficient + for t, m, s in zip(tensor, rep_mean, rep_std): + t.sub_(m).div_(s) + + return (tensor,label) + + +class GroupGrayScale(object): + def __init__(self, size): + self.worker = torchvision.transforms.Grayscale(size) + + def __call__(self, img_tuple): + img_group, label = img_tuple + return ([self.worker(img) for img in img_group], label) + + +class GroupScale(object): + """ Rescales the input PIL.Image to the given 'size'. + 'size' will be the size of the smaller edge. 
+    For example, if height > width, then image will be
+    rescaled to (size * height / width, size)
+    size: size of the smaller edge
+    interpolation: Default: PIL.Image.BILINEAR
+    """
+
+    def __init__(self, size, interpolation=Image.BILINEAR):
+        self.worker = torchvision.transforms.Resize(size, interpolation)
+
+    def __call__(self, img_tuple):
+        img_group, label = img_tuple
+        return ([self.worker(img) for img in img_group], label)
+
+
+class GroupMultiScaleCrop(object):
+
+    def __init__(self, input_size, scales=None, max_distort=1, fix_crop=True, more_fix_crop=True):
+        self.scales = scales if scales is not None else [1, .875, .75, .66]
+        self.max_distort = max_distort
+        self.fix_crop = fix_crop
+        self.more_fix_crop = more_fix_crop
+        self.input_size = input_size if not isinstance(input_size, int) else [input_size, input_size]
+        self.interpolation = Image.BILINEAR
+
+    def __call__(self, img_tuple):
+        img_group, label = img_tuple
+
+        im_size = img_group[0].size
+
+        crop_w, crop_h, offset_w, offset_h = self._sample_crop_size(im_size)
+        crop_img_group = [img.crop((offset_w, offset_h, offset_w + crop_w, offset_h + crop_h)) for img in img_group]
+        ret_img_group = [img.resize((self.input_size[0], self.input_size[1]), self.interpolation) for img in crop_img_group]
+        return (ret_img_group, label)
+
+    def _sample_crop_size(self, im_size):
+        image_w, image_h = im_size[0], im_size[1]
+
+        # find a crop size
+        base_size = min(image_w, image_h)
+        crop_sizes = [int(base_size * x) for x in self.scales]
+        crop_h = [self.input_size[1] if abs(x - self.input_size[1]) < 3 else x for x in crop_sizes]
+        crop_w = [self.input_size[0] if abs(x - self.input_size[0]) < 3 else x for x in crop_sizes]
+
+        pairs = []
+        for i, h in enumerate(crop_h):
+            for j, w in enumerate(crop_w):
+                if abs(i - j) <= self.max_distort:
+                    pairs.append((w, h))
+
+        crop_pair = random.choice(pairs)
+        if not self.fix_crop:
+            w_offset = random.randint(0, image_w - crop_pair[0])
+            h_offset = random.randint(0, image_h - crop_pair[1])
+        else:
+            w_offset, h_offset = self._sample_fix_offset(image_w, image_h, crop_pair[0], crop_pair[1])
+
+        return crop_pair[0], crop_pair[1], w_offset, h_offset
+
+    def _sample_fix_offset(self, image_w, image_h, crop_w, crop_h):
+        offsets = self.fill_fix_offset(self.more_fix_crop, image_w, image_h, crop_w, crop_h)
+        return random.choice(offsets)
+
+    @staticmethod
+    def fill_fix_offset(more_fix_crop, image_w, image_h, crop_w, crop_h):
+        w_step = (image_w - crop_w) // 4
+        h_step = (image_h - crop_h) // 4
+
+        ret = list()
+        ret.append((0, 0))  # upper left
+        ret.append((4 * w_step, 0))  # upper right
+        ret.append((0, 4 * h_step))  # lower left
+        ret.append((4 * w_step, 4 * h_step))  # lower right
+        ret.append((2 * w_step, 2 * h_step))  # center
+
+        if more_fix_crop:
+            ret.append((0, 2 * h_step))  # center left
+            ret.append((4 * w_step, 2 * h_step))  # center right
+            ret.append((2 * w_step, 4 * h_step))  # lower center
+            ret.append((2 * w_step, 0 * h_step))  # upper center
+
+            ret.append((1 * w_step, 1 * h_step))  # upper left quarter
+            ret.append((3 * w_step, 1 * h_step))  # upper right quarter
+            ret.append((1 * w_step, 3 * h_step))  # lower left quarter
+            ret.append((3 * w_step, 3 * h_step))  # lower right quarter
+        return ret
+
+
+class Stack(object):
+
+    def __init__(self, roll=False):
+        self.roll = roll
+
+    def __call__(self, img_tuple):
+        img_group, label = img_tuple
+
+        if img_group[0].mode == 'L':
+            return (np.concatenate([np.expand_dims(x, 2) for x in img_group], axis=2), label)
+        elif img_group[0].mode == 'RGB':
+            if self.roll:
return (np.concatenate([np.array(x)[:, :, ::-1] for x in img_group], axis=2), label) + else: + return (np.concatenate(img_group, axis=2), label) + + +class ToTorchFormatTensor(object): + """ Converts a PIL.Image (RGB) or numpy.ndarray (H x W x C) in the range [0, 255] + to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] """ + def __init__(self, div=True): + self.div = div + + def __call__(self, pic_tuple): + pic, label = pic_tuple + + if isinstance(pic, np.ndarray): + # handle numpy array + img = torch.from_numpy(pic).permute(2, 0, 1).contiguous() + else: + # handle PIL Image + img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes())) + img = img.view(pic.size[1], pic.size[0], len(pic.mode)) + # put it from HWC to CHW format + # yikes, this transpose takes 80% of the loading time/CPU + img = img.transpose(0, 1).transpose(0, 2).contiguous() + return (img.float().div(255.) if self.div else img.float(), label) + + +class IdentityTransform(object): + + def __call__(self, data): + return data diff --git a/Downstream/Spatial-Temporal-Action-Localization/utils.py b/Downstream/Spatial-Temporal-Action-Localization/utils.py new file mode 100644 index 0000000..66ca90d --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/utils.py @@ -0,0 +1,535 @@ +import io +import os +import math +import time +import json +from collections import defaultdict, deque +import datetime +import numpy as np +from timm.utils import get_state_dict +from torch.utils.data._utils.collate import default_collate +from pathlib import Path +import subprocess +import torch +import torch.distributed as dist +from torch._six import inf +import random + +from tensorboardX import SummaryWriter + + +class SmoothedValue(object): + """Track a series of values and provide access to smoothed values over a + window or the global series average. + """ + + def __init__(self, window_size=20, fmt=None): + if fmt is None: + fmt = "{median:.4f} ({global_avg:.4f})" + self.deque = deque(maxlen=window_size) + self.total = 0.0 + self.count = 0 + self.fmt = fmt + + def update(self, value, n=1): + self.deque.append(value) + self.count += n + self.total += value * n + + def synchronize_between_processes(self): + """ + Warning: does not synchronize the deque! 
+ """ + if not is_dist_avail_and_initialized(): + return + t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') + dist.barrier() + dist.all_reduce(t) + t = t.tolist() + self.count = int(t[0]) + self.total = t[1] + + @property + def median(self): + d = torch.tensor(list(self.deque)) + return d.median().item() + + @property + def avg(self): + d = torch.tensor(list(self.deque), dtype=torch.float32) + return d.mean().item() + + @property + def global_avg(self): + return self.total / self.count + + @property + def max(self): + return max(self.deque) + + @property + def value(self): + return self.deque[-1] + + def __str__(self): + return self.fmt.format( + median=self.median, + avg=self.avg, + global_avg=self.global_avg, + max=self.max, + value=self.value) + + +class MetricLogger(object): + def __init__(self, delimiter="\t"): + self.meters = defaultdict(SmoothedValue) + self.delimiter = delimiter + + def update(self, **kwargs): + for k, v in kwargs.items(): + if v is None: + continue + if isinstance(v, torch.Tensor): + v = v.item() + assert isinstance(v, (float, int)) + self.meters[k].update(v) + + def __getattr__(self, attr): + if attr in self.meters: + return self.meters[attr] + if attr in self.__dict__: + return self.__dict__[attr] + raise AttributeError("'{}' object has no attribute '{}'".format( + type(self).__name__, attr)) + + def __str__(self): + loss_str = [] + for name, meter in self.meters.items(): + loss_str.append( + "{}: {}".format(name, str(meter)) + ) + return self.delimiter.join(loss_str) + + def synchronize_between_processes(self): + for meter in self.meters.values(): + meter.synchronize_between_processes() + + def add_meter(self, name, meter): + self.meters[name] = meter + + def log_every(self, iterable, print_freq, header=None): + i = 0 + if not header: + header = '' + start_time = time.time() + end = time.time() + iter_time = SmoothedValue(fmt='{avg:.4f}') + data_time = SmoothedValue(fmt='{avg:.4f}') + space_fmt = ':' + str(len(str(len(iterable)))) + 'd' + log_msg = [ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}', + '{meters}', + 'time: {time}', + 'data: {data}' + ] + if torch.cuda.is_available(): + log_msg.append('max mem: {memory:.0f}') + log_msg = self.delimiter.join(log_msg) + MB = 1024.0 * 1024.0 + for obj in iterable: + data_time.update(time.time() - end) + yield obj + iter_time.update(time.time() - end) + if i % print_freq == 0 or i == len(iterable) - 1: + eta_seconds = iter_time.global_avg * (len(iterable) - i) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + if torch.cuda.is_available(): + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time), + memory=torch.cuda.max_memory_allocated() / MB)) + else: + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time))) + i += 1 + end = time.time() + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('{} Total time: {} ({:.4f} s / it)'.format( + header, total_time_str, total_time / len(iterable))) + + +class TensorboardLogger(object): + def __init__(self, log_dir): + self.writer = SummaryWriter(logdir=log_dir) + self.step = 0 + + def set_step(self, step=None): + if step is not None: + self.step = step + else: + self.step += 1 + + def update(self, head='scalar', step=None, **kwargs): + for k, v in kwargs.items(): + if v is None: + continue + if isinstance(v, torch.Tensor): + v = 
v.item() + assert isinstance(v, (float, int)) + self.writer.add_scalar(head + "/" + k, v, self.step if step is None else step) + + def flush(self): + self.writer.flush() + +def seed_worker(worker_id): + worker_seed = torch.initial_seed() % 2**32 + np.random.seed(worker_seed) + random.seed(worker_seed) + +def _load_checkpoint_for_ema(model_ema, checkpoint): + """ + Workaround for ModelEma._load_checkpoint to accept an already-loaded object + """ + mem_file = io.BytesIO() + torch.save(checkpoint, mem_file) + mem_file.seek(0) + model_ema._load_checkpoint(mem_file) + + +def setup_for_distributed(is_master): + """ + This function disables printing when not in master process + """ + import builtins as __builtin__ + builtin_print = __builtin__.print + + def print(*args, **kwargs): + force = kwargs.pop('force', False) + if is_master or force: + builtin_print(*args, **kwargs) + + __builtin__.print = print + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True + + +def get_world_size(): + if not is_dist_avail_and_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not is_dist_avail_and_initialized(): + return 0 + return dist.get_rank() + + +def is_main_process(): + return get_rank() == 0 + + +def save_on_master(*args, **kwargs): + if is_main_process(): + torch.save(*args, **kwargs) + + +def init_distributed_mode(args): + if args.dist_on_itp: + args.rank = int(os.environ['OMPI_COMM_WORLD_RANK']) + args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE']) + args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK']) + args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT']) + os.environ['LOCAL_RANK'] = str(args.gpu) + os.environ['RANK'] = str(args.rank) + os.environ['WORLD_SIZE'] = str(args.world_size) + elif 'SLURM_PROCID' in os.environ: + args.rank = int(os.environ['SLURM_PROCID']) + args.gpu = int(os.environ['SLURM_LOCALID']) + args.world_size = int(os.environ['SLURM_NTASKS']) + os.environ['RANK'] = str(args.rank) + os.environ['LOCAL_RANK'] = str(args.gpu) + os.environ['WORLD_SIZE'] = str(args.world_size) + + node_list = os.environ['SLURM_NODELIST'] + addr = subprocess.getoutput( + f'scontrol show hostname {node_list} | head -n1') + if 'MASTER_ADDR' not in os.environ: + os.environ['MASTER_ADDR'] = addr + elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: + args.rank = int(os.environ["RANK"]) + args.world_size = int(os.environ['WORLD_SIZE']) + args.gpu = int(os.environ['LOCAL_RANK']) + else: + print('Not using distributed mode') + args.distributed = False + return + + args.distributed = True + + torch.cuda.set_device(args.gpu) + args.dist_backend = 'nccl' + print('| distributed init (rank {}): {}, gpu {}'.format( + args.rank, args.dist_url, args.gpu), flush=True) + torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, + world_size=args.world_size, rank=args.rank) + torch.distributed.barrier() + # assert torch.distributed.is_initialized() + setup_for_distributed(args.rank == 0) + + +def load_state_dict(model, state_dict, prefix='', ignore_missing="relative_position_index"): + missing_keys = [] + unexpected_keys = [] + error_msgs = [] + metadata = getattr(state_dict, '_metadata', None) + state_dict = state_dict.copy() + if metadata is not None: + state_dict._metadata = metadata + + def load(module, prefix=''): + local_metadata = {} if metadata is None else metadata.get( + prefix[:-1], {}) + 
module._load_from_state_dict( + state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) + for name, child in module._modules.items(): + if child is not None: + load(child, prefix + name + '.') + + load(model, prefix=prefix) + + warn_missing_keys = [] + ignore_missing_keys = [] + for key in missing_keys: + keep_flag = True + for ignore_key in ignore_missing.split('|'): + if ignore_key in key: + keep_flag = False + break + if keep_flag: + warn_missing_keys.append(key) + else: + ignore_missing_keys.append(key) + + missing_keys = warn_missing_keys + + if len(missing_keys) > 0: + print("Weights of {} not initialized from pretrained model: {}".format( + model.__class__.__name__, missing_keys)) + if len(unexpected_keys) > 0: + print("Weights from pretrained model not used in {}: {}".format( + model.__class__.__name__, unexpected_keys)) + if len(ignore_missing_keys) > 0: + print("Ignored weights of {} not initialized from pretrained model: {}".format( + model.__class__.__name__, ignore_missing_keys)) + if len(error_msgs) > 0: + print('\n'.join(error_msgs)) + + +class NativeScalerWithGradNormCount: + state_dict_key = "amp_scaler" + + def __init__(self): + self._scaler = torch.cuda.amp.GradScaler() + + def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True): + self._scaler.scale(loss).backward(create_graph=create_graph) + if update_grad: + if clip_grad is not None: + assert parameters is not None + self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place + norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad) + else: + self._scaler.unscale_(optimizer) + norm = get_grad_norm_(parameters) + self._scaler.step(optimizer) + self._scaler.update() + else: + norm = None + return norm + + def state_dict(self): + return self._scaler.state_dict() + + def load_state_dict(self, state_dict): + self._scaler.load_state_dict(state_dict) + + +def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor: + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + parameters = [p for p in parameters if p.grad is not None] + norm_type = float(norm_type) + if len(parameters) == 0: + return torch.tensor(0.) 
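+    # Compute the total gradient norm on the device of the first parameter's gradient:
+    # for the infinity norm this is the maximum absolute gradient entry over all
+    # parameters, otherwise the norm_type-norm of the stacked per-parameter norms.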
+ device = parameters[0].grad.device + if norm_type == inf: + total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters) + else: + total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type) + return total_norm + + +def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0, + start_warmup_value=0, warmup_steps=-1): + warmup_schedule = np.array([]) + warmup_iters = warmup_epochs * niter_per_ep + if warmup_steps > 0: + warmup_iters = warmup_steps + print("Set warmup steps = %d" % warmup_iters) + if warmup_epochs > 0: + warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters) + + iters = np.arange(epochs * niter_per_ep - warmup_iters) + schedule = np.array( + [final_value + 0.5 * (base_value - final_value) * (1 + math.cos(math.pi * i / (len(iters)))) for i in iters]) + + schedule = np.concatenate((warmup_schedule, schedule)) + + assert len(schedule) == epochs * niter_per_ep + return schedule + + +def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_ema=None): + output_dir = Path(args.output_dir) + epoch_name = str(epoch) + if loss_scaler is not None: + checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch_name)] + for checkpoint_path in checkpoint_paths: + to_save = { + 'model': model_without_ddp.state_dict(), + 'optimizer': optimizer.state_dict(), + 'epoch': epoch, + 'scaler': loss_scaler.state_dict(), + 'args': args, + } + + if model_ema is not None: + to_save['model_ema'] = get_state_dict(model_ema) + + save_on_master(to_save, checkpoint_path) + else: + client_state = {'epoch': epoch} + if model_ema is not None: + client_state['model_ema'] = get_state_dict(model_ema) + model.save_checkpoint(save_dir=args.output_dir, tag="checkpoint-%s" % epoch_name, client_state=client_state) + + +def auto_load_model(args, model, model_without_ddp, optimizer, loss_scaler, model_ema=None): + output_dir = Path(args.output_dir) + if loss_scaler is not None: + # torch.amp + if args.auto_resume and len(args.resume) == 0: + import glob + all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*.pth')) + latest_ckpt = -1 + for ckpt in all_checkpoints: + t = ckpt.split('-')[-1].split('.')[0] + if t.isdigit(): + latest_ckpt = max(int(t), latest_ckpt) + if latest_ckpt >= 0: + args.resume = os.path.join(output_dir, 'checkpoint-%d.pth' % latest_ckpt) + print("Auto resume checkpoint: %s" % args.resume) + + if args.resume: + if args.resume.startswith('https'): + checkpoint = torch.hub.load_state_dict_from_url( + args.resume, map_location='cpu', check_hash=True) + else: + checkpoint = torch.load(args.resume, map_location='cpu') + model_without_ddp.load_state_dict(checkpoint['model']) + print("Resume checkpoint %s" % args.resume) + if 'optimizer' in checkpoint and 'epoch' in checkpoint: + optimizer.load_state_dict(checkpoint['optimizer']) + args.start_epoch = checkpoint['epoch'] + 1 + if hasattr(args, 'model_ema') and args.model_ema: + _load_checkpoint_for_ema(model_ema, checkpoint['model_ema']) + if 'scaler' in checkpoint: + loss_scaler.load_state_dict(checkpoint['scaler']) + print("With optim & sched!") + else: + # deepspeed, only support '--auto_resume'. 
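+        # DeepSpeed resume path: find the newest 'checkpoint-<epoch>' directory in the
+        # output dir, reload it with model.load_checkpoint, continue from the saved
+        # epoch, and restore the EMA weights from the client state when model_ema is used.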
+ if args.auto_resume: + import glob + all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*')) + latest_ckpt = -1 + for ckpt in all_checkpoints: + t = ckpt.split('-')[-1].split('.')[0] + if t.isdigit(): + latest_ckpt = max(int(t), latest_ckpt) + if latest_ckpt >= 0: + args.resume = os.path.join(output_dir, 'checkpoint-%d' % latest_ckpt) + print("Auto resume checkpoint: %d" % latest_ckpt) + _, client_states = model.load_checkpoint(args.output_dir, tag='checkpoint-%d' % latest_ckpt) + args.start_epoch = client_states['epoch'] + 1 + if model_ema is not None: + if args.model_ema: + _load_checkpoint_for_ema(model_ema, client_states['model_ema']) + + +def create_ds_config(args): + args.deepspeed_config = os.path.join(args.output_dir, "deepspeed_config.json") + with open(args.deepspeed_config, mode="w") as writer: + ds_config = { + "train_batch_size": args.batch_size * args.update_freq * get_world_size(), + "train_micro_batch_size_per_gpu": args.batch_size, + "steps_per_print": 1000, + "optimizer": { + "type": "Adam", + "adam_w_mode": True, + "params": { + "lr": args.lr, + "weight_decay": args.weight_decay, + "bias_correction": True, + "betas": [ + 0.9, + 0.999 + ], + "eps": 1e-8 + } + }, + "fp16": { + "enabled": True, + "loss_scale": 0, + "initial_scale_power": 7, + "loss_scale_window": 128 + } + } + + writer.write(json.dumps(ds_config, indent=2)) + +def multiple_samples_collate(batch, fold=False): + """ + Collate function for repeated augmentation. Each instance in the batch has + more than one sample. + Args: + batch (tuple or list): data batch to collate. + Returns: + (tuple): collated data batch. + """ + inputs, labels, video_idx, extra_data = zip(*batch) + inputs = [item for sublist in inputs for item in sublist] + labels = [item for sublist in labels for item in sublist] + video_idx = [item for sublist in video_idx for item in sublist] + inputs, labels, video_idx, extra_data = ( + default_collate(inputs), + default_collate(labels), + default_collate(video_idx), + default_collate(extra_data), + ) + if fold: + return [inputs], labels, video_idx, extra_data + else: + return inputs, labels, video_idx, extra_data diff --git a/Downstream/Spatial-Temporal-Action-Localization/v100_config.json b/Downstream/Spatial-Temporal-Action-Localization/v100_config.json new file mode 100644 index 0000000..c1e3891 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/v100_config.json @@ -0,0 +1,20 @@ +{ + "Token":"qrgh5vShjDx4lP4rHIVI8A", + "business_flag": "TEG_AILab_CVC_chongqing", + "model_local_file_path": "/apdcephfs/share_1290939/miletchen/work_dir/clip/VideoMAE_ava", + "host_num": 1, + "host_gpu_num": 8, + "task_flag": "ava_videomae_vit_base16_T16x4_bs64_lr_5e-4_ep20_ft_v100", + "image_full_name": "mirrors.tencent.com/video_ai_lab/video_v100_deepspeed:v1", + "GPUName": "V100", + "cuda_version": "10.1", + "enable_ceph_metadata_cache": true, + "exec_start_in_all_mpi_pods": true, + "enable_rdma": true, + "is_elasticity": false, + "is_store_core_file": true, + "accelerate_params": { + "enable_optimizations": true, + "application_scenario": "Torch-CV" + } +} diff --git a/Downstream/Spatial-Temporal-Action-Localization/video_transforms.py b/Downstream/Spatial-Temporal-Action-Localization/video_transforms.py new file mode 100644 index 0000000..d2b26b3 --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/video_transforms.py @@ -0,0 +1,1281 @@ +#!/usr/bin/env python3 +import math +import numpy as np +import random +import torch +import torchvision.transforms.functional 
as F +from PIL import Image +from torchvision import transforms + +from rand_augment import rand_augment_transform +from random_erasing import RandomErasing + + +import numbers +import PIL +import torchvision + +import functional as FF + +_pil_interpolation_to_str = { + Image.NEAREST: "PIL.Image.NEAREST", + Image.BILINEAR: "PIL.Image.BILINEAR", + Image.BICUBIC: "PIL.Image.BICUBIC", + Image.LANCZOS: "PIL.Image.LANCZOS", + Image.HAMMING: "PIL.Image.HAMMING", + Image.BOX: "PIL.Image.BOX", +} + + +_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC) + + +def _pil_interp(method): + if method == "bicubic": + return Image.BICUBIC + elif method == "lanczos": + return Image.LANCZOS + elif method == "hamming": + return Image.HAMMING + else: + return Image.BILINEAR + + +def random_short_side_scale_jitter( + images, min_size, max_size, boxes=None, inverse_uniform_sampling=False +): + """ + Perform a spatial short scale jittering on the given images and + corresponding boxes. + Args: + images (tensor): images to perform scale jitter. Dimension is + `num frames` x `channel` x `height` x `width`. + min_size (int): the minimal size to scale the frames. + max_size (int): the maximal size to scale the frames. + boxes (ndarray): optional. Corresponding boxes to images. + Dimension is `num boxes` x 4. + inverse_uniform_sampling (bool): if True, sample uniformly in + [1 / max_scale, 1 / min_scale] and take a reciprocal to get the + scale. If False, take a uniform sample from [min_scale, max_scale]. + Returns: + (tensor): the scaled images with dimension of + `num frames` x `channel` x `new height` x `new width`. + (ndarray or None): the scaled boxes with dimension of + `num boxes` x 4. + """ + if inverse_uniform_sampling: + size = int( + round(1.0 / np.random.uniform(1.0 / max_size, 1.0 / min_size)) + ) + else: + size = int(round(np.random.uniform(min_size, max_size))) + + height = images.shape[2] + width = images.shape[3] + if (width <= height and width == size) or ( + height <= width and height == size + ): + return images, boxes + new_width = size + new_height = size + if width < height: + new_height = int(math.floor((float(height) / width) * size)) + if boxes is not None: + boxes = boxes * float(new_height) / height + else: + new_width = int(math.floor((float(width) / height) * size)) + if boxes is not None: + boxes = boxes * float(new_width) / width + + return ( + torch.nn.functional.interpolate( + images, + size=(new_height, new_width), + mode="bilinear", + align_corners=False, + ), + boxes, + ) + + +def crop_boxes(boxes, x_offset, y_offset): + """ + Peform crop on the bounding boxes given the offsets. + Args: + boxes (ndarray or None): bounding boxes to peform crop. The dimension + is `num boxes` x 4. + x_offset (int): cropping offset in the x axis. + y_offset (int): cropping offset in the y axis. + Returns: + cropped_boxes (ndarray or None): the cropped boxes with dimension of + `num boxes` x 4. + """ + cropped_boxes = boxes.copy() + cropped_boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset + cropped_boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset + + return cropped_boxes + + +def random_crop(images, size, boxes=None): + """ + Perform random spatial crop on the given images and corresponding boxes. + Args: + images (tensor): images to perform random crop. The dimension is + `num frames` x `channel` x `height` x `width`. + size (int): the size of height and width to crop on the image. + boxes (ndarray or None): optional. Corresponding boxes to images. + Dimension is `num boxes` x 4. 
+    Returns:
+        cropped (tensor): cropped images with dimension of
+            `num frames` x `channel` x `size` x `size`.
+        cropped_boxes (ndarray or None): the cropped boxes with dimension of
+            `num boxes` x 4.
+    """
+    if images.shape[2] == size and images.shape[3] == size:
+        return images
+    height = images.shape[2]
+    width = images.shape[3]
+    y_offset = 0
+    if height > size:
+        y_offset = int(np.random.randint(0, height - size))
+    x_offset = 0
+    if width > size:
+        x_offset = int(np.random.randint(0, width - size))
+    cropped = images[
+        :, :, y_offset : y_offset + size, x_offset : x_offset + size
+    ]
+
+    cropped_boxes = (
+        crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None
+    )
+
+    return cropped, cropped_boxes
+
+
+def horizontal_flip(prob, images, boxes=None):
+    """
+    Perform horizontal flip on the given images and corresponding boxes.
+    Args:
+        prob (float): probability to flip the images.
+        images (tensor): images to perform horizontal flip, the dimension is
+            `num frames` x `channel` x `height` x `width`.
+        boxes (ndarray or None): optional. Corresponding boxes to images.
+            Dimension is `num boxes` x 4.
+    Returns:
+        images (tensor): images with dimension of
+            `num frames` x `channel` x `height` x `width`.
+        flipped_boxes (ndarray or None): the flipped boxes with dimension of
+            `num boxes` x 4.
+    """
+    if boxes is None:
+        flipped_boxes = None
+    else:
+        flipped_boxes = boxes.copy()
+
+    if np.random.uniform() < prob:
+        images = images.flip((-1))
+
+        if len(images.shape) == 3:
+            width = images.shape[2]
+        elif len(images.shape) == 4:
+            width = images.shape[3]
+        else:
+            raise NotImplementedError("Dimension is not supported")
+        if boxes is not None:
+            flipped_boxes[:, [0, 2]] = width - boxes[:, [2, 0]] - 1
+
+    return images, flipped_boxes
+
+
+def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None):
+    """
+    Perform uniform spatial sampling on the images and corresponding boxes.
+    Args:
+        images (tensor): images to perform uniform crop. The dimension is
+            `num frames` x `channel` x `height` x `width`.
+        size (int): size of height and width to crop the images.
+        spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width
+            is larger than height. Or 0, 1, or 2 for top, center, and bottom
+            crop if height is larger than width.
+        boxes (ndarray or None): optional. Corresponding boxes to images.
+            Dimension is `num boxes` x 4.
+        scale_size (int): optional. If not None, resize the images to scale_size before
+            performing any crop.
+    Returns:
+        cropped (tensor): images with dimension of
+            `num frames` x `channel` x `size` x `size`.
+        cropped_boxes (ndarray or None): the cropped boxes with dimension of
+            `num boxes` x 4.
+ """ + assert spatial_idx in [0, 1, 2] + ndim = len(images.shape) + if ndim == 3: + images = images.unsqueeze(0) + height = images.shape[2] + width = images.shape[3] + + if scale_size is not None: + if width <= height: + width, height = scale_size, int(height / width * scale_size) + else: + width, height = int(width / height * scale_size), scale_size + images = torch.nn.functional.interpolate( + images, + size=(height, width), + mode="bilinear", + align_corners=False, + ) + + y_offset = int(math.ceil((height - size) / 2)) + x_offset = int(math.ceil((width - size) / 2)) + + if height > width: + if spatial_idx == 0: + y_offset = 0 + elif spatial_idx == 2: + y_offset = height - size + else: + if spatial_idx == 0: + x_offset = 0 + elif spatial_idx == 2: + x_offset = width - size + cropped = images[ + :, :, y_offset : y_offset + size, x_offset : x_offset + size + ] + cropped_boxes = ( + crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None + ) + if ndim == 3: + cropped = cropped.squeeze(0) + return cropped, cropped_boxes + + +def clip_boxes_to_image(boxes, height, width): + """ + Clip an array of boxes to an image with the given height and width. + Args: + boxes (ndarray): bounding boxes to perform clipping. + Dimension is `num boxes` x 4. + height (int): given image height. + width (int): given image width. + Returns: + clipped_boxes (ndarray): the clipped boxes with dimension of + `num boxes` x 4. + """ + clipped_boxes = boxes.copy() + clipped_boxes[:, [0, 2]] = np.minimum( + width - 1.0, np.maximum(0.0, boxes[:, [0, 2]]) + ) + clipped_boxes[:, [1, 3]] = np.minimum( + height - 1.0, np.maximum(0.0, boxes[:, [1, 3]]) + ) + return clipped_boxes + + +def blend(images1, images2, alpha): + """ + Blend two images with a given weight alpha. + Args: + images1 (tensor): the first images to be blended, the dimension is + `num frames` x `channel` x `height` x `width`. + images2 (tensor): the second images to be blended, the dimension is + `num frames` x `channel` x `height` x `width`. + alpha (float): the blending weight. + Returns: + (tensor): blended images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + return images1 * alpha + images2 * (1 - alpha) + + +def grayscale(images): + """ + Get the grayscale for the input images. The channels of images should be + in order BGR. + Args: + images (tensor): the input images for getting grayscale. Dimension is + `num frames` x `channel` x `height` x `width`. + Returns: + img_gray (tensor): blended images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + # R -> 0.299, G -> 0.587, B -> 0.114. + img_gray = torch.tensor(images) + gray_channel = ( + 0.299 * images[:, 2] + 0.587 * images[:, 1] + 0.114 * images[:, 0] + ) + img_gray[:, 0] = gray_channel + img_gray[:, 1] = gray_channel + img_gray[:, 2] = gray_channel + return img_gray + + +def color_jitter(images, img_brightness=0, img_contrast=0, img_saturation=0): + """ + Perfrom a color jittering on the input images. The channels of images + should be in order BGR. + Args: + images (tensor): images to perform color jitter. Dimension is + `num frames` x `channel` x `height` x `width`. + img_brightness (float): jitter ratio for brightness. + img_contrast (float): jitter ratio for contrast. + img_saturation (float): jitter ratio for saturation. + Returns: + images (tensor): the jittered images, the dimension is + `num frames` x `channel` x `height` x `width`. 
+ """ + + jitter = [] + if img_brightness != 0: + jitter.append("brightness") + if img_contrast != 0: + jitter.append("contrast") + if img_saturation != 0: + jitter.append("saturation") + + if len(jitter) > 0: + order = np.random.permutation(np.arange(len(jitter))) + for idx in range(0, len(jitter)): + if jitter[order[idx]] == "brightness": + images = brightness_jitter(img_brightness, images) + elif jitter[order[idx]] == "contrast": + images = contrast_jitter(img_contrast, images) + elif jitter[order[idx]] == "saturation": + images = saturation_jitter(img_saturation, images) + return images + + +def brightness_jitter(var, images): + """ + Perfrom brightness jittering on the input images. The channels of images + should be in order BGR. + Args: + var (float): jitter ratio for brightness. + images (tensor): images to perform color jitter. Dimension is + `num frames` x `channel` x `height` x `width`. + Returns: + images (tensor): the jittered images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + alpha = 1.0 + np.random.uniform(-var, var) + + img_bright = torch.zeros(images.shape) + images = blend(images, img_bright, alpha) + return images + + +def contrast_jitter(var, images): + """ + Perfrom contrast jittering on the input images. The channels of images + should be in order BGR. + Args: + var (float): jitter ratio for contrast. + images (tensor): images to perform color jitter. Dimension is + `num frames` x `channel` x `height` x `width`. + Returns: + images (tensor): the jittered images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + alpha = 1.0 + np.random.uniform(-var, var) + + img_gray = grayscale(images) + img_gray[:] = torch.mean(img_gray, dim=(1, 2, 3), keepdim=True) + images = blend(images, img_gray, alpha) + return images + + +def saturation_jitter(var, images): + """ + Perfrom saturation jittering on the input images. The channels of images + should be in order BGR. + Args: + var (float): jitter ratio for saturation. + images (tensor): images to perform color jitter. Dimension is + `num frames` x `channel` x `height` x `width`. + Returns: + images (tensor): the jittered images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + alpha = 1.0 + np.random.uniform(-var, var) + img_gray = grayscale(images) + images = blend(images, img_gray, alpha) + + return images + + +def lighting_jitter(images, alphastd, eigval, eigvec): + """ + Perform AlexNet-style PCA jitter on the given images. + Args: + images (tensor): images to perform lighting jitter. Dimension is + `num frames` x `channel` x `height` x `width`. + alphastd (float): jitter ratio for PCA jitter. + eigval (list): eigenvalues for PCA jitter. + eigvec (list[list]): eigenvectors for PCA jitter. + Returns: + out_images (tensor): the jittered images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + if alphastd == 0: + return images + # generate alpha1, alpha2, alpha3. 
+ alpha = np.random.normal(0, alphastd, size=(1, 3)) + eig_vec = np.array(eigvec) + eig_val = np.reshape(eigval, (1, 3)) + rgb = np.sum( + eig_vec * np.repeat(alpha, 3, axis=0) * np.repeat(eig_val, 3, axis=0), + axis=1, + ) + out_images = torch.zeros_like(images) + if len(images.shape) == 3: + # C H W + channel_dim = 0 + elif len(images.shape) == 4: + # T C H W + channel_dim = 1 + else: + raise NotImplementedError(f"Unsupported dimension {len(images.shape)}") + + for idx in range(images.shape[channel_dim]): + # C H W + if len(images.shape) == 3: + out_images[idx] = images[idx] + rgb[2 - idx] + # T C H W + elif len(images.shape) == 4: + out_images[:, idx] = images[:, idx] + rgb[2 - idx] + else: + raise NotImplementedError( + f"Unsupported dimension {len(images.shape)}" + ) + + return out_images + + +def color_normalization(images, mean, stddev): + """ + Perform color nomration on the given images. + Args: + images (tensor): images to perform color normalization. Dimension is + `num frames` x `channel` x `height` x `width`. + mean (list): mean values for normalization. + stddev (list): standard deviations for normalization. + + Returns: + out_images (tensor): the noramlized images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + if len(images.shape) == 3: + assert ( + len(mean) == images.shape[0] + ), "channel mean not computed properly" + assert ( + len(stddev) == images.shape[0] + ), "channel stddev not computed properly" + elif len(images.shape) == 4: + assert ( + len(mean) == images.shape[1] + ), "channel mean not computed properly" + assert ( + len(stddev) == images.shape[1] + ), "channel stddev not computed properly" + else: + raise NotImplementedError(f"Unsupported dimension {len(images.shape)}") + + out_images = torch.zeros_like(images) + for idx in range(len(mean)): + # C H W + if len(images.shape) == 3: + out_images[idx] = (images[idx] - mean[idx]) / stddev[idx] + elif len(images.shape) == 4: + out_images[:, idx] = (images[:, idx] - mean[idx]) / stddev[idx] + else: + raise NotImplementedError( + f"Unsupported dimension {len(images.shape)}" + ) + return out_images + + +def _get_param_spatial_crop( + scale, ratio, height, width, num_repeat=10, log_scale=True, switch_hw=False +): + """ + Given scale, ratio, height and width, return sampled coordinates of the videos. + """ + for _ in range(num_repeat): + area = height * width + target_area = random.uniform(*scale) * area + if log_scale: + log_ratio = (math.log(ratio[0]), math.log(ratio[1])) + aspect_ratio = math.exp(random.uniform(*log_ratio)) + else: + aspect_ratio = random.uniform(*ratio) + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if np.random.uniform() < 0.5 and switch_hw: + w, h = h, w + + if 0 < w <= width and 0 < h <= height: + i = random.randint(0, height - h) + j = random.randint(0, width - w) + return i, j, h, w + + # Fallback to central crop + in_ratio = float(width) / float(height) + if in_ratio < min(ratio): + w = width + h = int(round(w / min(ratio))) + elif in_ratio > max(ratio): + h = height + w = int(round(h * max(ratio))) + else: # whole image + w = width + h = height + i = (height - h) // 2 + j = (width - w) // 2 + return i, j, h, w + + +def random_resized_crop( + images, + target_height, + target_width, + scale=(0.8, 1.0), + ratio=(3.0 / 4.0, 4.0 / 3.0), +): + """ + Crop the given images to random size and aspect ratio. 
A crop of random + size (default: of 0.08 to 1.0) of the original size and a random aspect + ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This + crop is finally resized to given size. This is popularly used to train the + Inception networks. + + Args: + images: Images to perform resizing and cropping. + target_height: Desired height after cropping. + target_width: Desired width after cropping. + scale: Scale range of Inception-style area based random resizing. + ratio: Aspect ratio range of Inception-style area based random resizing. + """ + + height = images.shape[2] + width = images.shape[3] + + i, j, h, w = _get_param_spatial_crop(scale, ratio, height, width) + cropped = images[:, :, i : i + h, j : j + w] + return torch.nn.functional.interpolate( + cropped, + size=(target_height, target_width), + mode="bilinear", + align_corners=False, + ) + + +def random_resized_crop_with_shift( + images, + target_height, + target_width, + scale=(0.8, 1.0), + ratio=(3.0 / 4.0, 4.0 / 3.0), +): + """ + This is similar to random_resized_crop. However, it samples two different + boxes (for cropping) for the first and last frame. It then linearly + interpolates the two boxes for other frames. + + Args: + images: Images to perform resizing and cropping. + target_height: Desired height after cropping. + target_width: Desired width after cropping. + scale: Scale range of Inception-style area based random resizing. + ratio: Aspect ratio range of Inception-style area based random resizing. + """ + t = images.shape[1] + height = images.shape[2] + width = images.shape[3] + + i, j, h, w = _get_param_spatial_crop(scale, ratio, height, width) + i_, j_, h_, w_ = _get_param_spatial_crop(scale, ratio, height, width) + i_s = [int(i) for i in torch.linspace(i, i_, steps=t).tolist()] + j_s = [int(i) for i in torch.linspace(j, j_, steps=t).tolist()] + h_s = [int(i) for i in torch.linspace(h, h_, steps=t).tolist()] + w_s = [int(i) for i in torch.linspace(w, w_, steps=t).tolist()] + out = torch.zeros((3, t, target_height, target_width)) + for ind in range(t): + out[:, ind : ind + 1, :, :] = torch.nn.functional.interpolate( + images[ + :, + ind : ind + 1, + i_s[ind] : i_s[ind] + h_s[ind], + j_s[ind] : j_s[ind] + w_s[ind], + ], + size=(target_height, target_width), + mode="bilinear", + align_corners=False, + ) + return out + + +def create_random_augment( + input_size, + auto_augment=None, + interpolation="bilinear", +): + """ + Get video randaug transform. + + Args: + input_size: The size of the input video in tuple. + auto_augment: Parameters for randaug. An example: + "rand-m7-n4-mstd0.5-inc1" (m is the magnitude and n is the number + of operations to apply). + interpolation: Interpolation method. + """ + if isinstance(input_size, tuple): + img_size = input_size[-2:] + else: + img_size = input_size + + if auto_augment: + assert isinstance(auto_augment, str) + if isinstance(img_size, tuple): + img_size_min = min(img_size) + else: + img_size_min = img_size + aa_params = {"translate_const": int(img_size_min * 0.45)} + if interpolation and interpolation != "random": + aa_params["interpolation"] = _pil_interp(interpolation) + if auto_augment.startswith("rand"): + return transforms.Compose( + [rand_augment_transform(auto_augment, aa_params)] + ) + raise NotImplementedError + + +def random_sized_crop_img( + im, + size, + jitter_scale=(0.08, 1.0), + jitter_aspect=(3.0 / 4.0, 4.0 / 3.0), + max_iter=10, +): + """ + Performs Inception-style cropping (used for training). 
+ """ + assert ( + len(im.shape) == 3 + ), "Currently only support image for random_sized_crop" + h, w = im.shape[1:3] + i, j, h, w = _get_param_spatial_crop( + scale=jitter_scale, + ratio=jitter_aspect, + height=h, + width=w, + num_repeat=max_iter, + log_scale=False, + switch_hw=True, + ) + cropped = im[:, i : i + h, j : j + w] + return torch.nn.functional.interpolate( + cropped.unsqueeze(0), + size=(size, size), + mode="bilinear", + align_corners=False, + ).squeeze(0) + + +# The following code are modified based on timm lib, we will replace the following +# contents with dependency from PyTorchVideo. +# https://github.com/facebookresearch/pytorchvideo +class RandomResizedCropAndInterpolation: + """Crop the given PIL Image to random size and aspect ratio with random interpolation. + A crop of random size (default: of 0.08 to 1.0) of the original size and a random + aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop + is finally resized to given size. + This is popularly used to train the Inception networks. + Args: + size: expected output size of each edge + scale: range of size of the origin size cropped + ratio: range of aspect ratio of the origin aspect ratio cropped + interpolation: Default: PIL.Image.BILINEAR + """ + + def __init__( + self, + size, + scale=(0.08, 1.0), + ratio=(3.0 / 4.0, 4.0 / 3.0), + interpolation="bilinear", + ): + if isinstance(size, tuple): + self.size = size + else: + self.size = (size, size) + if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): + print("range should be of kind (min, max)") + + if interpolation == "random": + self.interpolation = _RANDOM_INTERPOLATION + else: + self.interpolation = _pil_interp(interpolation) + self.scale = scale + self.ratio = ratio + + @staticmethod + def get_params(img, scale, ratio): + """Get parameters for ``crop`` for a random sized crop. + Args: + img (PIL Image): Image to be cropped. + scale (tuple): range of size of the origin size cropped + ratio (tuple): range of aspect ratio of the origin aspect ratio cropped + Returns: + tuple: params (i, j, h, w) to be passed to ``crop`` for a random + sized crop. + """ + area = img.size[0] * img.size[1] + + for _ in range(10): + target_area = random.uniform(*scale) * area + log_ratio = (math.log(ratio[0]), math.log(ratio[1])) + aspect_ratio = math.exp(random.uniform(*log_ratio)) + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if w <= img.size[0] and h <= img.size[1]: + i = random.randint(0, img.size[1] - h) + j = random.randint(0, img.size[0] - w) + return i, j, h, w + + # Fallback to central crop + in_ratio = img.size[0] / img.size[1] + if in_ratio < min(ratio): + w = img.size[0] + h = int(round(w / min(ratio))) + elif in_ratio > max(ratio): + h = img.size[1] + w = int(round(h * max(ratio))) + else: # whole image + w = img.size[0] + h = img.size[1] + i = (img.size[1] - h) // 2 + j = (img.size[0] - w) // 2 + return i, j, h, w + + def __call__(self, img): + """ + Args: + img (PIL Image): Image to be cropped and resized. + Returns: + PIL Image: Randomly cropped and resized image. 
+ """ + i, j, h, w = self.get_params(img, self.scale, self.ratio) + if isinstance(self.interpolation, (tuple, list)): + interpolation = random.choice(self.interpolation) + else: + interpolation = self.interpolation + return F.resized_crop(img, i, j, h, w, self.size, interpolation) + + def __repr__(self): + if isinstance(self.interpolation, (tuple, list)): + interpolate_str = " ".join( + [_pil_interpolation_to_str[x] for x in self.interpolation] + ) + else: + interpolate_str = _pil_interpolation_to_str[self.interpolation] + format_string = self.__class__.__name__ + "(size={0}".format(self.size) + format_string += ", scale={0}".format( + tuple(round(s, 4) for s in self.scale) + ) + format_string += ", ratio={0}".format( + tuple(round(r, 4) for r in self.ratio) + ) + format_string += ", interpolation={0})".format(interpolate_str) + return format_string + + +def transforms_imagenet_train( + img_size=224, + scale=None, + ratio=None, + hflip=0.5, + vflip=0.0, + color_jitter=0.4, + auto_augment=None, + interpolation="random", + use_prefetcher=False, + mean=(0.485, 0.456, 0.406), + std=(0.229, 0.224, 0.225), + re_prob=0.0, + re_mode="const", + re_count=1, + re_num_splits=0, + separate=False, +): + """ + If separate==True, the transforms are returned as a tuple of 3 separate transforms + for use in a mixing dataset that passes + * all data through the first (primary) transform, called the 'clean' data + * a portion of the data through the secondary transform + * normalizes and converts the branches above with the third, final transform + """ + if isinstance(img_size, tuple): + img_size = img_size[-2:] + else: + img_size = img_size + + scale = tuple(scale or (0.08, 1.0)) # default imagenet scale range + ratio = tuple( + ratio or (3.0 / 4.0, 4.0 / 3.0) + ) # default imagenet ratio range + primary_tfl = [ + RandomResizedCropAndInterpolation( + img_size, scale=scale, ratio=ratio, interpolation=interpolation + ) + ] + if hflip > 0.0: + primary_tfl += [transforms.RandomHorizontalFlip(p=hflip)] + if vflip > 0.0: + primary_tfl += [transforms.RandomVerticalFlip(p=vflip)] + + secondary_tfl = [] + if auto_augment: + assert isinstance(auto_augment, str) + if isinstance(img_size, tuple): + img_size_min = min(img_size) + else: + img_size_min = img_size + aa_params = dict( + translate_const=int(img_size_min * 0.45), + img_mean=tuple([min(255, round(255 * x)) for x in mean]), + ) + if interpolation and interpolation != "random": + aa_params["interpolation"] = _pil_interp(interpolation) + if auto_augment.startswith("rand"): + secondary_tfl += [rand_augment_transform(auto_augment, aa_params)] + elif auto_augment.startswith("augmix"): + raise NotImplementedError("Augmix not implemented") + else: + raise NotImplementedError("Auto aug not implemented") + elif color_jitter is not None: + # color jitter is enabled when not using AA + if isinstance(color_jitter, (list, tuple)): + # color jitter should be a 3-tuple/list if spec brightness/contrast/saturation + # or 4 if also augmenting hue + assert len(color_jitter) in (3, 4) + else: + # if it's a scalar, duplicate for brightness, contrast, and saturation, no hue + color_jitter = (float(color_jitter),) * 3 + secondary_tfl += [transforms.ColorJitter(*color_jitter)] + + final_tfl = [] + final_tfl += [ + transforms.ToTensor(), + transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std)), + ] + if re_prob > 0.0: + final_tfl.append( + RandomErasing( + re_prob, + mode=re_mode, + max_count=re_count, + num_splits=re_num_splits, + device="cpu", + cube=False, + ) + ) + + 
if separate: + return ( + transforms.Compose(primary_tfl), + transforms.Compose(secondary_tfl), + transforms.Compose(final_tfl), + ) + else: + return transforms.Compose(primary_tfl + secondary_tfl + final_tfl) + +############################################################################################################ +############################################################################################################ + +class Compose(object): + """Composes several transforms + Args: + transforms (list of ``Transform`` objects): list of transforms + to compose + """ + + def __init__(self, transforms): + self.transforms = transforms + + def __call__(self, clip): + for t in self.transforms: + clip = t(clip) + return clip + + +class RandomHorizontalFlip(object): + """Horizontally flip the list of given images randomly + with a probability 0.5 + """ + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Randomly flipped clip + """ + if random.random() < 0.5: + if isinstance(clip[0], np.ndarray): + return [np.fliplr(img) for img in clip] + elif isinstance(clip[0], PIL.Image.Image): + return [ + img.transpose(PIL.Image.FLIP_LEFT_RIGHT) for img in clip + ] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + ' but got list of {0}'.format(type(clip[0]))) + return clip + + +class RandomResize(object): + """Resizes a list of (H x W x C) numpy.ndarray to the final size + The larger the original image is, the more times it takes to + interpolate + Args: + interpolation (str): Can be one of 'nearest', 'bilinear' + defaults to nearest + size (tuple): (widht, height) + """ + + def __init__(self, ratio=(3. / 4., 4. 
/ 3.), interpolation='nearest'): + self.ratio = ratio + self.interpolation = interpolation + + def __call__(self, clip): + scaling_factor = random.uniform(self.ratio[0], self.ratio[1]) + + if isinstance(clip[0], np.ndarray): + im_h, im_w, im_c = clip[0].shape + elif isinstance(clip[0], PIL.Image.Image): + im_w, im_h = clip[0].size + + new_w = int(im_w * scaling_factor) + new_h = int(im_h * scaling_factor) + new_size = (new_w, new_h) + resized = FF.resize_clip( + clip, new_size, interpolation=self.interpolation) + return resized + + +class Resize(object): + """Resizes a list of (H x W x C) numpy.ndarray to the final size + The larger the original image is, the more times it takes to + interpolate + Args: + interpolation (str): Can be one of 'nearest', 'bilinear' + defaults to nearest + size (tuple): (widht, height) + """ + + def __init__(self, size, interpolation='nearest'): + self.size = size + self.interpolation = interpolation + + def __call__(self, clip): + resized = FF.resize_clip( + clip, self.size, interpolation=self.interpolation) + return resized + + +class RandomCrop(object): + """Extract random crop at the same location for a list of images + Args: + size (sequence or int): Desired output size for the + crop in format (h, w) + """ + + def __init__(self, size): + if isinstance(size, numbers.Number): + size = (size, size) + + self.size = size + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + h, w = self.size + if isinstance(clip[0], np.ndarray): + im_h, im_w, im_c = clip[0].shape + elif isinstance(clip[0], PIL.Image.Image): + im_w, im_h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + if w > im_w or h > im_h: + error_msg = ( + 'Initial image size should be larger then ' + 'cropped size but got cropped sizes : ({w}, {h}) while ' + 'initial image is ({im_w}, {im_h})'.format( + im_w=im_w, im_h=im_h, w=w, h=h)) + raise ValueError(error_msg) + + x1 = random.randint(0, im_w - w) + y1 = random.randint(0, im_h - h) + cropped = FF.crop_clip(clip, y1, x1, h, w) + + return cropped + + +class ThreeCrop(object): + """Extract random crop at the same location for a list of images + Args: + size (sequence or int): Desired output size for the + crop in format (h, w) + """ + + def __init__(self, size): + if isinstance(size, numbers.Number): + size = (size, size) + + self.size = size + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + h, w = self.size + if isinstance(clip[0], np.ndarray): + im_h, im_w, im_c = clip[0].shape + elif isinstance(clip[0], PIL.Image.Image): + im_w, im_h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + if w != im_w and h != im_h: + clip = FF.resize_clip(clip, self.size, interpolation="bilinear") + im_h, im_w, im_c = clip[0].shape + + step = np.max((np.max((im_w, im_h)) - self.size[0]) // 2, 0) + cropped = [] + for i in range(3): + if (im_h > self.size[0]): + x1 = 0 + y1 = i * step + cropped.extend(FF.crop_clip(clip, y1, x1, h, w)) + else: + x1 = i * step + y1 = 0 + cropped.extend(FF.crop_clip(clip, y1, x1, h, w)) + return cropped + + +class RandomRotation(object): + """Rotate entire 
clip randomly by a random angle within + given bounds + Args: + degrees (sequence or int): Range of degrees to select from + If degrees is a number instead of sequence like (min, max), + the range of degrees, will be (-degrees, +degrees). + """ + + def __init__(self, degrees): + if isinstance(degrees, numbers.Number): + if degrees < 0: + raise ValueError('If degrees is a single number,' + 'must be positive') + degrees = (-degrees, degrees) + else: + if len(degrees) != 2: + raise ValueError('If degrees is a sequence,' + 'it must be of len 2.') + + self.degrees = degrees + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + import skimage + angle = random.uniform(self.degrees[0], self.degrees[1]) + if isinstance(clip[0], np.ndarray): + rotated = [skimage.transform.rotate(img, angle) for img in clip] + elif isinstance(clip[0], PIL.Image.Image): + rotated = [img.rotate(angle) for img in clip] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + + return rotated + + +class CenterCrop(object): + """Extract center crop at the same location for a list of images + Args: + size (sequence or int): Desired output size for the + crop in format (h, w) + """ + + def __init__(self, size): + if isinstance(size, numbers.Number): + size = (size, size) + + self.size = size + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + h, w = self.size + if isinstance(clip[0], np.ndarray): + im_h, im_w, im_c = clip[0].shape + elif isinstance(clip[0], PIL.Image.Image): + im_w, im_h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + if w > im_w or h > im_h: + error_msg = ( + 'Initial image size should be larger then ' + 'cropped size but got cropped sizes : ({w}, {h}) while ' + 'initial image is ({im_w}, {im_h})'.format( + im_w=im_w, im_h=im_h, w=w, h=h)) + raise ValueError(error_msg) + + x1 = int(round((im_w - w) / 2.)) + y1 = int(round((im_h - h) / 2.)) + cropped = FF.crop_clip(clip, y1, x1, h, w) + + return cropped + + +class ColorJitter(object): + """Randomly change the brightness, contrast and saturation and hue of the clip + Args: + brightness (float): How much to jitter brightness. brightness_factor + is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]. + contrast (float): How much to jitter contrast. contrast_factor + is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]. + saturation (float): How much to jitter saturation. saturation_factor + is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]. + hue(float): How much to jitter hue. hue_factor is chosen uniformly from + [-hue, hue]. Should be >=0 and <= 0.5. 
+ """ + + def __init__(self, brightness=0, contrast=0, saturation=0, hue=0): + self.brightness = brightness + self.contrast = contrast + self.saturation = saturation + self.hue = hue + + def get_params(self, brightness, contrast, saturation, hue): + if brightness > 0: + brightness_factor = random.uniform( + max(0, 1 - brightness), 1 + brightness) + else: + brightness_factor = None + + if contrast > 0: + contrast_factor = random.uniform( + max(0, 1 - contrast), 1 + contrast) + else: + contrast_factor = None + + if saturation > 0: + saturation_factor = random.uniform( + max(0, 1 - saturation), 1 + saturation) + else: + saturation_factor = None + + if hue > 0: + hue_factor = random.uniform(-hue, hue) + else: + hue_factor = None + return brightness_factor, contrast_factor, saturation_factor, hue_factor + + def __call__(self, clip): + """ + Args: + clip (list): list of PIL.Image + Returns: + list PIL.Image : list of transformed PIL.Image + """ + if isinstance(clip[0], np.ndarray): + raise TypeError( + 'Color jitter not yet implemented for numpy arrays') + elif isinstance(clip[0], PIL.Image.Image): + brightness, contrast, saturation, hue = self.get_params( + self.brightness, self.contrast, self.saturation, self.hue) + + # Create img transform function sequence + img_transforms = [] + if brightness is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness)) + if saturation is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation)) + if hue is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue)) + if contrast is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast)) + random.shuffle(img_transforms) + + # Apply to all images + jittered_clip = [] + for img in clip: + for func in img_transforms: + jittered_img = func(img) + jittered_clip.append(jittered_img) + + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + return jittered_clip + + +class Normalize(object): + """Normalize a clip with mean and standard deviation. + Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform + will normalize each channel of the input ``torch.*Tensor`` i.e. + ``input[channel] = (input[channel] - mean[channel]) / std[channel]`` + .. note:: + This transform acts out of place, i.e., it does not mutates the input tensor. + Args: + mean (sequence): Sequence of means for each channel. + std (sequence): Sequence of standard deviations for each channel. + """ + + def __init__(self, mean, std): + self.mean = mean + self.std = std + + def __call__(self, clip): + """ + Args: + clip (Tensor): Tensor clip of size (T, C, H, W) to be normalized. + Returns: + Tensor: Normalized Tensor clip. 
+ """ + return FF.normalize(clip, self.mean, self.std) + + def __repr__(self): + return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std) diff --git a/Downstream/Spatial-Temporal-Action-Localization/vis.sh b/Downstream/Spatial-Temporal-Action-Localization/vis.sh new file mode 100644 index 0000000..c2c0b0e --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/vis.sh @@ -0,0 +1,13 @@ +# Set the path to save video +OUTPUT_DIR='TODO/VideoMAE/demo/vis_k400_1_0.9' +# path to video for visualization +VIDEO_PATH='TODO/TODO.mp4' +# path to pretrain model +MODEL_PATH='TODO/videomae_pretrain_base_patch16_224_frame_16x4_tube_mask_ratio_0.9_e1600/checkpoint-1599.pth' + +python3 run_videomae_vis.py \ + --mask_ratio 0.9 \ + --mask_type tube \ + --decoder_depth 4 \ + --model pretrain_videomae_base_patch16_224 \ + ${VIDEO_PATH} ${OUTPUT_DIR} ${MODEL_PATH} \ No newline at end of file diff --git a/Downstream/Spatial-Temporal-Action-Localization/volume_transforms.py b/Downstream/Spatial-Temporal-Action-Localization/volume_transforms.py new file mode 100644 index 0000000..4d33dad --- /dev/null +++ b/Downstream/Spatial-Temporal-Action-Localization/volume_transforms.py @@ -0,0 +1,131 @@ +import numpy as np +from PIL import Image +import torch + + +def convert_img(img): + """Converts (H, W, C) numpy.ndarray to (C, W, H) format + """ + if len(img.shape) == 3: + img = img.transpose(2, 0, 1) + if len(img.shape) == 2: + img = np.expand_dims(img, 0) + return img + + +class ClipToTensor(object): + """Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255] + to a torch.FloatTensor of shape (C x m x H x W) in the range [0, 1.0] + """ + + def __init__(self, channel_nb=3, div_255=True, numpy=False): + self.channel_nb = channel_nb + self.div_255 = div_255 + self.numpy = numpy + + def __call__(self, clip): + """ + Args: clip (list of numpy.ndarray): clip (list of images) + to be converted to tensor. + """ + # Retrieve shape + if isinstance(clip[0], np.ndarray): + h, w, ch = clip[0].shape + assert ch == self.channel_nb, 'Got {0} instead of 3 channels'.format( + ch) + elif isinstance(clip[0], Image.Image): + w, h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image\ + but got list of {0}'.format(type(clip[0]))) + + np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)]) + + # Convert + for img_idx, img in enumerate(clip): + if isinstance(img, np.ndarray): + pass + elif isinstance(img, Image.Image): + img = np.array(img, copy=False) + else: + raise TypeError('Expected numpy.ndarray or PIL.Image\ + but got list of {0}'.format(type(clip[0]))) + img = convert_img(img) + np_clip[:, img_idx, :, :] = img + if self.numpy: + if self.div_255: + np_clip = np_clip / 255.0 + return np_clip + + else: + tensor_clip = torch.from_numpy(np_clip) + + if not isinstance(tensor_clip, torch.FloatTensor): + tensor_clip = tensor_clip.float() + if self.div_255: + tensor_clip = torch.div(tensor_clip, 255) + return tensor_clip + + +# Note this norms data to -1/1 +class ClipToTensor_K(object): + """Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255] + to a torch.FloatTensor of shape (C x m x H x W) in the range [0, 1.0] + """ + + def __init__(self, channel_nb=3, div_255=True, numpy=False): + self.channel_nb = channel_nb + self.div_255 = div_255 + self.numpy = numpy + + def __call__(self, clip): + """ + Args: clip (list of numpy.ndarray): clip (list of images) + to be converted to tensor. 
+ """ + # Retrieve shape + if isinstance(clip[0], np.ndarray): + h, w, ch = clip[0].shape + assert ch == self.channel_nb, 'Got {0} instead of 3 channels'.format( + ch) + elif isinstance(clip[0], Image.Image): + w, h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image\ + but got list of {0}'.format(type(clip[0]))) + + np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)]) + + # Convert + for img_idx, img in enumerate(clip): + if isinstance(img, np.ndarray): + pass + elif isinstance(img, Image.Image): + img = np.array(img, copy=False) + else: + raise TypeError('Expected numpy.ndarray or PIL.Image\ + but got list of {0}'.format(type(clip[0]))) + img = convert_img(img) + np_clip[:, img_idx, :, :] = img + if self.numpy: + if self.div_255: + np_clip = (np_clip - 127.5) / 127.5 + return np_clip + + else: + tensor_clip = torch.from_numpy(np_clip) + + if not isinstance(tensor_clip, torch.FloatTensor): + tensor_clip = tensor_clip.float() + if self.div_255: + tensor_clip = torch.div(torch.sub(tensor_clip, 127.5), 127.5) + return tensor_clip + + +class ToTensor(object): + """Converts numpy array to tensor + """ + + def __call__(self, array): + tensor = torch.from_numpy(array) + return tensor diff --git a/Downstream/Temporal-Action-Localization/INSTALL.md b/Downstream/Temporal-Action-Localization/INSTALL.md new file mode 100644 index 0000000..00a63fd --- /dev/null +++ b/Downstream/Temporal-Action-Localization/INSTALL.md @@ -0,0 +1,25 @@ +# Requirements + +- Linux +- Python 3.5+ +- PyTorch 1.10+ +- TensorBoard +- CUDA 11.0+ +- GCC 4.9+ +- NumPy 1.11+ +- PyYaml +- Pandas +- h5py +- joblib + +# Compilation + +Part of NMS is implemented in C++. The code can be compiled by + +```shell +cd ./libs/utils +python setup.py install --user +cd ../.. +``` + +The code should be recompiled every time you update PyTorch. diff --git a/Downstream/Temporal-Action-Localization/LICENSE b/Downstream/Temporal-Action-Localization/LICENSE new file mode 100644 index 0000000..5c9296f --- /dev/null +++ b/Downstream/Temporal-Action-Localization/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 VideoIntern + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
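For quick reference, here is a minimal usage sketch (not part of the patch) for the `ClipToTensor` and `ClipToTensor_K` converters defined in `volume_transforms.py` above. It assumes the snippet is run from `Downstream/Spatial-Temporal-Action-Localization/` so that `volume_transforms` is importable, and the random frames are stand-ins for decoded video frames.

```python
import numpy as np

from volume_transforms import ClipToTensor, ClipToTensor_K

# Eight fake RGB frames of size 112 x 112 with values in [0, 255],
# standing in for a decoded clip.
clip = [
    np.random.randint(0, 256, size=(112, 112, 3), dtype=np.uint8)
    for _ in range(8)
]

to_tensor = ClipToTensor()      # scales pixel values to [0, 1]
to_tensor_k = ClipToTensor_K()  # shifts/scales pixel values to roughly [-1, 1]

x = to_tensor(clip)    # torch.FloatTensor of shape (C, T, H, W) = (3, 8, 112, 112)
y = to_tensor_k(clip)  # same shape, but centered around zero

print(x.shape, float(x.min()), float(x.max()))  # ~0.0 .. ~1.0
print(y.shape, float(y.min()), float(y.max()))  # ~-1.0 .. ~1.0
```

The `_K` variant maps pixels to roughly [-1, 1] rather than [0, 1] (see the "norms data to -1/1" note in the file), so the choice between the two should follow the normalization the downstream backbone expects.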
diff --git a/Downstream/Temporal-Action-Localization/README.md b/Downstream/Temporal-Action-Localization/README.md
new file mode 100644
index 0000000..5179bac
--- /dev/null
+++ b/Downstream/Temporal-Action-Localization/README.md
@@ -0,0 +1,73 @@
+# Temporal-Action-Localization
+The codebase for one downstream task (Temporal Action Localization) based on our InternVideo.
+
+ [**InternVideo: General Video Foundation Models via Generative and Discriminative Learning**](https://arxiv.org/abs/2212.03191)
+
+We use ViT-H from our InternVideo as the backbone for feature extraction. In our experiments, the ViT-H models are pretrained on hybrid datasets.
+As shown in the table below, our InternVideo outperforms all previous methods on these four TAL datasets.
+Note that our InternVideo achieves large improvements in temporal action localization, especially on fine-grained TAL datasets such as THUMOS-14 and FineAction.
+
+|Backbone | Head | THUMOS-14 | ActivityNet-v1.3 | HACS | FineAction |
+|:----:|:-----:|:----------------:|:-------:|:-------:|:-------:|
+|I3D | ActionFormer | 66.80 | - | - | 13.24 |
+|SlowFast | TCANet | - | - | 38.71 | - |
+|TSP | ActionFormer | - | 36.60 | - | - |
+|**InternVideo** | ActionFormer | **71.58** | **39.00** | **41.32** | **17.57** |
+
+
+
+
+## Installation
+* Follow INSTALL.md to install the necessary dependencies and compile the code.
+* The annotations for the used data can be downloaded from [here](https://pjlab-my.sharepoint.cn/:u:/g/personal/wangyi_pjlab_org_cn/EbMrA1ltSdBPttMho9pe0cYBshPWec7IKmYFTNMGvdmT1A?e=jsmerl). Unzip and put them in the folder `./data`.
+
+
+## To Reproduce Our Results of InternVideo
+**Download VideoMAE Features and Annotations**
+* Download *thumos_feature* from [this BaiduYun link](https://pan.baidu.com/s/1IVcnPWsZyF6rHEPkSzH5Bw) (code: qzb1).
+
+
+
+* Download *anet_feature* from [this BaiduYun link](https://pan.baidu.com/s/1Vjvnesm7WCGHwjrqQWNVDg) (code: 0elv).
+
+
+
+
+* Download *hacs_feature* from [this BaiduYun link](https://pan.baidu.com/s/1j6GABMj0tY1OUxU2xzyB6g) (code: qcnz).
+
+
+
+
+* Download *fineaction_feature* from [this BaiduYun link](https://pan.baidu.com/s/1P5QQMuxcPiE2tn4ojQW7pA) (code: v45q).
+
+
+
+**Download UniformerV2 Features (soon)**
+
+**Details**:
+The THUMOS-14 features are extracted from VideoMAE models pretrained on Kinetics using clips of `16 frames` at the video frame rate (`~30 fps`) and a stride of `4 frames`. This gives one feature vector per `4/30 ~= 0.1333` seconds.
+
+The ANet & HACS & FineAction features are extracted from VideoMAE models pretrained on Kinetics using clips of `16 frames` at the video frame rate (`~30 fps`) and a stride of `16 frames`. This gives one feature vector per `16/30 ~= 0.5333` seconds.
+
+
+
+
+
+**Training and Evaluation**
+* Train ActionFormer with the InternVideo features using the scripts below.
+* This will create an experiment folder under *./ckpt* that stores the training config, logs, and checkpoints.
+```shell
+bash th14_run.sh
+bash anet_run.sh
+bash hacs_run.sh
+bash fa_run.sh
+```
+
+
+
+
+## Contact
+
+For any questions about our FineAction, please email me.
(yi.liu1@siat.ac.cn) \ No newline at end of file diff --git a/Downstream/Temporal-Action-Localization/anet_run.sh b/Downstream/Temporal-Action-Localization/anet_run.sh new file mode 100644 index 0000000..7162d3a --- /dev/null +++ b/Downstream/Temporal-Action-Localization/anet_run.sh @@ -0,0 +1 @@ +python -u ./train_eval.py ./configs/anet.yaml --output anet_mae_h 2>&1 | tee anet_mae_h.log \ No newline at end of file diff --git a/Downstream/Temporal-Action-Localization/configs/anet.yaml b/Downstream/Temporal-Action-Localization/configs/anet.yaml new file mode 100644 index 0000000..a33d7cc --- /dev/null +++ b/Downstream/Temporal-Action-Localization/configs/anet.yaml @@ -0,0 +1,62 @@ +dataset_name: anet +train_split: ['training'] +val_split: ['validation'] +dataset: { + json_file: ./data/anet_1.3/annotations/anet1.3_tsp_filtered.json, + feat_folder: /mnt/petrelfs/liuyi/data_feature/anet_tsp, + file_prefix: v_, + file_ext: .npy, + num_classes: 1, + input_dim: 1280, + #512 +768 +1024=1536 +1280=1792 1024 + feat_stride: 16, + num_frames: 16, + default_fps: 15, + trunc_thresh: 0.5, + crop_ratio: [0.9, 1.0], + max_seq_len: 768, +} +model: { + fpn_type: identity, + max_buffer_len_factor: 1.0, + n_mha_win_size: [25, 25, 25, 25, 25, -1], + # shrink the model for reduced input feature channels + n_head: 4, + embd_dim: 256, + fpn_dim: 256, + head_dim: 256, + use_abs_pe: True, +} +# learning_rate: 0.001 +opt: { + learning_rate: 0.001, + epochs: 10, + weight_decay: 0.05, +} +loader: { + batch_size: 8, +} +train_cfg: { + init_loss_norm: 400, + clip_grad_l2norm: 1.0, + cls_prior_prob: 0.01, + center_sample: radius, + center_sample_radius: 1.5, + label_smoothing: 0.1, +} + +# similar to THUMOS +test_cfg: { + voting_thresh: 0.9, + pre_nms_topk: 2000, + # max of 100 predictions per video + max_seg_num: 100, + min_score: 0.001, + # score fusion + multiclass_nms: False, + nms_sigma : 0.75, + # ext_score_file: ./data/anet_1.3/annotations/cuhk_val_simp_share.json, + ext_score_file: ./data/anet_1.3/annotations/new_3ensemble_uniformerv2_large_only_global_anet_16x10x3.json + duration_thresh: 0.1, +} +output_folder: ./ckpt_anet/ diff --git a/Downstream/Temporal-Action-Localization/configs/fineaction.yaml b/Downstream/Temporal-Action-Localization/configs/fineaction.yaml new file mode 100644 index 0000000..b378231 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/configs/fineaction.yaml @@ -0,0 +1,68 @@ +dataset_name: fineaction +train_split: ['training'] +val_split: ['validation'] +dataset: { + json_file: ./data/fineaction/annotations/annotations_gt_af.json, + feat_folder: /mnt/petrelfs/liuyi/fineaction_i3d_100, + # feat_folder: ./data/anet_1.3/tsp_features, + file_prefix: v_, + file_ext: .csv, + num_classes: 1, + # input_dim: 4096 2048 1024, 1280 + input_dim: 1408, + feat_stride: -1, + num_frames: 16, + default_fps: 15, + trunc_thresh: 0.5, + crop_ratio: [0.9, 1.0], + # upsample the features to a fixed length of 192 + max_seq_len: 192, + force_upsampling: True, +} +model: { + fpn_type: identity, + max_buffer_len_factor: 1.0, + # 192 - 96 - 48 - 24 - 12 - 6 + n_mha_win_size: [7, 7, 7, 7, 7, -1], + # shrink the model for reduced input feature channels + n_head: 4, + embd_dim: 256, + fpn_dim: 256, + head_dim: 256, + use_abs_pe: True, +} +opt: { + learning_rate: 0.001, + epochs: 20, + weight_decay: 0.05, +} +loader: { + batch_size: 16, +} +train_cfg: { + init_loss_norm: 200, + clip_grad_l2norm: 1.0, + cls_prior_prob: 0.01, + center_sample: radius, + center_sample_radius: 1.5, + label_smoothing: 0.1, + droppath: 
0.1, + loss_weight: 2.0, +} + +# similar to THUMOS +test_cfg: { + voting_thresh: 0.9, + pre_nms_topk: 2000, + # max of 100 predictions per video + max_seg_num: 100, + min_score: 0.001, + # score fusion + multiclass_nms: False, + nms_sigma : 0.75, + # ext_score_file: ./data/anet_1.3/annotations/cuhk_val_simp_share.json, + ext_score_file: ./data/fineaction/annotations/new_swinB_1x1x256_views2x3_max_label_avg_prob.json, + duration_thresh: 0.001, +} +# output_folder: ./ckpt/ +output_folder: ./ckpt_fineaction/ diff --git a/Downstream/Temporal-Action-Localization/configs/hacs.yaml b/Downstream/Temporal-Action-Localization/configs/hacs.yaml new file mode 100644 index 0000000..2fe8671 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/configs/hacs.yaml @@ -0,0 +1,62 @@ +dataset_name: hacs +train_split: ['training'] +val_split: ['validation'] +dataset: { + json_file: ./data/hacs/annotations/HACS_segments_v1.1.1_af.json, + feat_folder: /mnt/petrelfs/liuyi/data_feature/data_hacs/hacs_slowfast, + file_prefix: v_, + file_ext: .pkl, + num_classes: 1, + input_dim: 2560, + #2048+256=2304 + #256+768+1024 + feat_stride: 16, + num_frames: 16, + default_fps: 30, + trunc_thresh: 0.5, + crop_ratio: [0.9, 1.0], + max_seq_len: 2304, +} +model: { + fpn_type: identity, + max_buffer_len_factor: 1.0, + n_mha_win_size: [25, 25, 25, 25, 25, -1], + # shrink the model for reduced input feature channels + n_head: 4, + embd_dim: 256, + fpn_dim: 256, + head_dim: 256, + use_abs_pe: True, +} +opt: { + learning_rate: 0.001, + epochs: 20, + weight_decay: 0.05, +} +loader: { + batch_size: 16, +} +train_cfg: { + init_loss_norm: 400, + clip_grad_l2norm: 1.0, + cls_prior_prob: 0.01, + center_sample: radius, + center_sample_radius: 1.5, + label_smoothing: 0.1, +} + +# similar to THUMOS +test_cfg: { + voting_thresh: 0.7, + pre_nms_topk: 2000, + # max of 100 predictions per video + max_seg_num: 100, + min_score: 0.001, + # score fusion + multiclass_nms: False, + nms_sigma : 0.75, + # ext_score_file: ./data/hacs/annotations/validation94.32.json, + ext_score_file: ./data/hacs/annotations/tca_uniformerv2_large_only_global_anet_16x10x3.json, + duration_thresh: 0.1, +} +output_folder: ./ckpt_hacs/ diff --git a/Downstream/Temporal-Action-Localization/configs/thumos.yaml b/Downstream/Temporal-Action-Localization/configs/thumos.yaml new file mode 100644 index 0000000..ce93fa6 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/configs/thumos.yaml @@ -0,0 +1,53 @@ +dataset_name: thumos +train_split: ['validation'] +val_split: ['test'] +dataset: { + json_file: ./data/thumos/annotations/thumos14.json, + feat_folder: /mnt/petrelfs/liuyi/data_feature/data_th14/th14_i3d, + file_prefix: ~, + file_ext: .npy, + num_classes: 20, + input_dim: 1408, + #2048+768, 1024+768,1280+1024 + feat_stride: 4, + num_frames: 16, + # serve as data augmentation + trunc_thresh: 0.5, + crop_ratio: [0.9, 1.0], + max_seq_len: 2304, +} +model: { + fpn_type: identity, + max_buffer_len_factor: 6.0, + n_mha_win_size: 19, +} +opt: { + learning_rate: 0.0001, + epochs: 45, + weight_decay: 0.05, +} +loader: { + batch_size: 2, +} +train_cfg: { + init_loss_norm: 100, + clip_grad_l2norm: 1.0, + cls_prior_prob: 0.01, + center_sample: radius, + center_sample_radius: 1.5, +} +# the inference on THUMOS is quite different from EPIC-kitchens +# a major performance gain is achieved by fusing external classification scores +# doing so will require disable multiclass_nms and enable score fusion +test_cfg: { + voting_thresh: 0.7, + pre_nms_topk: 2000, + max_seg_num: 200, + 
min_score: 0.001, + # when using external scores, our model is generating "proposals" + # multiclass_nms: False, + # ext_score_file: ./data/thumos/annotations/thumos14_cls_scores.pkl, + # comment out L47-48 and uncomment L50 to disable score fusion + multiclass_nms: True, +} +output_folder: ./ckpt_th14/ diff --git a/Downstream/Temporal-Action-Localization/fa_run.sh b/Downstream/Temporal-Action-Localization/fa_run.sh new file mode 100644 index 0000000..5a70ec2 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/fa_run.sh @@ -0,0 +1 @@ +python -u ./train_eval.py ./configs/fineaction.yaml --output fa_mae_h 2>&1 | tee fa_mae_h.log \ No newline at end of file diff --git a/Downstream/Temporal-Action-Localization/hacs_run.sh b/Downstream/Temporal-Action-Localization/hacs_run.sh new file mode 100644 index 0000000..04cff38 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/hacs_run.sh @@ -0,0 +1 @@ +python -u ./train_eval.py ./configs/hacs.yaml --output hacs_mae_h 2>&1 | tee hacs_mae_h.log \ No newline at end of file diff --git a/Downstream/Temporal-Action-Localization/libs/core/__init__.py b/Downstream/Temporal-Action-Localization/libs/core/__init__.py new file mode 100644 index 0000000..b2d4e54 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/core/__init__.py @@ -0,0 +1,3 @@ +from .config import load_default_config, load_config + +__all__ = ['load_default_config', 'load_config'] diff --git a/Downstream/Temporal-Action-Localization/libs/core/__pycache__/__init__.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/core/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..daa7ea3 Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/core/__pycache__/__init__.cpython-36.pyc differ diff --git a/Downstream/Temporal-Action-Localization/libs/core/__pycache__/config.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/core/__pycache__/config.cpython-36.pyc new file mode 100644 index 0000000..84ab9a3 Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/core/__pycache__/config.cpython-36.pyc differ diff --git a/Downstream/Temporal-Action-Localization/libs/core/config.py b/Downstream/Temporal-Action-Localization/libs/core/config.py new file mode 100644 index 0000000..b91d673 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/core/config.py @@ -0,0 +1,158 @@ +import yaml + + +DEFAULTS = { + # random seed for reproducibility, a large number is preferred + "init_rand_seed": 1234567891, + # dataset loader, specify the dataset here + "dataset_name": "epic", + "devices": ['cuda:0'], # default: single gpu + "train_split": ('training', ), + "val_split": ('validation', ), + "model_name": "LocPointTransformer", + "dataset": { + # temporal stride of the feats + "feat_stride": 16, + # number of frames for each feat + "num_frames": 32, + # default fps, may vary across datasets; Set to none for read from json file + "default_fps": None, + # input feat dim + "input_dim": 2304, + # number of classes + "num_classes": 97, + # downsampling rate of features, 1 to use original resolution + "downsample_rate": 1, + # max sequence length during training + "max_seq_len": 2304, + # threshold for truncating an action + "trunc_thresh": 0.5, + # set to a tuple (e.g., (0.9, 1.0)) to enable random feature cropping + # might not be implemented by the dataloader + "crop_ratio": None, + # if true, force upsampling of the input features into a fixed size + # only used for ActivityNet + "force_upsampling": False, + }, + 
"loader": { + "batch_size": 8, + "num_workers": 4, + }, + # network architecture + "model": { + # type of backbone (convTransformer | conv) + "backbone_type": 'convTransformer', + # type of FPN (fpn | identity) + "fpn_type": "identity", + "backbone_arch": (2, 2, 5), + # scale factor between pyramid levels + "scale_factor": 2, + # regression range for pyramid levels + "regression_range": [(0, 4), (4, 8), (8, 16), (16, 32), (32, 64), (64, 10000)], + # number of heads in self-attention + "n_head": 4, + # window size for self attention; <=1 to use full seq (ie global attention) + "n_mha_win_size": -1, + # kernel size for embedding network + "embd_kernel_size": 3, + # (output) feature dim for embedding network + "embd_dim": 512, + # if attach group norm to embedding network + "embd_with_ln": True, + # feat dim for FPN + "fpn_dim": 512, + # if add ln at the end of fpn outputs + "fpn_with_ln": True, + # feat dim for head + "head_dim": 512, + # kernel size for reg/cls/center heads + "head_kernel_size": 3, + # number of layers in the head (including the final one) + "head_num_layers": 3, + # if attach group norm to heads + "head_with_ln": True, + # defines the max length of the buffered points + "max_buffer_len_factor": 6.0, + # disable abs position encoding (added to input embedding) + "use_abs_pe": False, + # use rel position encoding (added to self-attention) + "use_rel_pe": False, + }, + "train_cfg": { + # radius | none (if to use center sampling) + "center_sample": "radius", + "center_sample_radius": 1.5, + "loss_weight": 1.0, # on reg_loss, use -1 to enable auto balancing + "cls_prior_prob": 0.01, + "init_loss_norm": 2000, + # gradient cliping, not needed for pre-LN transformer + "clip_grad_l2norm": -1, + # cls head without data (a fix to epic-kitchens / thumos) + "head_empty_cls": [], + # dropout ratios for tranformers + "dropout": 0.0, + # ratio for drop path + "droppath": 0.1, + # if to use label smoothing (>0.0) + "label_smoothing": 0.0, + }, + "test_cfg": { + "pre_nms_thresh": 0.001, + "pre_nms_topk": 5000, + "iou_threshold": 0.1, + "min_score": 0.01, + "max_seg_num": 1000, + "nms_method": 'soft', # soft | hard | none + "nms_sigma" : 0.5, + "duration_thresh": 0.05, + "multiclass_nms": True, + "ext_score_file": None, + "voting_thresh" : 0.75, + }, + # optimizer (for training) + "opt": { + # solver + "type": "AdamW", # SGD or AdamW + # solver params + "momentum": 0.9, + "weight_decay": 0.0, + "learning_rate": 1e-3, + # excluding the warmup epochs + "epochs": 30, + # lr scheduler: cosine / multistep + "warmup": True, + "warmup_epochs": 5, + "schedule_type": "cosine", + # in #epochs excluding warmup + "schedule_steps": [], + "schedule_gamma": 0.1, + } +} + +def _merge(src, dst): + for k, v in src.items(): + if k in dst: + if isinstance(v, dict): + _merge(src[k], dst[k]) + else: + dst[k] = v + +def load_default_config(): + config = DEFAULTS + return config + +def _update_config(config): + # fill in derived fields + config["model"]["input_dim"] = config["dataset"]["input_dim"] + config["model"]["num_classes"] = config["dataset"]["num_classes"] + config["model"]["max_seq_len"] = config["dataset"]["max_seq_len"] + config["model"]["train_cfg"] = config["train_cfg"] + config["model"]["test_cfg"] = config["test_cfg"] + return config + +def load_config(config_file, defaults=DEFAULTS): + with open(config_file, "r") as fd: + config = yaml.load(fd, Loader=yaml.FullLoader) + _merge(defaults, config) + config = _update_config(config) + return config diff --git 
a/Downstream/Temporal-Action-Localization/libs/datasets/__init__.py b/Downstream/Temporal-Action-Localization/libs/datasets/__init__.py new file mode 100644 index 0000000..5dffa26 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/datasets/__init__.py @@ -0,0 +1,6 @@ +from .data_utils import worker_init_reset_seed, truncate_feats +from .datasets import make_dataset, make_data_loader +from . import thumos14, anet # other datasets go here + +__all__ = ['worker_init_reset_seed', 'truncate_feats', + 'make_dataset', 'make_data_loader'] diff --git a/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/__init__.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..60a9903 Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/__init__.cpython-36.pyc differ diff --git a/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/anet.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/anet.cpython-36.pyc new file mode 100644 index 0000000..7af330e Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/anet.cpython-36.pyc differ diff --git a/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/cotraining.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/cotraining.cpython-36.pyc new file mode 100644 index 0000000..45e28bd Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/cotraining.cpython-36.pyc differ diff --git a/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/data_utils.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/data_utils.cpython-36.pyc new file mode 100644 index 0000000..cd73eec Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/data_utils.cpython-36.pyc differ diff --git a/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/datasets.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/datasets.cpython-36.pyc new file mode 100644 index 0000000..ca298de Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/datasets.cpython-36.pyc differ diff --git a/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/ego4d.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/ego4d.cpython-36.pyc new file mode 100644 index 0000000..09b22d3 Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/ego4d.cpython-36.pyc differ diff --git a/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/epic_kitchens.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/epic_kitchens.cpython-36.pyc new file mode 100644 index 0000000..cd189ed Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/epic_kitchens.cpython-36.pyc differ diff --git a/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/fineaction.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/fineaction.cpython-36.pyc new file mode 100644 index 0000000..ed35c42 Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/fineaction.cpython-36.pyc differ diff --git 
a/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/hacs.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/hacs.cpython-36.pyc new file mode 100644 index 0000000..9322c93 Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/hacs.cpython-36.pyc differ diff --git a/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/hacs_anet.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/hacs_anet.cpython-36.pyc new file mode 100644 index 0000000..870e53b Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/hacs_anet.cpython-36.pyc differ diff --git a/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/thumos14.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/thumos14.cpython-36.pyc new file mode 100644 index 0000000..1edde58 Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/datasets/__pycache__/thumos14.cpython-36.pyc differ diff --git a/Downstream/Temporal-Action-Localization/libs/datasets/anet.py b/Downstream/Temporal-Action-Localization/libs/datasets/anet.py new file mode 100644 index 0000000..b42473d --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/datasets/anet.py @@ -0,0 +1,269 @@ +from linecache import updatecache +import os +import json +import h5py +import numpy as np + +import torch +from torch.utils.data import Dataset +from torch.nn import functional as F + +from .datasets import register_dataset +from .data_utils import truncate_feats +from ..utils import remove_duplicate_annotations + +import pickle + +import io +from petrel_client.client import Client +client = Client() + +@register_dataset("anet") +class ActivityNetDataset(Dataset): + def __init__( + self, + is_training, # if in training mode + split, # split, a tuple/list allowing concat of subsets + feat_folder, # folder for features + mae_feat_folder, + json_file, # json file for annotations + feat_stride, # temporal stride of the feats + num_frames, # number of frames for each feat + default_fps, # default fps + downsample_rate, # downsample rate for feats + max_seq_len, # maximum sequence length during training + trunc_thresh, # threshold for truncate an action segment + crop_ratio, # a tuple (e.g., (0.9, 1.0)) for random cropping + input_dim, # input feat dim + num_classes, # number of action categories + file_prefix, # feature file prefix if any + file_ext, # feature file extension if any + force_upsampling # force to upsample to max_seq_len + ): + # file path + # assert os.path.exists(feat_folder) and os.path.exists(json_file) + # assert client.isdir(feat_folder) and os.path.exists(json_file) + # assert isinstance(split, tuple) or isinstance(split, list) + # assert crop_ratio == None or len(crop_ratio) == 2 + self.feat_folder = feat_folder + self.mae_feat_folder = mae_feat_folder + self.use_hdf5 = '.hdf5' in feat_folder + if file_prefix is not None: + self.file_prefix = file_prefix + else: + self.file_prefix = '' + self.file_ext = file_ext + self.json_file = json_file + + # anet uses fixed length features, make sure there is no downsampling + self.force_upsampling = force_upsampling + + # split / training mode + self.split = split + self.is_training = is_training + + + # self.i3d_feature_list=list(self.i3d_feature.keys()) + + # features meta info + self.feat_stride = feat_stride + self.num_frames = num_frames + self.input_dim = input_dim + self.default_fps = 
default_fps + self.downsample_rate = downsample_rate + self.max_seq_len = max_seq_len + self.trunc_thresh = trunc_thresh + self.num_classes = num_classes + self.label_dict = None + self.crop_ratio = crop_ratio + + # load database and select the subset + dict_db, label_dict = self._load_json_db(self.json_file) + # proposal vs action categories + assert (num_classes == 1) or (len(label_dict) == num_classes) + self.data_list = dict_db + self.label_dict = label_dict + + # dataset specific attributes + self.db_attributes = { + 'dataset_name': 'ActivityNet 1.3', + 'tiou_thresholds': np.linspace(0.5, 0.95, 10), + 'empty_label_ids': [] + } + + + + + + + def get_attributes(self): + return self.db_attributes + + def _load_json_db(self, json_file): + # load database and select the subset + with open(json_file, 'r') as fid: + json_data = json.load(fid) + json_db = json_data['database'] + + # if label_dict is not available + if self.label_dict is None: + label_dict = {} + for key, value in json_db.items(): + for act in value['annotations']: + label_dict[act['label']] = act['label_id'] + + + # json.dump(label_dict,open('label_dict.json','w')) + + + # fill in the db (immutable afterwards) + dict_db = tuple() + for key, value in json_db.items(): + # skip the video if not in the split + if value['subset'].lower() not in self.split: + continue + + # if 'v_'+key not in self.i3d_feature_list: + # continue + + # get fps if available + if self.default_fps is not None: + fps = self.default_fps + elif 'fps' in value: + fps = value['fps'] + else: + assert False, "Unknown video FPS." + duration = value['duration'] + + # get annotations if available + if ('annotations' in value) and (len(value['annotations']) > 0): + valid_acts = remove_duplicate_annotations(value['annotations']) + num_acts = len(valid_acts) + segments = np.zeros([num_acts, 2], dtype=np.float32) + labels = np.zeros([num_acts, ], dtype=np.int64) + for idx, act in enumerate(valid_acts): + segments[idx][0] = act['segment'][0] + segments[idx][1] = act['segment'][1] + if self.num_classes == 1: + labels[idx] = 0 + else: + labels[idx] = label_dict[act['label']] + else: + segments = None + labels = None + dict_db += ({'id': key, + 'fps' : fps, + 'duration' : duration, + 'segments' : segments, + 'labels' : labels + }, ) + + return dict_db, label_dict + + def __len__(self): + return len(self.data_list) + + def __getitem__(self, idx): + # directly return a (truncated) data point (so it is very fast!) 
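For readers unfamiliar with the annotation format, the label dictionary built in `_load_json_db` above comes straight from the ActivityNet-style JSON. A minimal standalone sketch, using a made-up `database` entry (values are illustrative, not taken from the dataset):

```python
# Miniature ActivityNet-style 'database' entry with made-up values.
json_db = {
    "v_example": {
        "subset": "training",
        "duration": 120.5,
        "annotations": [
            {"label": "Long jump", "label_id": 51, "segment": [10.2, 45.8]},
            {"label": "Long jump", "label_id": 51, "segment": [60.0, 80.3]},
        ],
    }
}

# Same mapping rule as _load_json_db: category name -> integer id.
label_dict = {}
for value in json_db.values():
    for act in value["annotations"]:
        label_dict[act["label"]] = act["label_id"]

print(label_dict)  # {'Long jump': 51}
```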
+ # auto batching will be disabled in the subsequent dataloader + # instead the model will need to decide how to batch / preporcess the data + video_item = self.data_list[idx] + + + filename = os.path.join(self.feat_folder, self.file_prefix + video_item['id'] + self.file_ext) + + feats_tsp = np.load(filename).astype(np.float32) + + + + # feats_mae=pickle.load(open(os.path.join(self.mae_feat_folder,self.file_prefix + video_item['id']+'.pkl'),'rb')) + feats_mae=np.load(os.path.join(self.mae_feat_folder,self.file_prefix + video_item['id']+'.npy')).astype(np.float32) + feats_mae = F.interpolate(torch.from_numpy(feats_mae).permute(1,0).unsqueeze(0), size=feats_tsp.shape[0], mode='linear',align_corners=False)[0,...].permute(1,0) + feats_mae = feats_mae.numpy() + + + + # feats_clip=np.load(os.path.join('/mnt/petrelfs/liuyi/anet_clipk700/',self.file_prefix + video_item['id']+'.npy')).astype(np.float32) + # feats_clip = F.interpolate(torch.from_numpy(feats_clip).permute(1,0).unsqueeze(0), size=feats_tsp.shape[0], mode='linear',align_corners=False)[0,...].permute(1,0) + # feats_clip = feats_clip.numpy() + + # feats = np.concatenate([feats_clip, feats_mae], axis=1) + + feats=feats_mae + + + # we support both fixed length features / variable length features + if self.feat_stride > 0: + # var length features + feat_stride, num_frames = self.feat_stride, self.num_frames + # only apply down sampling here + if self.downsample_rate > 1: + feats = feats[::self.downsample_rate, :] + feat_stride = self.feat_stride * self.downsample_rate + else: + # deal with fixed length feature, recompute feat_stride, num_frames + seq_len = feats.shape[0] + assert seq_len <= self.max_seq_len + if self.force_upsampling: + # reset to max_seq_len + seq_len = self.max_seq_len + feat_stride = video_item['duration'] * video_item['fps'] / seq_len + # center the features + num_frames = feat_stride * self.num_frames + + # T x C -> C x T + feats = torch.from_numpy(np.ascontiguousarray(feats.transpose())) + + # resize the features if needed + if (self.feat_stride <= 0) and (feats.shape[-1] != self.max_seq_len) and self.force_upsampling: + resize_feats = F.interpolate( + feats.unsqueeze(0), + size=self.max_seq_len, + mode='linear', + align_corners=False + ) + feats = resize_feats.squeeze(0) + + # convert time stamp (in second) into temporal feature grids + # ok to have small negative values here + if video_item['segments'] is not None: + segments = torch.from_numpy( + (video_item['segments'] * video_item['fps'] - 0.5 * num_frames) / feat_stride + ) + labels = torch.from_numpy(video_item['labels']) + # for activity net, we have a few videos with a bunch of missing frames + # here is a quick fix for training + if self.is_training: + feat_len = feats.shape[1] + valid_seg_list, valid_label_list = [], [] + for seg, label in zip(segments, labels): + if seg[0] >= feat_len: + # skip an action outside of the feature map + continue + # truncate an action boundary + valid_seg_list.append(seg.clamp(max=feat_len)) + # some weird bug here if not converting to size 1 tensor + valid_label_list.append(label.view(1)) + segments = torch.stack(valid_seg_list, dim=0) + labels = torch.cat(valid_label_list) + else: + segments, labels = None, None + + # return a data dict + data_dict = {'video_id' : video_item['id'], + 'feats' : feats, # C x T + 'segments' : segments, # N x 2 + 'labels' : labels, # N + 'fps' : video_item['fps'], + 'duration' : video_item['duration'], + 'feat_stride' : feat_stride, + 'feat_num_frames' : num_frames} + + # no truncation is needed 
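The fixed-length branch above does two things: it linearly resizes the clip features to `max_seq_len`, then converts second-level segment timestamps onto the resulting feature grid. A standalone sketch with made-up sizes (none of the numbers below come from the repo's configs):

```python
import torch
import torch.nn.functional as F

feats = torch.randn(128, 1280)                  # T x C clip features (made-up)
duration, fps, max_seq_len, clip_frames = 200.0, 30.0, 192, 16

# force-upsample the feature map to a fixed length, as done for ActivityNet
feats = F.interpolate(
    feats.t().unsqueeze(0),                     # 1 x C x T
    size=max_seq_len, mode="linear", align_corners=False
).squeeze(0)                                    # C x max_seq_len

# recompute the effective stride, then map seconds onto the feature grid
feat_stride = duration * fps / max_seq_len      # video frames per feature step
num_frames = feat_stride * clip_frames          # receptive window per step

segments_sec = torch.tensor([[12.3, 45.6]])     # one action, in seconds
segments_grid = (segments_sec * fps - 0.5 * num_frames) / feat_stride
print(feats.shape, segments_grid)
```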
+ # truncate the features during training + if self.is_training and (segments is not None) and (self.feat_stride > 0): + data_dict = truncate_feats( + data_dict, self.max_seq_len, self.trunc_thresh, self.crop_ratio + ) + + return data_dict diff --git a/Downstream/Temporal-Action-Localization/libs/datasets/data_utils.py b/Downstream/Temporal-Action-Localization/libs/datasets/data_utils.py new file mode 100644 index 0000000..572d17d --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/datasets/data_utils.py @@ -0,0 +1,111 @@ +import os +import copy +import random +import numpy as np +import random +import torch + + +def trivial_batch_collator(batch): + """ + A batch collator that does nothing + """ + return batch + +def worker_init_reset_seed(worker_id): + """ + Reset random seed for each worker + """ + seed = torch.initial_seed() % 2 ** 31 + np.random.seed(seed) + random.seed(seed) + os.environ["PYTHONHASHSEED"] = str(seed) + +def truncate_feats( + data_dict, + max_seq_len, + trunc_thresh, + crop_ratio=None, + max_num_trials=200, + has_action=True, + no_trunc=False +): + """ + Truncate feats and time stamps in a dict item + + data_dict = {'video_id' : str + 'feats' : Tensor C x T + 'segments' : Tensor N x 2 (in feature grid) + 'labels' : Tensor N + 'fps' : float + 'feat_stride' : int + 'feat_num_frames' : in + + """ + # get the meta info + feat_len = data_dict['feats'].shape[1] + num_segs = data_dict['segments'].shape[0] + + # seq_len < max_seq_len + if feat_len <= max_seq_len: + # do nothing + if crop_ratio == None: + return data_dict + # randomly crop the seq by setting max_seq_len to a value in [l, r] + else: + max_seq_len = random.randint( + max(round(crop_ratio[0] * feat_len), 1), + min(round(crop_ratio[1] * feat_len), feat_len), + ) + # # corner case + if feat_len == max_seq_len: + return data_dict + + # otherwise, deep copy the dict + data_dict = copy.deepcopy(data_dict) + + # try a few times till a valid truncation with at least one action + for _ in range(max_num_trials): + + # sample a random truncation of the video feats + st = random.randint(0, feat_len - max_seq_len) + ed = st + max_seq_len + window = torch.as_tensor([st, ed], dtype=torch.float32) + + # compute the intersection between the sampled window and all segments + window = window[None].repeat(num_segs, 1) + left = torch.maximum(window[:, 0], data_dict['segments'][:, 0]) + right = torch.minimum(window[:, 1], data_dict['segments'][:, 1]) + inter = (right - left).clamp(min=0) + area_segs = torch.abs( + data_dict['segments'][:, 1] - data_dict['segments'][:, 0]) + inter_ratio = inter / area_segs + + # only select those segments over the thresh + seg_idx = (inter_ratio >= trunc_thresh) + + if no_trunc: + # with at least one action and not truncating any actions + seg_trunc_idx = torch.logical_and( + (inter_ratio > 0.0), (inter_ratio < 1.0) + ) + if (seg_idx.sum().item() > 0) and (seg_trunc_idx.sum().item() == 0): + break + elif has_action: + # with at least one action + if seg_idx.sum().item() > 0: + break + else: + # without any constraints + break + + # feats: C x T + data_dict['feats'] = data_dict['feats'][:, st:ed].clone() + # segments: N x 2 in feature grids + data_dict['segments'] = torch.stack((left[seg_idx], right[seg_idx]), dim=1) + # shift the time stamps due to truncation + data_dict['segments'] = data_dict['segments'] - st + # labels: N + data_dict['labels'] = data_dict['labels'][seg_idx].clone() + + return data_dict diff --git a/Downstream/Temporal-Action-Localization/libs/datasets/datasets.py 
b/Downstream/Temporal-Action-Localization/libs/datasets/datasets.py new file mode 100644 index 0000000..0dd8f20 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/datasets/datasets.py @@ -0,0 +1,34 @@ +import os +import torch +from .data_utils import trivial_batch_collator, worker_init_reset_seed + +datasets = {} +def register_dataset(name): + def decorator(cls): + datasets[name] = cls + return cls + return decorator + +def make_dataset(name, is_training, split, **kwargs): + """ + A simple dataset builder + """ + dataset = datasets[name](is_training, split, **kwargs) + return dataset + +def make_data_loader(dataset, is_training, generator, batch_size, num_workers): + """ + A simple dataloder builder + """ + loader = torch.utils.data.DataLoader( + dataset, + batch_size=batch_size, + num_workers=num_workers, + collate_fn=trivial_batch_collator, + worker_init_fn=(worker_init_reset_seed if is_training else None), + shuffle=is_training, + drop_last=is_training, + generator=generator, + persistent_workers=True + ) + return loader diff --git a/Downstream/Temporal-Action-Localization/libs/datasets/thumos14.py b/Downstream/Temporal-Action-Localization/libs/datasets/thumos14.py new file mode 100644 index 0000000..0b686a1 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/datasets/thumos14.py @@ -0,0 +1,231 @@ +import os +import json +import numpy as np + +import torch +from torch.utils.data import Dataset +from torch.nn import functional as F + +from .datasets import register_dataset +from .data_utils import truncate_feats + +import pickle + +import io +from petrel_client.client import Client +client = Client() + +@register_dataset("thumos") +class THUMOS14Dataset(Dataset): + def __init__( + self, + is_training, # if in training mode + split, # split, a tuple/list allowing concat of subsets + feat_folder, # folder for features + json_file, # json file for annotations + feat_stride, # temporal stride of the feats + num_frames, # number of frames for each feat + default_fps, # default fps + downsample_rate, # downsample rate for feats + max_seq_len, # maximum sequence length during training + trunc_thresh, # threshold for truncate an action segment + crop_ratio, # a tuple (e.g., (0.9, 1.0)) for random cropping + input_dim, # input feat dim + num_classes, # number of action categories + file_prefix, # feature file prefix if any + file_ext, # feature file extension if any + force_upsampling # force to upsample to max_seq_len + ): + # file path + # # assert os.path.exists(feat_folder) and os.path.exists(json_file) + # assert client.isdir(feat_folder) and os.path.exists(json_file) + assert isinstance(split, tuple) or isinstance(split, list) + assert crop_ratio == None or len(crop_ratio) == 2 + self.feat_folder = feat_folder + if file_prefix is not None: + self.file_prefix = file_prefix + else: + self.file_prefix = '' + self.file_ext = file_ext + self.json_file = json_file + + # split / training mode + self.split = split + self.is_training = is_training + + # features meta info + self.feat_stride = feat_stride + self.num_frames = num_frames + self.input_dim = input_dim + self.default_fps = default_fps + self.downsample_rate = downsample_rate + self.max_seq_len = max_seq_len + self.trunc_thresh = trunc_thresh + self.num_classes = num_classes + self.label_dict = None + self.crop_ratio = crop_ratio + + # load database and select the subset + dict_db, label_dict = self._load_json_db(self.json_file) + assert len(label_dict) == num_classes + self.data_list = dict_db + self.label_dict 
= label_dict + + # dataset specific attributes + self.db_attributes = { + 'dataset_name': 'thumos-14', + 'tiou_thresholds': np.linspace(0.3, 0.7, 5), + # we will mask out cliff diving + 'empty_label_ids': [], + } + + def get_attributes(self): + return self.db_attributes + + def _load_json_db(self, json_file): + # load database and select the subset + with open(json_file, 'r') as fid: + json_data = json.load(fid) + json_db = json_data['database'] + + # if label_dict is not available + if self.label_dict is None: + label_dict = {} + for key, value in json_db.items(): + for act in value['annotations']: + label_dict[act['label']] = act['label_id'] + + # fill in the db (immutable afterwards) + dict_db = tuple() + for key, value in json_db.items(): + # skip the video if not in the split + if value['subset'].lower() not in self.split: + continue + # or does not have the feature file + feat_file = os.path.join(self.feat_folder, + self.file_prefix + key + self.file_ext) + if not os.path.exists(feat_file): + # if not client.contains(feat_file): + continue + # if not os.path.exists(os.path.join('/mnt/petrelfs/liuyi/tsp_th14_mae_h',self.file_prefix + key+'.pkl')): + # continue + + # get fps if available + if self.default_fps is not None: + fps = self.default_fps + elif 'fps' in value: + fps = value['fps'] + else: + assert False, "Unknown video FPS." + + # get video duration if available + if 'duration' in value: + duration = value['duration'] + else: + duration = 1e8 + + # get annotations if available + if ('annotations' in value) and (len(value['annotations']) > 0): + # a fun fact of THUMOS: cliffdiving (4) is a subset of diving (7) + # our code can now handle this corner case + segments, labels = [], [] + for act in value['annotations']: + segments.append(act['segment']) + labels.append([label_dict[act['label']]]) + + segments = np.asarray(segments, dtype=np.float32) + labels = np.squeeze(np.asarray(labels, dtype=np.int64), axis=1) + else: + segments = None + labels = None + dict_db += ({'id': key, + 'fps' : fps, + 'duration' : duration, + 'segments' : segments, + 'labels' : labels + }, ) + + return dict_db, label_dict + + def __len__(self): + return len(self.data_list) + + def __getitem__(self, idx): + # directly return a (truncated) data point (so it is very fast!) 
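The `truncate_feats` helper in `data_utils.py` earlier in this diff drives training-time cropping: sample a random window of `max_seq_len` steps and keep only segments whose overlap ratio clears `trunc_thresh`. A simplified, self-contained illustration of that sampling loop (the helper name and numbers are illustrative, not part of the repo):

```python
import random
import torch

def sample_window(segments, feat_len, max_seq_len, trunc_thresh=0.5, max_trials=200):
    """Return segments shifted into a sampled window plus the kept mask."""
    for _ in range(max_trials):
        st = random.randint(0, feat_len - max_seq_len)
        ed = st + max_seq_len
        left = segments[:, 0].clamp(min=st)            # window/segment intersection
        right = segments[:, 1].clamp(max=ed)
        inter = (right - left).clamp(min=0)
        ratio = inter / (segments[:, 1] - segments[:, 0])
        keep = ratio >= trunc_thresh
        if keep.any():                                 # at least one action survives
            return torch.stack([left[keep], right[keep]], dim=1) - st, keep
    return None, None

segments = torch.tensor([[10., 40.], [120., 180.]])    # in feature-grid units
win_segs, keep = sample_window(segments, feat_len=256, max_seq_len=128)
print(win_segs, keep)
```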
+ # auto batching will be disabled in the subsequent dataloader + # instead the model will need to decide how to batch / preporcess the data + video_item = self.data_list[idx] + + # load features + filename = os.path.join(self.feat_folder, + self.file_prefix + video_item['id'] + self.file_ext) + + + feats_i3d = np.load(filename).astype(np.float32) + + # feats_mae=pickle.load(io.BytesIO(client.get(os.path.join('s3://video_pub/thumos_feature/stride_8',video_item['id']+'.pkl'))) ) + # feats_mae=np.load(io.BytesIO(client.get(os.path.join('s3://video_pub/thumos_feature/clip_16_4/',video_item['id']+'.npy'))) ).astype(np.float32) + # feats_mae=pickle.load(io.BytesIO(client.get(os.path.join('s3://video_pub/thumos_feature/',video_item['id']+'.pkl'))) ) + + + + feats_mae=np.load(os.path.join('/mnt/petrelfs/liuyi/data_feature/data_th14/th14_mae_g_16_4',video_item['id']+'.npy')).astype(np.float32) + # feats_mae=pickle.load(io.BytesIO(client.get(os.path.join('/mnt/petrelfs/liuyi/tsp_th14_mae_h',video_item['id']+'.pkl'))) ) + + feats_mae = F.interpolate(torch.from_numpy(feats_mae).permute(1,0).unsqueeze(0), size=feats_i3d.shape[0], mode='linear',align_corners=False)[0,...].permute(1,0) + feats_mae = feats_mae.numpy() + + + + # feats_clip=np.load(io.BytesIO(client.get(os.path.join('/mnt/petrelfs/liuyi/th14_clipk700_16_4',video_item['id']+'.npy'))) ).astype(np.float32) + # feats_clip = F.interpolate(torch.from_numpy(feats_clip).permute(1,0).unsqueeze(0), size=feats_i3d.shape[0], mode='linear',align_corners=False)[0,...].permute(1,0) + # feats_clip = feats_clip.numpy() + # feats = np.concatenate([feats_clip, feats_mae], axis=1) + + feats=feats_mae + + # feats=feats_clip + + + # feats = feats_i3d[:,0:1024] + + # feats=feats_i3d[:,1024:] + + + + + + + # deal with downsampling (= increased feat stride) + feats = feats[::self.downsample_rate, :] + feat_stride = self.feat_stride * self.downsample_rate + # T x C -> C x T + feats = torch.from_numpy(np.ascontiguousarray(feats.transpose())) + + # convert time stamp (in second) into temporal feature grids + # ok to have small negative values here + if video_item['segments'] is not None: + segments = torch.from_numpy( + (video_item['segments'] * video_item['fps'] - 0.5 * self.num_frames) / feat_stride + ) + labels = torch.from_numpy(video_item['labels']) + else: + segments, labels = None, None + + # return a data dict + data_dict = {'video_id' : video_item['id'], + 'feats' : feats, # C x T + 'segments' : segments, # N x 2 + 'labels' : labels, # N + 'fps' : video_item['fps'], + 'duration' : video_item['duration'], + 'feat_stride' : feat_stride, + 'feat_num_frames' : self.num_frames} + + # truncate the features during training + if self.is_training and (segments is not None): + data_dict = truncate_feats( + data_dict, self.max_seq_len, self.trunc_thresh, self.crop_ratio + ) + + return data_dict diff --git a/Downstream/Temporal-Action-Localization/libs/modeling/__init__.py b/Downstream/Temporal-Action-Localization/libs/modeling/__init__.py new file mode 100644 index 0000000..18d2c22 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/modeling/__init__.py @@ -0,0 +1,11 @@ +from .blocks import (MaskedConv1D, MaskedMHCA, MaskedMHA, LayerNorm, + TransformerBlock, ConvBlock, Scale, AffineDropPath) +from .models import make_backbone, make_neck, make_meta_arch, make_generator +from . import backbones # backbones +from . import necks # necks +from . import loc_generators # location generators +from . 
import meta_archs # full models + +__all__ = ['MaskedConv1D', 'MaskedMHCA', 'MaskedMHA', 'LayerNorm', + 'TransformerBlock', 'ConvBlock', 'Scale', 'AffineDropPath', + 'make_backbone', 'make_neck', 'make_meta_arch', 'make_generator'] diff --git a/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/__init__.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..5531940 Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/__init__.cpython-36.pyc differ diff --git a/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/backbones.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/backbones.cpython-36.pyc new file mode 100644 index 0000000..58bb166 Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/backbones.cpython-36.pyc differ diff --git a/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/blocks.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/blocks.cpython-36.pyc new file mode 100644 index 0000000..50ccd68 Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/blocks.cpython-36.pyc differ diff --git a/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/loc_generators.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/loc_generators.cpython-36.pyc new file mode 100644 index 0000000..c184f94 Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/loc_generators.cpython-36.pyc differ diff --git a/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/losses.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/losses.cpython-36.pyc new file mode 100644 index 0000000..6552840 Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/losses.cpython-36.pyc differ diff --git a/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/meta_archs.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/meta_archs.cpython-36.pyc new file mode 100644 index 0000000..bef3a9a Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/meta_archs.cpython-36.pyc differ diff --git a/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/models.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/models.cpython-36.pyc new file mode 100644 index 0000000..90ab811 Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/models.cpython-36.pyc differ diff --git a/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/necks.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/necks.cpython-36.pyc new file mode 100644 index 0000000..b6ed53f Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/necks.cpython-36.pyc differ diff --git a/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/weight_init.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/weight_init.cpython-36.pyc new file mode 100644 index 0000000..4c1508f Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/modeling/__pycache__/weight_init.cpython-36.pyc differ diff --git 
a/Downstream/Temporal-Action-Localization/libs/modeling/backbones.py b/Downstream/Temporal-Action-Localization/libs/modeling/backbones.py new file mode 100644 index 0000000..23ea57f --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/modeling/backbones.py @@ -0,0 +1,237 @@ +import torch +from torch import nn +from torch.nn import functional as F + +from .models import register_backbone +from .blocks import (get_sinusoid_encoding, TransformerBlock, MaskedConv1D, + ConvBlock, LayerNorm) + + +@register_backbone("convTransformer") +class ConvTransformerBackbone(nn.Module): + """ + A backbone that combines convolutions with transformers + """ + def __init__( + self, + n_in, # input feature dimension + n_embd, # embedding dimension (after convolution) + n_head, # number of head for self-attention in transformers + n_embd_ks, # conv kernel size of the embedding network + max_len, # max sequence length + arch = (2, 2, 5), # (#convs, #stem transformers, #branch transformers) + mha_win_size = [-1]*6, # size of local window for mha + scale_factor = 2, # dowsampling rate for the branch, + with_ln = False, # if to attach layernorm after conv + attn_pdrop = 0.0, # dropout rate for the attention map + proj_pdrop = 0.0, # dropout rate for the projection / MLP + path_pdrop = 0.0, # droput rate for drop path + use_abs_pe = False, # use absolute position embedding + use_rel_pe = False, # use relative position embedding + ): + super().__init__() + assert len(arch) == 3 + assert len(mha_win_size) == (1 + arch[2]) + self.arch = arch + self.mha_win_size = mha_win_size + self.max_len = max_len + self.relu = nn.ReLU(inplace=True) + self.scale_factor = scale_factor + self.use_abs_pe = use_abs_pe + self.use_rel_pe = use_rel_pe + + # position embedding (1, C, T), rescaled by 1/sqrt(n_embd) + if self.use_abs_pe: + pos_embd = get_sinusoid_encoding(self.max_len, n_embd) / (n_embd**0.5) + self.register_buffer("pos_embd", pos_embd, persistent=False) + + # embedding network using convs + self.embd = nn.ModuleList() + self.embd_norm = nn.ModuleList() + for idx in range(arch[0]): + if idx == 0: + in_channels = n_in + else: + in_channels = n_embd + self.embd.append(MaskedConv1D( + in_channels, n_embd, n_embd_ks, + stride=1, padding=n_embd_ks//2, bias=(not with_ln) + ) + ) + if with_ln: + self.embd_norm.append( + LayerNorm(n_embd) + ) + else: + self.embd_norm.append(nn.Identity()) + + # stem network using (vanilla) transformer + self.stem = nn.ModuleList() + for idx in range(arch[1]): + self.stem.append(TransformerBlock( + n_embd, n_head, + n_ds_strides=(1, 1), + attn_pdrop=attn_pdrop, + proj_pdrop=proj_pdrop, + path_pdrop=path_pdrop, + mha_win_size=self.mha_win_size[0], + use_rel_pe=self.use_rel_pe + ) + ) + + # main branch using transformer with pooling + self.branch = nn.ModuleList() + for idx in range(arch[2]): + self.branch.append(TransformerBlock( + n_embd, n_head, + n_ds_strides=(self.scale_factor, self.scale_factor), + attn_pdrop=attn_pdrop, + proj_pdrop=proj_pdrop, + path_pdrop=path_pdrop, + mha_win_size=self.mha_win_size[1+idx], + use_rel_pe=self.use_rel_pe + ) + ) + + # init weights + self.apply(self.__init_weights__) + + def __init_weights__(self, module): + # set nn.Linear/nn.Conv1d bias term to 0 + if isinstance(module, (nn.Linear, nn.Conv1d)): + if module.bias is not None: + torch.nn.init.constant_(module.bias, 0.) 
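To make the shape bookkeeping in the constructor above concrete: with `arch = (2, 2, 5)` the backbone emits one stem-resolution feature map plus five branch levels, each downsampled by `scale_factor`, and `mha_win_size` must provide one local window per level. A quick sanity check (the concrete numbers are examples, not values taken from a config file):

```python
arch, scale_factor, max_seq_len = (2, 2, 5), 2, 2304   # example values only
mha_win_size = [19] * (1 + arch[2])                    # one window size per pyramid level

assert len(mha_win_size) == 1 + arch[2]
level_lengths = [max_seq_len // scale_factor ** l for l in range(arch[2] + 1)]
print(level_lengths)   # [2304, 1152, 576, 288, 144, 72]
```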
+ + def forward(self, x, mask): + # x: batch size, feature channel, sequence length, + # mask: batch size, 1, sequence length (bool) + B, C, T = x.size() + + # embedding network + for idx in range(len(self.embd)): + x, mask = self.embd[idx](x, mask) + x = self.relu(self.embd_norm[idx](x)) + + # training: using fixed length position embeddings + if self.use_abs_pe and self.training: + assert T <= self.max_len, "Reached max length." + pe = self.pos_embd + # add pe to x + x = x + pe[:, :, :T] * mask.to(x.dtype) + + # inference: re-interpolate position embeddings for over-length sequences + if self.use_abs_pe and (not self.training): + if T >= self.max_len: + pe = F.interpolate( + self.pos_embd, T, mode='linear', align_corners=False) + else: + pe = self.pos_embd + # add pe to x + x = x + pe[:, :, :T] * mask.to(x.dtype) + + # stem transformer + for idx in range(len(self.stem)): + x, mask = self.stem[idx](x, mask) + + # prep for outputs + out_feats = tuple() + out_masks = tuple() + # 1x resolution + out_feats += (x, ) + out_masks += (mask, ) + + # main branch with downsampling + for idx in range(len(self.branch)): + x, mask = self.branch[idx](x, mask) + out_feats += (x, ) + out_masks += (mask, ) + + return out_feats, out_masks + +@register_backbone("conv") +class ConvBackbone(nn.Module): + """ + A backbone that with only conv + """ + def __init__( + self, + n_in, # input feature dimension + n_embd, # embedding dimension (after convolution) + n_embd_ks, # conv kernel size of the embedding network + arch = (2, 2, 5), # (#convs, #stem convs, #branch convs) + scale_factor = 2, # dowsampling rate for the branch + with_ln=False, # if to use layernorm + ): + super().__init__() + assert len(arch) == 3 + self.arch = arch + self.relu = nn.ReLU(inplace=True) + self.scale_factor = scale_factor + + # embedding network using convs + self.embd = nn.ModuleList() + self.embd_norm = nn.ModuleList() + for idx in range(arch[0]): + if idx == 0: + in_channels = n_in + else: + in_channels = n_embd + self.embd.append(MaskedConv1D( + in_channels, n_embd, n_embd_ks, + stride=1, padding=n_embd_ks//2, bias=(not with_ln) + ) + ) + if with_ln: + self.embd_norm.append( + LayerNorm(n_embd) + ) + else: + self.embd_norm.append(nn.Identity()) + + # stem network using (vanilla) transformer + self.stem = nn.ModuleList() + for idx in range(arch[1]): + self.stem.append(ConvBlock(n_embd, 3, 1)) + + # main branch using transformer with pooling + self.branch = nn.ModuleList() + for idx in range(arch[2]): + self.branch.append(ConvBlock(n_embd, 3, self.scale_factor)) + + # init weights + self.apply(self.__init_weights__) + + def __init_weights__(self, module): + # set nn.Linear bias term to 0 + if isinstance(module, (nn.Linear, nn.Conv1d)): + if module.bias is not None: + torch.nn.init.constant_(module.bias, 0.) 
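The forward pass above stretches the fixed positional table when an inference-time sequence is longer than `max_len`, and simply slices it otherwise. A standalone sketch of that step (a random tensor stands in for the sinusoid table; sizes are made up):

```python
import torch
import torch.nn.functional as F

max_len, n_embd, T = 2304, 512, 3000           # example sizes only
pos_embd = torch.randn(1, n_embd, max_len)     # stand-in for the sinusoid table

if T >= max_len:
    # over-length clip: re-interpolate the table to the actual length
    pe = F.interpolate(pos_embd, size=T, mode="linear", align_corners=False)
else:
    # otherwise use the first T positions
    pe = pos_embd[:, :, :T]
print(pe.shape)                                # torch.Size([1, 512, 3000])
```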
+ + def forward(self, x, mask): + # x: batch size, feature channel, sequence length, + # mask: batch size, 1, sequence length (bool) + B, C, T = x.size() + + # embedding network + for idx in range(len(self.embd)): + x, mask = self.embd[idx](x, mask) + x = self.relu(self.embd_norm[idx](x)) + + # stem conv + for idx in range(len(self.stem)): + x, mask = self.stem[idx](x, mask) + + # prep for outputs + out_feats = tuple() + out_masks = tuple() + # 1x resolution + out_feats += (x, ) + out_masks += (mask, ) + + # main branch with downsampling + for idx in range(len(self.branch)): + x, mask = self.branch[idx](x, mask) + out_feats += (x, ) + out_masks += (mask, ) + + return out_feats, out_masks diff --git a/Downstream/Temporal-Action-Localization/libs/modeling/blocks.py b/Downstream/Temporal-Action-Localization/libs/modeling/blocks.py new file mode 100644 index 0000000..3508601 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/modeling/blocks.py @@ -0,0 +1,864 @@ +import math +import numpy as np + +import torch +import torch.nn.functional as F +from torch import nn +from .weight_init import trunc_normal_ + + +class MaskedConv1D(nn.Module): + """ + Masked 1D convolution. Interface remains the same as Conv1d. + Only support a sub set of 1d convs + """ + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + bias=True, + padding_mode='zeros' + ): + super().__init__() + # element must be aligned + assert (kernel_size % 2 == 1) and (kernel_size // 2 == padding) + # stride + self.stride = stride + self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, + stride, padding, dilation, groups, bias, padding_mode) + # zero out the bias term if it exists + if bias: + torch.nn.init.constant_(self.conv.bias, 0.) 
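The assertions above pin down `MaskedConv1D`'s length contract: an odd kernel with `padding = kernel_size // 2` and `T` divisible by the stride yields an output of length `T // stride`, so the boolean mask in the forward pass that follows can be nearest-downsampled to match. A quick standalone check with plain `nn.Conv1d`:

```python
import torch
from torch import nn
import torch.nn.functional as F

B, C, T, stride, k = 2, 64, 96, 2, 3            # made-up sizes
x = torch.randn(B, C, T)
mask = torch.ones(B, 1, T, dtype=torch.bool)

conv = nn.Conv1d(C, C, k, stride=stride, padding=k // 2)
out = conv(x)                                    # B x C x (T // stride)
out_mask = F.interpolate(mask.float(), size=T // stride, mode="nearest").bool()
out = out * out_mask.to(out.dtype)               # zero out masked positions
print(out.shape, out_mask.shape)                 # (2, 64, 48) (2, 1, 48)
```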
+ + def forward(self, x, mask): + # x: batch size, feature channel, sequence length, + # mask: batch size, 1, sequence length (bool) + B, C, T = x.size() + # input length must be divisible by stride + assert T % self.stride == 0 + + # conv + out_conv = self.conv(x) + # compute the mask + if self.stride > 1: + # downsample the mask using nearest neighbor + out_mask = F.interpolate( + mask.to(x.dtype), + size=T//self.stride, + mode='nearest' + ) + else: + # masking out the features + out_mask = mask.to(x.dtype) + + # masking the output, stop grad to mask + out_conv = out_conv * out_mask.detach() + out_mask = out_mask.bool() + return out_conv, out_mask + + +class LayerNorm(nn.Module): + """ + LayerNorm that supports inputs of size B, C, T + """ + def __init__( + self, + num_channels, + eps = 1e-5, + affine = True, + device = None, + dtype = None, + ): + super().__init__() + factory_kwargs = {'device': device, 'dtype': dtype} + self.num_channels = num_channels + self.eps = eps + self.affine = affine + + if self.affine: + self.weight = nn.Parameter( + torch.ones([1, num_channels, 1], **factory_kwargs)) + self.bias = nn.Parameter( + torch.zeros([1, num_channels, 1], **factory_kwargs)) + else: + self.register_parameter('weight', None) + self.register_parameter('bias', None) + + def forward(self, x): + assert x.dim() == 3 + assert x.shape[1] == self.num_channels + + # normalization along C channels + mu = torch.mean(x, dim=1, keepdim=True) + res_x = x - mu + sigma = torch.mean(res_x**2, dim=1, keepdim=True) + out = res_x / torch.sqrt(sigma + self.eps) + + # apply weight and bias + if self.affine: + out *= self.weight + out += self.bias + + return out + + +# helper functions for Transformer blocks +def get_sinusoid_encoding(n_position, d_hid): + ''' Sinusoid position encoding table ''' + + def get_position_angle_vec(position): + return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)] + + sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)]) + sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i + sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 + + # return a tensor of size 1 C T + return torch.FloatTensor(sinusoid_table).unsqueeze(0).transpose(1, 2) + + +# attention / transformers +class MaskedMHA(nn.Module): + """ + Multi Head Attention with mask + + Modified from https://github.com/karpathy/minGPT/blob/master/mingpt/model.py + """ + + def __init__( + self, + n_embd, # dimension of the input embedding + n_head, # number of heads in multi-head self-attention + attn_pdrop=0.0, # dropout rate for the attention map + proj_pdrop=0.0 # dropout rate for projection op + ): + super().__init__() + assert n_embd % n_head == 0 + self.n_embd = n_embd + self.n_head = n_head + self.n_channels = n_embd // n_head + self.scale = 1.0 / math.sqrt(self.n_channels) + + # key, query, value projections for all heads + # it is OK to ignore masking, as the mask will be attached on the attention + self.key = nn.Conv1d(self.n_embd, self.n_embd, 1) + self.query = nn.Conv1d(self.n_embd, self.n_embd, 1) + self.value = nn.Conv1d(self.n_embd, self.n_embd, 1) + + # regularization + self.attn_drop = nn.Dropout(attn_pdrop) + self.proj_drop = nn.Dropout(proj_pdrop) + + # output projection + self.proj = nn.Conv1d(self.n_embd, self.n_embd, 1) + + def forward(self, x, mask): + # x: batch size, feature channel, sequence length, + # mask: batch size, 1, sequence length (bool) + B, C, T = x.size() + + # calculate query, key, values for 
all heads in batch + # (B, nh * hs, T) + k = self.key(x) + q = self.query(x) + v = self.value(x) + + # move head forward to be the batch dim + # (B, nh * hs, T) -> (B, nh, T, hs) + k = k.view(B, self.n_head, self.n_channels, -1).transpose(2, 3) + q = q.view(B, self.n_head, self.n_channels, -1).transpose(2, 3) + v = v.view(B, self.n_head, self.n_channels, -1).transpose(2, 3) + + # self-attention: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T) + att = (q * self.scale) @ k.transpose(-2, -1) + # prevent q from attending to invalid tokens + att = att.masked_fill(torch.logical_not(mask[:, :, None, :]), float('-inf')) + # softmax attn + att = F.softmax(att, dim=-1) + att = self.attn_drop(att) + # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs) + out = att @ (v * mask[:, :, :, None].to(v.dtype)) + # re-assemble all head outputs side by side + out = out.transpose(2, 3).contiguous().view(B, C, -1) + + # output projection + skip connection + out = self.proj_drop(self.proj(out)) * mask.to(out.dtype) + return out, mask + + +class MaskedMHCA(nn.Module): + """ + Multi Head Conv Attention with mask + + Add a depthwise convolution within a standard MHA + The extra conv op can be used to + (1) encode relative position information (relacing position encoding); + (2) downsample the features if needed; + (3) match the feature channels + + Note: With current implementation, the downsampled feature will be aligned + to every s+1 time step, where s is the downsampling stride. This allows us + to easily interpolate the corresponding positional embeddings. + + Modified from https://github.com/karpathy/minGPT/blob/master/mingpt/model.py + """ + + def __init__( + self, + n_embd, # dimension of the output features + n_head, # number of heads in multi-head self-attention + n_qx_stride=1, # dowsampling stride for query and input + n_kv_stride=1, # downsampling stride for key and value + attn_pdrop=0.0, # dropout rate for the attention map + proj_pdrop=0.0, # dropout rate for projection op + ): + super().__init__() + assert n_embd % n_head == 0 + self.n_embd = n_embd + self.n_head = n_head + self.n_channels = n_embd // n_head + self.scale = 1.0 / math.sqrt(self.n_channels) + + # conv/pooling operations + assert (n_qx_stride == 1) or (n_qx_stride % 2 == 0) + assert (n_kv_stride == 1) or (n_kv_stride % 2 == 0) + self.n_qx_stride = n_qx_stride + self.n_kv_stride = n_kv_stride + + # query conv (depthwise) + kernel_size = self.n_qx_stride + 1 if self.n_qx_stride > 1 else 3 + stride, padding = self.n_kv_stride, kernel_size // 2 + # 1d depthwise conv + self.query_conv = MaskedConv1D( + self.n_embd, self.n_embd, kernel_size, + stride=stride, padding=padding, groups=self.n_embd, bias=False + ) + # layernorm + self.query_norm = LayerNorm(self.n_embd) + + # key, value conv (depthwise) + kernel_size = self.n_kv_stride + 1 if self.n_kv_stride > 1 else 3 + stride, padding = self.n_kv_stride, kernel_size // 2 + # 1d depthwise conv + self.key_conv = MaskedConv1D( + self.n_embd, self.n_embd, kernel_size, + stride=stride, padding=padding, groups=self.n_embd, bias=False + ) + self.key_norm = LayerNorm(self.n_embd) + self.value_conv = MaskedConv1D( + self.n_embd, self.n_embd, kernel_size, + stride=stride, padding=padding, groups=self.n_embd, bias=False + ) + # layernorm + self.value_norm = LayerNorm(self.n_embd) + + # key, query, value projections for all heads + # it is OK to ignore masking, as the mask will be attached on the attention + self.key = nn.Conv1d(self.n_embd, self.n_embd, 1) + self.query = nn.Conv1d(self.n_embd, 
self.n_embd, 1) + self.value = nn.Conv1d(self.n_embd, self.n_embd, 1) + + # regularization + self.attn_drop = nn.Dropout(attn_pdrop) + self.proj_drop = nn.Dropout(proj_pdrop) + + # output projection + self.proj = nn.Conv1d(self.n_embd, self.n_embd, 1) + + def forward(self, x, mask): + # x: batch size, feature channel, sequence length, + # mask: batch size, 1, sequence length (bool) + B, C, T = x.size() + + # query conv -> (B, nh * hs, T') + q, qx_mask = self.query_conv(x, mask) + q = self.query_norm(q) + # key, value conv -> (B, nh * hs, T'') + k, kv_mask = self.key_conv(x, mask) + k = self.key_norm(k) + v, _ = self.value_conv(x, mask) + v = self.value_norm(v) + + # projections + q = self.query(q) + k = self.key(k) + v = self.value(v) + + # move head forward to be the batch dim + # (B, nh * hs, T'/T'') -> (B, nh, T'/T'', hs) + k = k.view(B, self.n_head, self.n_channels, -1).transpose(2, 3) + q = q.view(B, self.n_head, self.n_channels, -1).transpose(2, 3) + v = v.view(B, self.n_head, self.n_channels, -1).transpose(2, 3) + + # self-attention: (B, nh, T', hs) x (B, nh, hs, T'') -> (B, nh, T', T'') + att = (q * self.scale) @ k.transpose(-2, -1) + # prevent q from attending to invalid tokens + att = att.masked_fill(torch.logical_not(kv_mask[:, :, None, :]), float('-inf')) + # softmax attn + att = F.softmax(att, dim=-1) + att = self.attn_drop(att) + # (B, nh, T', T'') x (B, nh, T'', hs) -> (B, nh, T', hs) + out = att @ (v * kv_mask[:, :, :, None].to(v.dtype)) + # re-assemble all head outputs side by side + out = out.transpose(2, 3).contiguous().view(B, C, -1) + + # output projection + skip connection + out = self.proj_drop(self.proj(out)) * qx_mask.to(out.dtype) + return out, qx_mask + + +class LocalMaskedMHCA(nn.Module): + """ + Local Multi Head Conv Attention with mask + + Add a depthwise convolution within a standard MHA + The extra conv op can be used to + (1) encode relative position information (relacing position encoding); + (2) downsample the features if needed; + (3) match the feature channels + + Note: With current implementation, the downsampled feature will be aligned + to every s+1 time step, where s is the downsampling stride. This allows us + to easily interpolate the corresponding positional embeddings. 
+ + The implementation is fairly tricky, code reference from + https://github.com/huggingface/transformers/blob/master/src/transformers/models/longformer/modeling_longformer.py + """ + + def __init__( + self, + n_embd, # dimension of the output features + n_head, # number of heads in multi-head self-attention + window_size, # size of the local attention window + n_qx_stride=1, # dowsampling stride for query and input + n_kv_stride=1, # downsampling stride for key and value + attn_pdrop=0.0, # dropout rate for the attention map + proj_pdrop=0.0, # dropout rate for projection op + use_rel_pe=False # use relative position encoding + ): + super().__init__() + assert n_embd % n_head == 0 + self.n_embd = n_embd + self.n_head = n_head + self.n_channels = n_embd // n_head + self.scale = 1.0 / math.sqrt(self.n_channels) + self.window_size = window_size + self.window_overlap = window_size // 2 + # must use an odd window size + assert self.window_size > 1 and self.n_head >= 1 + self.use_rel_pe = use_rel_pe + + # conv/pooling operations + assert (n_qx_stride == 1) or (n_qx_stride % 2 == 0) + assert (n_kv_stride == 1) or (n_kv_stride % 2 == 0) + self.n_qx_stride = n_qx_stride + self.n_kv_stride = n_kv_stride + + # query conv (depthwise) + kernel_size = self.n_qx_stride + 1 if self.n_qx_stride > 1 else 3 + stride, padding = self.n_kv_stride, kernel_size // 2 + # 1d depthwise conv + self.query_conv = MaskedConv1D( + self.n_embd, self.n_embd, kernel_size, + stride=stride, padding=padding, groups=self.n_embd, bias=False + ) + # layernorm + self.query_norm = LayerNorm(self.n_embd) + + # key, value conv (depthwise) + kernel_size = self.n_kv_stride + 1 if self.n_kv_stride > 1 else 3 + stride, padding = self.n_kv_stride, kernel_size // 2 + # 1d depthwise conv + self.key_conv = MaskedConv1D( + self.n_embd, self.n_embd, kernel_size, + stride=stride, padding=padding, groups=self.n_embd, bias=False + ) + self.key_norm = LayerNorm(self.n_embd) + self.value_conv = MaskedConv1D( + self.n_embd, self.n_embd, kernel_size, + stride=stride, padding=padding, groups=self.n_embd, bias=False + ) + # layernorm + self.value_norm = LayerNorm(self.n_embd) + + # key, query, value projections for all heads + # it is OK to ignore masking, as the mask will be attached on the attention + self.key = nn.Conv1d(self.n_embd, self.n_embd, 1) + self.query = nn.Conv1d(self.n_embd, self.n_embd, 1) + self.value = nn.Conv1d(self.n_embd, self.n_embd, 1) + + # regularization + self.attn_drop = nn.Dropout(attn_pdrop) + self.proj_drop = nn.Dropout(proj_pdrop) + + # output projection + self.proj = nn.Conv1d(self.n_embd, self.n_embd, 1) + + # relative position encoding + if self.use_rel_pe: + self.rel_pe = nn.Parameter( + torch.zeros(1, 1, self.n_head, self.window_size)) + trunc_normal_(self.rel_pe, std=(2.0 / self.n_embd)**0.5) + + @staticmethod + def _chunk(x, window_overlap): + """convert into overlapping chunks. 
Chunk size = 2w, overlap size = w""" + # x: B x nh, T, hs + # non-overlapping chunks of size = 2w -> B x nh, T//2w, 2w, hs + x = x.view( + x.size(0), + x.size(1) // (window_overlap * 2), + window_overlap * 2, + x.size(2), + ) + + # use `as_strided` to make the chunks overlap with an overlap size = window_overlap + chunk_size = list(x.size()) + chunk_size[1] = chunk_size[1] * 2 - 1 + chunk_stride = list(x.stride()) + chunk_stride[1] = chunk_stride[1] // 2 + + # B x nh, #chunks = T//w - 1, 2w, hs + return x.as_strided(size=chunk_size, stride=chunk_stride) + + @staticmethod + def _pad_and_transpose_last_two_dims(x, padding): + """pads rows and then flips rows and columns""" + # padding value is not important because it will be overwritten + x = nn.functional.pad(x, padding) + x = x.view(*x.size()[:-2], x.size(-1), x.size(-2)) + return x + + @staticmethod + def _mask_invalid_locations(input_tensor, affected_seq_len): + beginning_mask_2d = input_tensor.new_ones(affected_seq_len, affected_seq_len + 1).tril().flip(dims=[0]) + beginning_mask = beginning_mask_2d[None, :, None, :] + ending_mask = beginning_mask.flip(dims=(1, 3)) + beginning_input = input_tensor[:, :affected_seq_len, :, : affected_seq_len + 1] + beginning_mask = beginning_mask.expand(beginning_input.size()) + # `== 1` converts to bool or uint8 + beginning_input.masked_fill_(beginning_mask == 1, -float("inf")) + ending_input = input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1) :] + ending_mask = ending_mask.expand(ending_input.size()) + # `== 1` converts to bool or uint8 + ending_input.masked_fill_(ending_mask == 1, -float("inf")) + + @staticmethod + def _pad_and_diagonalize(x): + """ + shift every row 1 step right, converting columns into diagonals. + Example:: + chunked_hidden_states: [ 0.4983, 2.6918, -0.0071, 1.0492, + -1.8348, 0.7672, 0.2986, 0.0285, + -0.7584, 0.4206, -0.0405, 0.1599, + 2.0514, -1.1600, 0.5372, 0.2629 ] + window_overlap = num_rows = 4 + (pad & diagonalize) => + [ 0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000 + 0.0000, -1.8348, 0.7672, 0.2986, 0.0285, 0.0000, 0.0000 + 0.0000, 0.0000, -0.7584, 0.4206, -0.0405, 0.1599, 0.0000 + 0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629 ] + """ + total_num_heads, num_chunks, window_overlap, hidden_dim = x.size() + # total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1). + x = nn.functional.pad( + x, (0, window_overlap + 1) + ) + # total_num_heads x num_chunks x window_overlap*window_overlap+window_overlap + x = x.view(total_num_heads, num_chunks, -1) + # total_num_heads x num_chunks x window_overlap*window_overlap + x = x[:, :, :-window_overlap] + x = x.view( + total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim + ) + x = x[:, :, :, :-1] + return x + + def _sliding_chunks_query_key_matmul( + self, query, key, num_heads, window_overlap + ): + """ + Matrix multiplication of query and key tensors using with a sliding window attention pattern. 
This implementation splits the input into overlapping chunks of size 2w with an overlap of size w (window_overlap) + """ + # query / key: B*nh, T, hs + bnh, seq_len, head_dim = query.size() + batch_size = bnh // num_heads + assert seq_len % (window_overlap * 2) == 0 + assert query.size() == key.size() + + chunks_count = seq_len // window_overlap - 1 + + # B * num_heads, head_dim, #chunks=(T//w - 1), 2w + chunk_query = self._chunk(query, window_overlap) + chunk_key = self._chunk(key, window_overlap) + + # matrix multiplication + # bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim + # bcyd: batch_size * num_heads x chunks x 2window_overlap x head_dim + # bcxy: batch_size * num_heads x chunks x 2window_overlap x 2window_overlap + diagonal_chunked_attention_scores = torch.einsum( + "bcxd,bcyd->bcxy", (chunk_query, chunk_key)) + + # convert diagonals into columns + # B * num_heads, #chunks, 2w, 2w+1 + diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims( + diagonal_chunked_attention_scores, padding=(0, 0, 0, 1) + ) + + # allocate space for the overall attention matrix where the chunks are combined. The last dimension + # has (window_overlap * 2 + 1) columns. The first (window_overlap) columns are the window_overlap lower triangles (attention from a word to + # window_overlap previous words). The following column is attention score from each word to itself, then + # followed by window_overlap columns for the upper triangle. + diagonal_attention_scores = diagonal_chunked_attention_scores.new_empty( + (batch_size * num_heads, chunks_count + 1, window_overlap, window_overlap * 2 + 1) + ) + + # copy parts from diagonal_chunked_attention_scores into the combined matrix of attentions + # - copying the main diagonal and the upper triangle + diagonal_attention_scores[:, :-1, :, window_overlap:] = diagonal_chunked_attention_scores[ + :, :, :window_overlap, : window_overlap + 1 + ] + diagonal_attention_scores[:, -1, :, window_overlap:] = diagonal_chunked_attention_scores[ + :, -1, window_overlap:, : window_overlap + 1 + ] + # - copying the lower triangle + diagonal_attention_scores[:, 1:, :, :window_overlap] = diagonal_chunked_attention_scores[ + :, :, -(window_overlap + 1) : -1, window_overlap + 1 : + ] + + diagonal_attention_scores[:, 0, 1:window_overlap, 1:window_overlap] = diagonal_chunked_attention_scores[ + :, 0, : window_overlap - 1, 1 - window_overlap : + ] + + # separate batch_size and num_heads dimensions again + diagonal_attention_scores = diagonal_attention_scores.view( + batch_size, num_heads, seq_len, 2 * window_overlap + 1 + ).transpose(2, 1) + + self._mask_invalid_locations(diagonal_attention_scores, window_overlap) + return diagonal_attention_scores + + def _sliding_chunks_matmul_attn_probs_value( + self, attn_probs, value, num_heads, window_overlap + ): + """ + Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors. 
Returned tensor will be of the + same shape as `attn_probs` + """ + bnh, seq_len, head_dim = value.size() + batch_size = bnh // num_heads + assert seq_len % (window_overlap * 2) == 0 + assert attn_probs.size(3) == 2 * window_overlap + 1 + chunks_count = seq_len // window_overlap - 1 + # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size 2 window overlap + + chunked_attn_probs = attn_probs.transpose(1, 2).reshape( + batch_size * num_heads, seq_len // window_overlap, window_overlap, 2 * window_overlap + 1 + ) + + # pad seq_len with w at the beginning of the sequence and another window overlap at the end + padded_value = nn.functional.pad(value, (0, 0, window_overlap, window_overlap), value=-1) + + # chunk padded_value into chunks of size 3 window overlap and an overlap of size window overlap + chunked_value_size = (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim) + chunked_value_stride = padded_value.stride() + chunked_value_stride = ( + chunked_value_stride[0], + window_overlap * chunked_value_stride[1], + chunked_value_stride[1], + chunked_value_stride[2], + ) + chunked_value = padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride) + + chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs) + + context = torch.einsum("bcwd,bcdh->bcwh", (chunked_attn_probs, chunked_value)) + return context.view(batch_size, num_heads, seq_len, head_dim) + + def forward(self, x, mask): + # x: batch size, feature channel, sequence length, + # mask: batch size, 1, sequence length (bool) + B, C, T = x.size() + + # step 1: depth convolutions + # query conv -> (B, nh * hs, T') + q, qx_mask = self.query_conv(x, mask) + q = self.query_norm(q) + # key, value conv -> (B, nh * hs, T'') + k, kv_mask = self.key_conv(x, mask) + k = self.key_norm(k) + v, _ = self.value_conv(x, mask) + v = self.value_norm(v) + + # step 2: query, key, value transforms & reshape + # projections + q = self.query(q) + k = self.key(k) + v = self.value(v) + # (B, nh * hs, T) -> (B, nh, T, hs) + q = q.view(B, self.n_head, self.n_channels, -1).transpose(2, 3) + k = k.view(B, self.n_head, self.n_channels, -1).transpose(2, 3) + v = v.view(B, self.n_head, self.n_channels, -1).transpose(2, 3) + # view as (B * nh, T, hs) + q = q.view(B * self.n_head, -1, self.n_channels).contiguous() + k = k.view(B * self.n_head, -1, self.n_channels).contiguous() + v = v.view(B * self.n_head, -1, self.n_channels).contiguous() + + # step 3: compute local self-attention with rel pe and masking + q *= self.scale + # chunked query key attention -> B, T, nh, 2w+1 = window_size + att = self._sliding_chunks_query_key_matmul( + q, k, self.n_head, self.window_overlap) + + # rel pe + if self.use_rel_pe: + att += self.rel_pe + # kv_mask -> B, T'', 1 + inverse_kv_mask = torch.logical_not( + kv_mask[:, :, :, None].view(B, -1, 1)) + # 0 for valid slot, -inf for masked ones + float_inverse_kv_mask = inverse_kv_mask.type_as(q).masked_fill( + inverse_kv_mask, -1e4) + # compute the diagonal mask (for each local window) + diagonal_mask = self._sliding_chunks_query_key_matmul( + float_inverse_kv_mask.new_ones(size=float_inverse_kv_mask.size()), + float_inverse_kv_mask, + 1, + self.window_overlap + ) + att += diagonal_mask + + # ignore input masking for now + att = nn.functional.softmax(att, dim=-1) + # softmax sometimes inserts NaN if all positions are masked, replace them with 0 + att = att.masked_fill( + torch.logical_not(kv_mask.squeeze(1)[:, :, None, None]), 0.0) + att = self.attn_drop(att) 
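The sliding-window attention above hinges on the `_chunk` re-striding trick: the sequence is first viewed as non-overlapping chunks of size `2w`, then `as_strided` doubles the chunk count (minus one) with half the stride so consecutive chunks overlap by `w`. A self-contained toy demonstration:

```python
import torch

w = 4                                                      # window_overlap
x = torch.arange(32, dtype=torch.float32).view(1, 32, 1)   # (B*nh, T, hs) = (1, 32, 1)

# non-overlapping chunks of size 2w
chunks = x.view(x.size(0), x.size(1) // (2 * w), 2 * w, x.size(2))

# re-stride so consecutive chunks overlap by w: gives T//w - 1 chunks of size 2w
size = list(chunks.size())
size[1] = size[1] * 2 - 1
stride = list(chunks.stride())
stride[1] = stride[1] // 2
overlapping = chunks.as_strided(size=size, stride=stride)

print(overlapping.shape)        # torch.Size([1, 7, 8, 1])
print(overlapping[0, 0, :, 0])  # tensor([0., 1., 2., 3., 4., 5., 6., 7.])
print(overlapping[0, 1, :, 0])  # tensor([4., ..., 11.]), overlaps the previous chunk by w
```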
+ + # step 4: compute attention value product + output projection + # chunked attn value product -> B, nh, T, hs + out = self._sliding_chunks_matmul_attn_probs_value( + att, v, self.n_head, self.window_overlap) + # transpose to B, nh, hs, T -> B, nh*hs, T + out = out.transpose(2, 3).contiguous().view(B, C, -1) + # output projection + skip connection + out = self.proj_drop(self.proj(out)) * qx_mask.to(out.dtype) + return out, qx_mask + + +class TransformerBlock(nn.Module): + """ + A simple (post layer norm) Transformer block + Modified from https://github.com/karpathy/minGPT/blob/master/mingpt/model.py + """ + def __init__( + self, + n_embd, # dimension of the input features + n_head, # number of attention heads + n_ds_strides=(1, 1), # downsampling strides for q & x, k & v + n_out=None, # output dimension, if None, set to input dim + n_hidden=None, # dimension of the hidden layer in MLP + act_layer=nn.GELU, # nonlinear activation used in MLP, default GELU + attn_pdrop=0.0, # dropout rate for the attention map + proj_pdrop=0.0, # dropout rate for the projection / MLP + path_pdrop=0.0, # drop path rate + mha_win_size=-1, # > 0 to use window mha + use_rel_pe=False # if to add rel position encoding to attention + ): + super().__init__() + assert len(n_ds_strides) == 2 + # layer norm for order (B C T) + self.ln1 = LayerNorm(n_embd) + self.ln2 = LayerNorm(n_embd) + + # specify the attention module + if mha_win_size > 1: + self.attn = LocalMaskedMHCA( + n_embd, + n_head, + window_size=mha_win_size, + n_qx_stride=n_ds_strides[0], + n_kv_stride=n_ds_strides[1], + attn_pdrop=attn_pdrop, + proj_pdrop=proj_pdrop, + use_rel_pe=use_rel_pe # only valid for local attention + ) + else: + self.attn = MaskedMHCA( + n_embd, + n_head, + n_qx_stride=n_ds_strides[0], + n_kv_stride=n_ds_strides[1], + attn_pdrop=attn_pdrop, + proj_pdrop=proj_pdrop + ) + + # input + if n_ds_strides[0] > 1: + kernel_size, stride, padding = \ + n_ds_strides[0] + 1, n_ds_strides[0], (n_ds_strides[0] + 1)//2 + self.pool_skip = nn.MaxPool1d( + kernel_size, stride=stride, padding=padding) + else: + self.pool_skip = nn.Identity() + + # two layer mlp + if n_hidden is None: + n_hidden = 4 * n_embd # default + if n_out is None: + n_out = n_embd + # ok to use conv1d here with stride=1 + self.mlp = nn.Sequential( + nn.Conv1d(n_embd, n_hidden, 1), + act_layer(), + nn.Dropout(proj_pdrop, inplace=True), + nn.Conv1d(n_hidden, n_out, 1), + nn.Dropout(proj_pdrop, inplace=True), + ) + + # drop path + if path_pdrop > 0.0: + self.drop_path_attn = AffineDropPath(n_embd, drop_prob = path_pdrop) + self.drop_path_mlp = AffineDropPath(n_out, drop_prob = path_pdrop) + else: + self.drop_path_attn = nn.Identity() + self.drop_path_mlp = nn.Identity() + + def forward(self, x, mask, pos_embd=None): + # pre-LN transformer: https://arxiv.org/pdf/2002.04745.pdf + out, out_mask = self.attn(self.ln1(x), mask) + out_mask_float = out_mask.to(out.dtype) + out = self.pool_skip(x) * out_mask_float + self.drop_path_attn(out) + # FFN + out = out + self.drop_path_mlp(self.mlp(self.ln2(out)) * out_mask_float) + # optionally add pos_embd to the output + if pos_embd is not None: + out += pos_embd * out_mask_float + return out, out_mask + + +class ConvBlock(nn.Module): + """ + A simple conv block similar to the basic block used in ResNet + """ + def __init__( + self, + n_embd, # dimension of the input features + kernel_size=3, # conv kernel size + n_ds_stride=1, # downsampling stride for the current layer + expansion_factor=2, # expansion factor of feat dims + n_out=None, # 
output dimension, if None, set to input dim + act_layer=nn.ReLU, # nonlinear activation used after conv, default ReLU + ): + super().__init__() + # must use odd sized kernel + assert (kernel_size % 2 == 1) and (kernel_size > 1) + padding = kernel_size // 2 + if n_out is None: + n_out = n_embd + + # 1x3 (strided) -> 1x3 (basic block in resnet) + width = n_embd * expansion_factor + self.conv1 = MaskedConv1D( + n_embd, width, kernel_size, n_ds_stride, padding=padding) + self.conv2 = MaskedConv1D( + width, n_out, kernel_size, 1, padding=padding) + + # attach downsampling conv op + if n_ds_stride > 1: + # 1x1 strided conv (same as resnet) + self.downsample = MaskedConv1D(n_embd, n_out, 1, n_ds_stride) + else: + self.downsample = None + + self.act = act_layer() + + def forward(self, x, mask, pos_embd=None): + identity = x + out, out_mask = self.conv1(x, mask) + out = self.act(out) + out, out_mask = self.conv2(out, out_mask) + + # downsampling + if self.downsample is not None: + identity, _ = self.downsample(x, mask) + + # residual connection + out += identity + out = self.act(out) + + return out, out_mask + + +# drop path: from https://github.com/facebookresearch/SlowFast/blob/master/slowfast/models/common.py +class Scale(nn.Module): + """ + Multiply the output regression range by a learnable constant value + """ + def __init__(self, init_value=1.0): + """ + init_value : initial value for the scalar + """ + super().__init__() + self.scale = nn.Parameter( + torch.tensor(init_value, dtype=torch.float32), + requires_grad=True + ) + + def forward(self, x): + """ + input -> scale * input + """ + return x * self.scale + + +# The follow code is modified from +# https://github.com/facebookresearch/SlowFast/blob/master/slowfast/models/common.py +def drop_path(x, drop_prob=0.0, training=False): + """ + Stochastic Depth per sample. 
+ """ + if drop_prob == 0.0 or not training: + return x + keep_prob = 1 - drop_prob + shape = (x.shape[0],) + (1,) * ( + x.ndim - 1 + ) # work with diff dim tensors, not just 2D ConvNets + mask = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) + mask.floor_() # binarize + output = x.div(keep_prob) * mask + return output + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" + + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + +class AffineDropPath(nn.Module): + """ + Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks) with a per channel scaling factor (and zero init) + See: https://arxiv.org/pdf/2103.17239.pdf + """ + + def __init__(self, num_dim, drop_prob=0.0, init_scale_value=1e-4): + super().__init__() + self.scale = nn.Parameter( + init_scale_value * torch.ones((1, num_dim, 1)), + requires_grad=True + ) + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(self.scale * x, self.drop_prob, self.training) diff --git a/Downstream/Temporal-Action-Localization/libs/modeling/loc_generators.py b/Downstream/Temporal-Action-Localization/libs/modeling/loc_generators.py new file mode 100644 index 0000000..51b7d16 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/modeling/loc_generators.py @@ -0,0 +1,86 @@ +import torch +from torch import nn +from torch.nn import functional as F + +from .models import register_generator + + +class BufferList(nn.Module): + """ + Similar to nn.ParameterList, but for buffers + + Taken from https://github.com/facebookresearch/detectron2/blob/master/detectron2/modeling/anchor_generator.py + """ + + def __init__(self, buffers): + super().__init__() + for i, buffer in enumerate(buffers): + # Use non-persistent buffer so the values are not saved in checkpoint + self.register_buffer(str(i), buffer, persistent=False) + + def __len__(self): + return len(self._buffers) + + def __iter__(self): + return iter(self._buffers.values()) + +@register_generator('point') +class PointGenerator(nn.Module): + """ + A generator for temporal "points" + + max_seq_len can be much larger than the actual seq length + """ + def __init__( + self, + max_seq_len, # max sequence length that the generator will buffer + fpn_levels, # number of fpn levels + scale_factor, # scale factor between two fpn levels + regression_range, # regression range (on feature grids) + use_offset=False # if to align the points at grid centers + ): + super().__init__() + # sanity check, # fpn levels and length divisible + assert len(regression_range) == fpn_levels + assert max_seq_len % scale_factor**(fpn_levels - 1) == 0 + + # save params + self.max_seq_len = max_seq_len + self.fpn_levels = fpn_levels + self.scale_factor = scale_factor + self.regression_range = regression_range + self.use_offset = use_offset + + # generate all points and buffer the list + self.buffer_points = self._generate_points() + + def _generate_points(self): + points_list = [] + # loop over all points at each pyramid level + for l in range(self.fpn_levels): + stride = self.scale_factor ** l + reg_range = torch.as_tensor( + self.regression_range[l], dtype=torch.float) + fpn_stride = torch.as_tensor(stride, dtype=torch.float) + points = torch.arange(0, self.max_seq_len, stride)[:, None] + # add offset if necessary (not in our current model) + if self.use_offset: 
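+ # shift each point by half a stride so it aligns with the center of its grid cell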
+ points += 0.5 * stride + # pad the time stamp with additional regression range / stride + reg_range = reg_range[None].repeat(points.shape[0], 1) + fpn_stride = fpn_stride[None].repeat(points.shape[0], 1) + # size: T x 4 (ts, reg_range, stride) + points_list.append(torch.cat((points, reg_range, fpn_stride), dim=1)) + + return BufferList(points_list) + + def forward(self, feats): + # feats will be a list of torch tensors + assert len(feats) == self.fpn_levels + pts_list = [] + feat_lens = [feat.shape[-1] for feat in feats] + for feat_len, buffer_pts in zip(feat_lens, self.buffer_points): + assert feat_len <= buffer_pts.shape[0], "Reached max buffer length for point generator" + pts = buffer_pts[:feat_len, :] + pts_list.append(pts) + return pts_list diff --git a/Downstream/Temporal-Action-Localization/libs/modeling/losses.py b/Downstream/Temporal-Action-Localization/libs/modeling/losses.py new file mode 100644 index 0000000..b077b97 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/modeling/losses.py @@ -0,0 +1,168 @@ +import torch +from torch.nn import functional as F + +@torch.jit.script +def sigmoid_focal_loss( + inputs: torch.Tensor, + targets: torch.Tensor, + alpha: float = 0.25, + gamma: float = 2.0, + reduction: str = "none", +) -> torch.Tensor: + """ + Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. + Taken from + https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/focal_loss.py + # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + + Args: + inputs: A float tensor of arbitrary shape. + The predictions for each example. + targets: A float tensor with the same shape as inputs. Stores the binary + classification label for each element in inputs + (0 for the negative class and 1 for the positive class). + alpha: (optional) Weighting factor in range (0,1) to balance + positive vs negative examples. Default = 0.25 (no weighting). + gamma: Exponent of the modulating factor (1 - p_t) to + balance easy vs hard examples. + reduction: 'none' | 'mean' | 'sum' + 'none': No reduction will be applied to the output. + 'mean': The output will be averaged. + 'sum': The output will be summed. + Returns: + Loss tensor with the reduction option applied. + """ + inputs = inputs.float() + targets = targets.float() + p = torch.sigmoid(inputs) + ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none") + p_t = p * targets + (1 - p) * (1 - targets) + loss = ce_loss * ((1 - p_t) ** gamma) + + if alpha >= 0: + alpha_t = alpha * targets + (1 - alpha) * (1 - targets) + loss = alpha_t * loss + + if reduction == "mean": + loss = loss.mean() + elif reduction == "sum": + loss = loss.sum() + + return loss + + +@torch.jit.script +def ctr_giou_loss_1d( + input_offsets: torch.Tensor, + target_offsets: torch.Tensor, + reduction: str = 'none', + eps: float = 1e-8, +) -> torch.Tensor: + """ + Generalized Intersection over Union Loss (Hamid Rezatofighi et. al) + https://arxiv.org/abs/1902.09630 + + This is an implementation that assumes a 1D event is represented using + the same center point with different offsets, e.g., + (t1, t2) = (c - o_1, c + o_2) with o_i >= 0 + + Reference code from + https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/giou_loss.py + + Args: + input/target_offsets (Tensor): 1D offsets of size (N, 2) + reduction: 'none' | 'mean' | 'sum' + 'none': No reduction will be applied to the output. + 'mean': The output will be averaged. + 'sum': The output will be summed. 
+ eps (float): small number to prevent division by zero + """ + input_offsets = input_offsets.float() + target_offsets = target_offsets.float() + # check all 1D events are valid + assert (input_offsets >= 0.0).all(), "predicted offsets must be non-negative" + assert (target_offsets >= 0.0).all(), "GT offsets must be non-negative" + + lp, rp = input_offsets[:, 0], input_offsets[:, 1] + lg, rg = target_offsets[:, 0], target_offsets[:, 1] + + # intersection key points + lkis = torch.min(lp, lg) + rkis = torch.min(rp, rg) + + # iou + intsctk = rkis + lkis + unionk = (lp + rp) + (lg + rg) - intsctk + iouk = intsctk / unionk.clamp(min=eps) + + # giou is reduced to iou in our setting, skip unnecessary steps + loss = 1.0 - iouk + + if reduction == "mean": + loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum() + elif reduction == "sum": + loss = loss.sum() + + return loss + +@torch.jit.script +def ctr_diou_loss_1d( + input_offsets: torch.Tensor, + target_offsets: torch.Tensor, + reduction: str = 'none', + eps: float = 1e-8, +) -> torch.Tensor: + """ + Distance-IoU Loss (Zheng et. al) + https://arxiv.org/abs/1911.08287 + + This is an implementation that assumes a 1D event is represented using + the same center point with different offsets, e.g., + (t1, t2) = (c - o_1, c + o_2) with o_i >= 0 + + Reference code from + https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/giou_loss.py + + Args: + input/target_offsets (Tensor): 1D offsets of size (N, 2) + reduction: 'none' | 'mean' | 'sum' + 'none': No reduction will be applied to the output. + 'mean': The output will be averaged. + 'sum': The output will be summed. + eps (float): small number to prevent division by zero + """ + input_offsets = input_offsets.float() + target_offsets = target_offsets.float() + # check all 1D events are valid + assert (input_offsets >= 0.0).all(), "predicted offsets must be non-negative" + assert (target_offsets >= 0.0).all(), "GT offsets must be non-negative" + + lp, rp = input_offsets[:, 0], input_offsets[:, 1] + lg, rg = target_offsets[:, 0], target_offsets[:, 1] + + # intersection key points + lkis = torch.min(lp, lg) + rkis = torch.min(rp, rg) + + # iou + intsctk = rkis + lkis + unionk = (lp + rp) + (lg + rg) - intsctk + iouk = intsctk / unionk.clamp(min=eps) + + # smallest enclosing box + lc = torch.max(lp, lg) + rc = torch.max(rp, rg) + len_c = lc + rc + + # offset between centers + rho = 0.5 * (rp - lp - rg + lg) + + # diou + loss = 1.0 - iouk + torch.square(rho / len_c.clamp(min=eps)) + + if reduction == "mean": + loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum() + elif reduction == "sum": + loss = loss.sum() + + return loss diff --git a/Downstream/Temporal-Action-Localization/libs/modeling/meta_archs.py b/Downstream/Temporal-Action-Localization/libs/modeling/meta_archs.py new file mode 100644 index 0000000..0bc9a67 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/modeling/meta_archs.py @@ -0,0 +1,773 @@ +import math + +import torch +from torch import nn +from torch.nn import functional as F + +from .models import register_meta_arch, make_backbone, make_neck, make_generator +from .blocks import MaskedConv1D, Scale, LayerNorm +from .losses import ctr_diou_loss_1d, sigmoid_focal_loss + +from ..utils import batched_nms + + +class PtTransformerClsHead(nn.Module): + """ + 1D Conv heads for classification + """ + def __init__( + self, + input_dim, + feat_dim, + num_classes, + prior_prob=0.01, + num_layers=3, + kernel_size=3, + act_layer=nn.ReLU, + with_ln=False, + empty_cls 
= [] + ): + super().__init__() + self.act = act_layer() + + # build the head + self.head = nn.ModuleList() + self.norm = nn.ModuleList() + for idx in range(num_layers-1): + if idx == 0: + in_dim = input_dim + out_dim = feat_dim + else: + in_dim = feat_dim + out_dim = feat_dim + self.head.append( + MaskedConv1D( + in_dim, out_dim, kernel_size, + stride=1, + padding=kernel_size//2, + bias=(not with_ln) + ) + ) + if with_ln: + self.norm.append( + LayerNorm(out_dim) + ) + else: + self.norm.append(nn.Identity()) + + # classifier + self.cls_head = MaskedConv1D( + feat_dim, num_classes, kernel_size, + stride=1, padding=kernel_size//2 + ) + + # use prior in model initialization to improve stability + # this will overwrite other weight init + bias_value = -(math.log((1 - prior_prob) / prior_prob)) + torch.nn.init.constant_(self.cls_head.conv.bias, bias_value) + + # a quick fix to empty categories: + # the weights assocaited with these categories will remain unchanged + # we set their bias to a large negative value to prevent their outputs + if len(empty_cls) > 0: + bias_value = -(math.log((1 - 1e-6) / 1e-6)) + for idx in empty_cls: + torch.nn.init.constant_(self.cls_head.conv.bias[idx], bias_value) + + def forward(self, fpn_feats, fpn_masks): + assert len(fpn_feats) == len(fpn_masks) + + # apply the classifier for each pyramid level + out_logits = tuple() + for _, (cur_feat, cur_mask) in enumerate(zip(fpn_feats, fpn_masks)): + cur_out = cur_feat + for idx in range(len(self.head)): + cur_out, _ = self.head[idx](cur_out, cur_mask) + cur_out = self.act(self.norm[idx](cur_out)) + cur_logits, _ = self.cls_head(cur_out, cur_mask) + out_logits += (cur_logits, ) + + # fpn_masks remains the same + return out_logits + + +class PtTransformerRegHead(nn.Module): + """ + Shared 1D Conv heads for regression + Simlar logic as PtTransformerClsHead with separated implementation for clarity + """ + def __init__( + self, + input_dim, + feat_dim, + fpn_levels, + num_layers=3, + kernel_size=3, + act_layer=nn.ReLU, + with_ln=False + ): + super().__init__() + self.fpn_levels = fpn_levels + self.act = act_layer() + + # build the conv head + self.head = nn.ModuleList() + self.norm = nn.ModuleList() + for idx in range(num_layers-1): + if idx == 0: + in_dim = input_dim + out_dim = feat_dim + else: + in_dim = feat_dim + out_dim = feat_dim + self.head.append( + MaskedConv1D( + in_dim, out_dim, kernel_size, + stride=1, + padding=kernel_size//2, + bias=(not with_ln) + ) + ) + if with_ln: + self.norm.append( + LayerNorm(out_dim) + ) + else: + self.norm.append(nn.Identity()) + + self.scale = nn.ModuleList() + for idx in range(fpn_levels): + self.scale.append(Scale()) + + # segment regression + self.offset_head = MaskedConv1D( + feat_dim, 2, kernel_size, + stride=1, padding=kernel_size//2 + ) + + def forward(self, fpn_feats, fpn_masks): + assert len(fpn_feats) == len(fpn_masks) + assert len(fpn_feats) == self.fpn_levels + + # apply the classifier for each pyramid level + out_offsets = tuple() + for l, (cur_feat, cur_mask) in enumerate(zip(fpn_feats, fpn_masks)): + cur_out = cur_feat + for idx in range(len(self.head)): + cur_out, _ = self.head[idx](cur_out, cur_mask) + cur_out = self.act(self.norm[idx](cur_out)) + cur_offsets, _ = self.offset_head(cur_out, cur_mask) + out_offsets += (F.relu(self.scale[l](cur_offsets)), ) + + # fpn_masks remains the same + return out_offsets + + +@register_meta_arch("LocPointTransformer") +class PtTransformer(nn.Module): + """ + Transformer based model for single stage action localization + """ + def 
__init__( + self, + backbone_type, # a string defines which backbone we use + fpn_type, # a string defines which fpn we use + backbone_arch, # a tuple defines # layers in embed / stem / branch + scale_factor, # scale factor between branch layers + input_dim, # input feat dim + max_seq_len, # max sequence length (used for training) + max_buffer_len_factor, # max buffer size (defined a factor of max_seq_len) + n_head, # number of heads for self-attention in transformer + n_mha_win_size, # window size for self attention; -1 to use full seq + embd_kernel_size, # kernel size of the embedding network + embd_dim, # output feat channel of the embedding network + embd_with_ln, # attach layernorm to embedding network + fpn_dim, # feature dim on FPN + fpn_with_ln, # if to apply layer norm at the end of fpn + head_dim, # feature dim for head + regression_range, # regression range on each level of FPN + head_num_layers, # number of layers in the head (including the classifier) + head_kernel_size, # kernel size for reg/cls heads + head_with_ln, # attache layernorm to reg/cls heads + use_abs_pe, # if to use abs position encoding + use_rel_pe, # if to use rel position encoding + num_classes, # number of action classes + train_cfg, # other cfg for training + test_cfg # other cfg for testing + ): + super().__init__() + # re-distribute params to backbone / neck / head + self.fpn_strides = [scale_factor**i for i in range(backbone_arch[-1]+1)] + self.reg_range = regression_range + assert len(self.fpn_strides) == len(self.reg_range) + self.scale_factor = scale_factor + # #classes = num_classes + 1 (background) with last category as background + # e.g., num_classes = 10 -> 0, 1, ..., 9 as actions, 10 as background + self.num_classes = num_classes + + # check the feature pyramid and local attention window size + self.max_seq_len = max_seq_len + if isinstance(n_mha_win_size, int): + self.mha_win_size = [n_mha_win_size]*len(self.fpn_strides) + else: + assert len(n_mha_win_size) == len(self.fpn_strides) + self.mha_win_size = n_mha_win_size + max_div_factor = 1 + for l, (s, w) in enumerate(zip(self.fpn_strides, self.mha_win_size)): + stride = s * (w // 2) * 2 if w > 1 else s + # print(stride) + assert max_seq_len % stride == 0, "max_seq_len must be divisible by fpn stride and window size" + if max_div_factor < stride: + max_div_factor = stride + self.max_div_factor = max_div_factor + + # training time config + self.train_center_sample = train_cfg['center_sample'] + assert self.train_center_sample in ['radius', 'none'] + self.train_center_sample_radius = train_cfg['center_sample_radius'] + self.train_loss_weight = train_cfg['loss_weight'] + self.train_cls_prior_prob = train_cfg['cls_prior_prob'] + self.train_dropout = train_cfg['dropout'] + self.train_droppath = train_cfg['droppath'] + self.train_label_smoothing = train_cfg['label_smoothing'] + + # test time config + self.test_pre_nms_thresh = test_cfg['pre_nms_thresh'] + self.test_pre_nms_topk = test_cfg['pre_nms_topk'] + self.test_iou_threshold = test_cfg['iou_threshold'] + self.test_min_score = test_cfg['min_score'] + self.test_max_seg_num = test_cfg['max_seg_num'] + self.test_nms_method = test_cfg['nms_method'] + assert self.test_nms_method in ['soft', 'hard', 'none'] + self.test_duration_thresh = test_cfg['duration_thresh'] + self.test_multiclass_nms = test_cfg['multiclass_nms'] + self.test_nms_sigma = test_cfg['nms_sigma'] + self.test_voting_thresh = test_cfg['voting_thresh'] + + # we will need a better way to dispatch the params to backbones / necks + # 
backbone network: conv + transformer + assert backbone_type in ['convTransformer', 'conv'] + if backbone_type == 'convTransformer': + self.backbone = make_backbone( + 'convTransformer', + **{ + 'n_in' : input_dim, + 'n_embd' : embd_dim, + 'n_head': n_head, + 'n_embd_ks': embd_kernel_size, + 'max_len': max_seq_len, + 'arch' : backbone_arch, + 'mha_win_size': self.mha_win_size, + 'scale_factor' : scale_factor, + 'with_ln' : embd_with_ln, + 'attn_pdrop' : 0.0, + 'proj_pdrop' : self.train_dropout, + 'path_pdrop' : self.train_droppath, + 'use_abs_pe' : use_abs_pe, + 'use_rel_pe' : use_rel_pe + } + ) + else: + self.backbone = make_backbone( + 'conv', + **{ + 'n_in': input_dim, + 'n_embd': embd_dim, + 'n_embd_ks': embd_kernel_size, + 'arch': backbone_arch, + 'scale_factor': scale_factor, + 'with_ln' : embd_with_ln + } + ) + + # fpn network: convs + assert fpn_type in ['fpn', 'identity'] + self.neck = make_neck( + fpn_type, + **{ + 'in_channels' : [embd_dim] * (backbone_arch[-1] + 1), + 'out_channel' : fpn_dim, + 'scale_factor' : scale_factor, + 'with_ln' : fpn_with_ln + } + ) + + # location generator: points + self.point_generator = make_generator( + 'point', + **{ + 'max_seq_len' : max_seq_len * max_buffer_len_factor, + 'fpn_levels' : len(self.fpn_strides), + 'scale_factor' : scale_factor, + 'regression_range' : self.reg_range + } + ) + + # classfication and regerssion heads + self.cls_head = PtTransformerClsHead( + fpn_dim, head_dim, self.num_classes, + kernel_size=head_kernel_size, + prior_prob=self.train_cls_prior_prob, + with_ln=head_with_ln, + num_layers=head_num_layers, + empty_cls=train_cfg['head_empty_cls'] + ) + self.reg_head = PtTransformerRegHead( + fpn_dim, head_dim, len(self.fpn_strides), + kernel_size=head_kernel_size, + num_layers=head_num_layers, + with_ln=head_with_ln + ) + + # maintain an EMA of #foreground to stabilize the loss normalizer + # useful for small mini-batch training + self.loss_normalizer = train_cfg['init_loss_norm'] + self.loss_normalizer_momentum = 0.9 + + @property + def device(self): + # a hacky way to get the device type + # will throw an error if parameters are on different devices + return list(set(p.device for p in self.parameters()))[0] + + def forward(self, video_list): + # batch the video list into feats (B, C, T) and masks (B, 1, T) + batched_inputs, batched_masks = self.preprocessing(video_list) + + # forward the network (backbone -> neck -> heads) + feats, masks = self.backbone(batched_inputs, batched_masks) + fpn_feats, fpn_masks = self.neck(feats, masks) + # fpn_feats [16, 256, 768] ..[16, 256, 384]..[16, 256, 24] + + # compute the point coordinate along the FPN + # this is used for computing the GT or decode the final results + # points: List[T x 4] with length = # fpn levels + # (shared across all samples in the mini-batch) + points = self.point_generator(fpn_feats) + + # out_cls: List[B, #cls + 1, T_i] + out_cls_logits = self.cls_head(fpn_feats, fpn_masks) + # out_offset: List[B, 2, T_i] + out_offsets = self.reg_head(fpn_feats, fpn_masks) + + # permute the outputs + # out_cls: F List[B, #cls, T_i] -> F List[B, T_i, #cls] + out_cls_logits = [x.permute(0, 2, 1) for x in out_cls_logits] + # out_offset: F List[B, 2 (xC), T_i] -> F List[B, T_i, 2 (xC)] + out_offsets = [x.permute(0, 2, 1) for x in out_offsets] + # fpn_masks: F list[B, 1, T_i] -> F List[B, T_i] + fpn_masks = [x.squeeze(1) for x in fpn_masks] + + # return loss during training + if self.training: + # generate segment/lable List[N x 2] / List[N] with length = B + assert 
video_list[0]['segments'] is not None, "GT action labels does not exist" + assert video_list[0]['labels'] is not None, "GT action labels does not exist" + # print(video_list) + gt_segments = [x['segments'].to(self.device) for x in video_list] + gt_labels = [x['labels'].to(self.device) for x in video_list] + + # compute the gt labels for cls & reg + # list of prediction targets + gt_cls_labels, gt_offsets = self.label_points( + points, gt_segments, gt_labels) + + # compute the loss and return + losses = self.losses( + fpn_masks, + out_cls_logits, out_offsets, + gt_cls_labels, gt_offsets + ) + return losses + + else: + # decode the actions (sigmoid / stride, etc) + results = self.inference( + video_list, points, fpn_masks, + out_cls_logits, out_offsets + ) + return results + + @torch.no_grad() + def preprocessing(self, video_list, padding_val=0.0): + """ + Generate batched features and masks from a list of dict items + """ + feats = [x['feats'] for x in video_list] + feats_lens = torch.as_tensor([feat.shape[-1] for feat in feats]) + max_len = feats_lens.max(0).values.item() + + if self.training: + assert max_len <= self.max_seq_len, "Input length must be smaller than max_seq_len during training" + # set max_len to self.max_seq_len + max_len = self.max_seq_len + # batch input shape B, C, T + batch_shape = [len(feats), feats[0].shape[0], max_len] + batched_inputs = feats[0].new_full(batch_shape, padding_val) + for feat, pad_feat in zip(feats, batched_inputs): + pad_feat[..., :feat.shape[-1]].copy_(feat) + else: + assert len(video_list) == 1, "Only support batch_size = 1 during inference" + # input length < self.max_seq_len, pad to max_seq_len + if max_len <= self.max_seq_len: + max_len = self.max_seq_len + else: + # pad the input to the next divisible size + stride = self.max_div_factor + max_len = (max_len + (stride - 1)) // stride * stride + padding_size = [0, max_len - feats_lens[0]] + batched_inputs = F.pad( + feats[0], padding_size, value=padding_val).unsqueeze(0) + + # generate the mask + batched_masks = torch.arange(max_len)[None, :] < feats_lens[:, None] + + # push to device + batched_inputs = batched_inputs.to(self.device) + batched_masks = batched_masks.unsqueeze(1).to(self.device) + + return batched_inputs, batched_masks + + @torch.no_grad() + def label_points(self, points, gt_segments, gt_labels): + # concat points on all fpn levels List[T x 4] -> F T x 4 + # This is shared for all samples in the mini-batch + num_levels = len(points) + concat_points = torch.cat(points, dim=0) + gt_cls, gt_offset = [], [] + + # loop over each video sample + for gt_segment, gt_label in zip(gt_segments, gt_labels): + cls_targets, reg_targets = self.label_points_single_video( + concat_points, gt_segment, gt_label + ) + # append to list (len = # images, each of size FT x C) + gt_cls.append(cls_targets) + gt_offset.append(reg_targets) + + return gt_cls, gt_offset + + @torch.no_grad() + def label_points_single_video(self, concat_points, gt_segment, gt_label): + # concat_points : F T x 4 (t, regressoin range, stride) + # gt_segment : N (#Events) x 2 + # gt_label : N (#Events) x 1 + num_pts = concat_points.shape[0] + num_gts = gt_segment.shape[0] + + # corner case where current sample does not have actions + if num_gts == 0: + cls_targets = gt_segment.new_full((num_pts, self.num_classes), 0) + reg_targets = gt_segment.new_zeros((num_pts, 2)) + return cls_targets, reg_targets + + # compute the lengths of all segments -> F T x N + lens = gt_segment[:, 1] - gt_segment[:, 0] + lens = lens[None, 
:].repeat(num_pts, 1) + + # compute the distance of every point to each segment boundary + # auto broadcasting for all reg target-> F T x N x2 + gt_segs = gt_segment[None].expand(num_pts, num_gts, 2) + left = concat_points[:, 0, None] - gt_segs[:, :, 0] + right = gt_segs[:, :, 1] - concat_points[:, 0, None] + reg_targets = torch.stack((left, right), dim=-1) + + if self.train_center_sample == 'radius': + # center of all segments F T x N + center_pts = 0.5 * (gt_segs[:, :, 0] + gt_segs[:, :, 1]) + # center sampling based on stride radius + # compute the new boundaries: + # concat_points[:, 3] stores the stride + t_mins = \ + center_pts - concat_points[:, 3, None] * self.train_center_sample_radius + t_maxs = \ + center_pts + concat_points[:, 3, None] * self.train_center_sample_radius + # prevent t_mins / maxs from over-running the action boundary + # left: torch.maximum(t_mins, gt_segs[:, :, 0]) + # right: torch.minimum(t_maxs, gt_segs[:, :, 1]) + # F T x N (distance to the new boundary) + cb_dist_left = concat_points[:, 0, None] \ + - torch.maximum(t_mins, gt_segs[:, :, 0]) + cb_dist_right = torch.minimum(t_maxs, gt_segs[:, :, 1]) \ + - concat_points[:, 0, None] + # F T x N x 2 + center_seg = torch.stack( + (cb_dist_left, cb_dist_right), -1) + # F T x N + inside_gt_seg_mask = center_seg.min(-1)[0] > 0 + else: + # inside an gt action + inside_gt_seg_mask = reg_targets.min(-1)[0] > 0 + + # limit the regression range for each location + max_regress_distance = reg_targets.max(-1)[0] + # F T x N + inside_regress_range = torch.logical_and( + (max_regress_distance >= concat_points[:, 1, None]), + (max_regress_distance <= concat_points[:, 2, None]) + ) + + # if there are still more than one actions for one moment + # pick the one with the shortest duration (easiest to regress) + lens.masked_fill_(inside_gt_seg_mask==0, float('inf')) + lens.masked_fill_(inside_regress_range==0, float('inf')) + # F T x N -> F T + min_len, min_len_inds = lens.min(dim=1) + + # corner case: multiple actions with very similar durations (e.g., THUMOS14) + min_len_mask = torch.logical_and( + (lens <= (min_len[:, None] + 1e-3)), (lens < float('inf')) + ).to(reg_targets.dtype) + + # cls_targets: F T x C; reg_targets F T x 2 + gt_label_one_hot = F.one_hot( + gt_label, self.num_classes + ).to(reg_targets.dtype) + cls_targets = min_len_mask @ gt_label_one_hot + # to prevent multiple GT actions with the same label and boundaries + cls_targets.clamp_(min=0.0, max=1.0) + # OK to use min_len_inds + reg_targets = reg_targets[range(num_pts), min_len_inds] + # normalization based on stride + reg_targets /= concat_points[:, 3, None] + + return cls_targets, reg_targets + + def losses( + self, fpn_masks, + out_cls_logits, out_offsets, + gt_cls_labels, gt_offsets + ): + + + # self.criterion_KL = nn.KLDivLoss(reduction='batchmean', log_target=True) + # kl_loss=[] + # for ii in range(len(out_cls_logits)-1): + # outputs=out_cls_logits[ii][:,::2,:] + # targets=out_cls_logits[ii+1] + # outputs = F.softmax(outputs, dim=2) + # targets = F.softmax(targets, dim=2) + # loss1 = self.criterion_KL(outputs, targets) + # loss2 = self.criterion_KL(targets, outputs) + # kl_loss.append(loss1*0.2 + loss2*0.2) + + # fpn_masks, out_*: F (List) [B, T_i, C] + # gt_* : B (list) [F T, C] + # fpn_masks -> (B, FT) + valid_mask = torch.cat(fpn_masks, dim=1) + + # 1. 
classification loss + # stack the list -> (B, FT) -> (# Valid, ) + gt_cls = torch.stack(gt_cls_labels) + pos_mask = torch.logical_and((gt_cls.sum(-1) > 0), valid_mask) + + # cat the predicted offsets -> (B, FT, 2 (xC)) -> # (#Pos, 2 (xC)) + pred_offsets = torch.cat(out_offsets, dim=1)[pos_mask] + gt_offsets = torch.stack(gt_offsets)[pos_mask] + + # update the loss normalizer + num_pos = pos_mask.sum().item() + self.loss_normalizer = self.loss_normalizer_momentum * self.loss_normalizer + ( + 1 - self.loss_normalizer_momentum + ) * max(num_pos, 1) + + # gt_cls is already one hot encoded now, simply masking out + gt_target = gt_cls[valid_mask] + + # optinal label smoothing + gt_target *= 1 - self.train_label_smoothing + gt_target += self.train_label_smoothing / (self.num_classes + 1) + + # focal loss + cls_loss = sigmoid_focal_loss( + torch.cat(out_cls_logits, dim=1)[valid_mask], + gt_target, + reduction='sum' + ) + cls_loss /= self.loss_normalizer + + # 2. regression using IoU/GIoU loss (defined on positive samples) + if num_pos == 0: + reg_loss = 0 * pred_offsets.sum() + else: + # giou loss defined on positive samples + reg_loss = ctr_diou_loss_1d( + pred_offsets, + gt_offsets, + reduction='sum' + ) + reg_loss /= self.loss_normalizer + + if self.train_loss_weight > 0: + loss_weight = self.train_loss_weight + else: + loss_weight = cls_loss.detach() / max(reg_loss.item(), 0.01) + + # return a dict of losses + final_loss = cls_loss + reg_loss * loss_weight + # for kl in kl_loss: + # final_loss=final_loss+kl + return {'cls_loss' : cls_loss, + 'reg_loss' : reg_loss, + 'final_loss' : final_loss} + + @torch.no_grad() + def inference( + self, + video_list, + points, fpn_masks, + out_cls_logits, out_offsets + ): + # video_list B (list) [dict] + # points F (list) [T_i, 4] + # fpn_masks, out_*: F (List) [B, T_i, C] + results = [] + + # 1: gather video meta information + vid_idxs = [x['video_id'] for x in video_list] + vid_fps = [x['fps'] for x in video_list] + vid_lens = [x['duration'] for x in video_list] + vid_ft_stride = [x['feat_stride'] for x in video_list] + vid_ft_nframes = [x['feat_num_frames'] for x in video_list] + + # 2: inference on each single video and gather the results + # upto this point, all results use timestamps defined on feature grids + for idx, (vidx, fps, vlen, stride, nframes) in enumerate( + zip(vid_idxs, vid_fps, vid_lens, vid_ft_stride, vid_ft_nframes) + ): + # gather per-video outputs + cls_logits_per_vid = [x[idx] for x in out_cls_logits] + offsets_per_vid = [x[idx] for x in out_offsets] + fpn_masks_per_vid = [x[idx] for x in fpn_masks] + # inference on a single video (should always be the case) + results_per_vid = self.inference_single_video( + points, fpn_masks_per_vid, + cls_logits_per_vid, offsets_per_vid + ) + # pass through video meta info + results_per_vid['video_id'] = vidx + results_per_vid['fps'] = fps + results_per_vid['duration'] = vlen + results_per_vid['feat_stride'] = stride + results_per_vid['feat_num_frames'] = nframes + results.append(results_per_vid) + + # step 3: postprocssing + results = self.postprocessing(results) + + return results + + @torch.no_grad() + def inference_single_video( + self, + points, + fpn_masks, + out_cls_logits, + out_offsets, + ): + # points F (list) [T_i, 4] + # fpn_masks, out_*: F (List) [T_i, C] + segs_all = [] + scores_all = [] + cls_idxs_all = [] + + # loop over fpn levels + for cls_i, offsets_i, pts_i, mask_i in zip( + out_cls_logits, out_offsets, points, fpn_masks + ): + # sigmoid normalization for output logits + 
pred_prob = (cls_i.sigmoid() * mask_i.unsqueeze(-1)).flatten() + + # Apply filtering to make NMS faster following detectron2 + # 1. Keep seg with confidence score > a threshold + keep_idxs1 = (pred_prob > self.test_pre_nms_thresh) + pred_prob = pred_prob[keep_idxs1] + topk_idxs = keep_idxs1.nonzero(as_tuple=True)[0] + + # 2. Keep top k top scoring boxes only + num_topk = min(self.test_pre_nms_topk, topk_idxs.size(0)) + pred_prob, idxs = pred_prob.sort(descending=True) + pred_prob = pred_prob[:num_topk].clone() + topk_idxs = topk_idxs[idxs[:num_topk]].clone() + + # fix a warning in pytorch 1.9 + pt_idxs = torch.div( + topk_idxs, self.num_classes, rounding_mode='floor' + ) + cls_idxs = torch.fmod(topk_idxs, self.num_classes) + + # 3. gather predicted offsets + offsets = offsets_i[pt_idxs] + pts = pts_i[pt_idxs] + + # 4. compute predicted segments (denorm by stride for output offsets) + seg_left = pts[:, 0] - offsets[:, 0] * pts[:, 3] + seg_right = pts[:, 0] + offsets[:, 1] * pts[:, 3] + pred_segs = torch.stack((seg_left, seg_right), -1) + + # 5. Keep seg with duration > a threshold (relative to feature grids) + seg_areas = seg_right - seg_left + keep_idxs2 = seg_areas > self.test_duration_thresh + + # *_all : N (filtered # of segments) x 2 / 1 + segs_all.append(pred_segs[keep_idxs2]) + scores_all.append(pred_prob[keep_idxs2]) + cls_idxs_all.append(cls_idxs[keep_idxs2]) + + # cat along the FPN levels (F N_i, C) + segs_all, scores_all, cls_idxs_all = [ + torch.cat(x) for x in [segs_all, scores_all, cls_idxs_all] + ] + results = {'segments' : segs_all, + 'scores' : scores_all, + 'labels' : cls_idxs_all} + + return results + + @torch.no_grad() + def postprocessing(self, results): + # input : list of dictionary items + # (1) push to CPU; (2) NMS; (3) convert to actual time stamps + processed_results = [] + for results_per_vid in results: + # unpack the meta info + vidx = results_per_vid['video_id'] + fps = results_per_vid['fps'] + vlen = float(results_per_vid['duration']) + stride = results_per_vid['feat_stride'] + nframes = results_per_vid['feat_num_frames'] + # 1: unpack the results and move to CPU + segs = results_per_vid['segments'].detach().cpu() + scores = results_per_vid['scores'].detach().cpu() + labels = results_per_vid['labels'].detach().cpu() + + + if self.test_nms_method != 'none': + # 2: batched nms (only implemented on CPU) + segs, scores, labels = batched_nms( + segs, scores, labels, + self.test_iou_threshold, + self.test_min_score, + self.test_max_seg_num, + use_soft_nms = (self.test_nms_method == 'soft'), + multiclass = self.test_multiclass_nms, + sigma = self.test_nms_sigma, + voting_thresh = self.test_voting_thresh + ) + + + # 3: convert from feature grids to seconds + if segs.shape[0] > 0: + segs = (segs * stride + 0.5 * nframes) / fps + # truncate all boundaries within [0, duration] + segs[segs<=0.0] *= 0.0 + segs[segs>=vlen] = segs[segs>=vlen] * 0.0 + vlen + # 4: repack the results + processed_results.append( + {'video_id' : vidx, + 'segments' : segs, + 'scores' : scores, + 'labels' : labels} + ) + + return processed_results diff --git a/Downstream/Temporal-Action-Localization/libs/modeling/models.py b/Downstream/Temporal-Action-Localization/libs/modeling/models.py new file mode 100644 index 0000000..abae35d --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/modeling/models.py @@ -0,0 +1,50 @@ +import os + +# backbone (e.g., conv / transformer) +backbones = {} +def register_backbone(name): + def decorator(cls): + backbones[name] = cls + return cls + return 
decorator + +# neck (e.g., FPN) +necks = {} +def register_neck(name): + def decorator(cls): + necks[name] = cls + return cls + return decorator + +# location generator (point, segment, etc) +generators = {} +def register_generator(name): + def decorator(cls): + generators[name] = cls + return cls + return decorator + +# meta arch (the actual implementation of each model) +meta_archs = {} +def register_meta_arch(name): + def decorator(cls): + meta_archs[name] = cls + return cls + return decorator + +# builder functions +def make_backbone(name, **kwargs): + backbone = backbones[name](**kwargs) + return backbone + +def make_neck(name, **kwargs): + neck = necks[name](**kwargs) + return neck + +def make_meta_arch(name, **kwargs): + meta_arch = meta_archs[name](**kwargs) + return meta_arch + +def make_generator(name, **kwargs): + generator = generators[name](**kwargs) + return generator diff --git a/Downstream/Temporal-Action-Localization/libs/modeling/necks.py b/Downstream/Temporal-Action-Localization/libs/modeling/necks.py new file mode 100644 index 0000000..b4ccd1e --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/modeling/necks.py @@ -0,0 +1,139 @@ +import torch +from torch import nn +from torch.nn import functional as F + +from .models import register_neck +from .blocks import MaskedConv1D, LayerNorm + +@register_neck("fpn") +class FPN1D(nn.Module): + """ + Feature pyramid network + """ + def __init__( + self, + in_channels, # input feature channels, len(in_channels) = # levels + out_channel, # output feature channel + scale_factor=2.0, # downsampling rate between two fpn levels + start_level=0, # start fpn level + end_level=-1, # end fpn level + with_ln=True # if to apply layer norm at the end + ): + super().__init__() + assert isinstance(in_channels, list) or isinstance(in_channels, tuple) + + self.in_channels = in_channels + self.out_channel = out_channel + self.scale_factor = scale_factor + + self.start_level = start_level + if end_level == -1: + self.end_level = len(in_channels) + else: + self.end_level = end_level + assert self.end_level <= len(in_channels) + assert (self.start_level >= 0) and (self.start_level < self.end_level) + + self.lateral_convs = nn.ModuleList() + self.fpn_convs = nn.ModuleList() + self.fpn_norms = nn.ModuleList() + for i in range(self.start_level, self.end_level): + # disable bias if using layer norm + l_conv = MaskedConv1D( + in_channels[i], out_channel, 1, bias=(not with_ln)) + # use depthwise conv here for efficiency + fpn_conv = MaskedConv1D( + out_channel, out_channel, 3, + padding=1, bias=(not with_ln), groups=out_channel + ) + # layer norm for order (B C T) + if with_ln: + fpn_norm = LayerNorm(out_channel) + else: + fpn_norm = nn.Identity() + + self.lateral_convs.append(l_conv) + self.fpn_convs.append(fpn_conv) + self.fpn_norms.append(fpn_norm) + + def forward(self, inputs, fpn_masks): + # inputs must be a list / tuple + assert len(inputs) == len(self.in_channels) + assert len(fpn_masks) == len(self.in_channels) + + # build laterals, fpn_masks will remain the same with 1x1 convs + laterals = [] + for i in range(len(self.lateral_convs)): + x, _ = self.lateral_convs[i]( + inputs[i + self.start_level], fpn_masks[i + self.start_level] + ) + laterals.append(x) + + # build top-down path + used_backbone_levels = len(laterals) + for i in range(used_backbone_levels - 1, 0, -1): + laterals[i-1] += F.interpolate( + laterals[i], + scale_factor=self.scale_factor, + mode='nearest' + ) + + # fpn conv / norm -> outputs + # mask will remain the same + 
fpn_feats = tuple() + for i in range(used_backbone_levels): + x, _ = self.fpn_convs[i]( + laterals[i], fpn_masks[i + self.start_level]) + x = self.fpn_norms[i](x) + fpn_feats += (x, ) + + return fpn_feats, fpn_masks + +@register_neck('identity') +class FPNIdentity(nn.Module): + def __init__( + self, + in_channels, # input feature channels, len(in_channels) = # levels + out_channel, # output feature channel + scale_factor=2.0, # downsampling rate between two fpn levels + start_level=0, # start fpn level + end_level=-1, # end fpn level + with_ln=True # if to apply layer norm at the end + ): + super().__init__() + + self.in_channels = in_channels + self.out_channel = out_channel + self.scale_factor = scale_factor + + self.start_level = start_level + if end_level == -1: + self.end_level = len(in_channels) + else: + self.end_level = end_level + assert self.end_level <= len(in_channels) + assert (self.start_level >= 0) and (self.start_level < self.end_level) + + self.fpn_norms = nn.ModuleList() + for i in range(self.start_level, self.end_level): + # check feat dims + assert self.in_channels[i + self.start_level] == self.out_channel + # layer norm for order (B C T) + if with_ln: + fpn_norm = LayerNorm(out_channel) + else: + fpn_norm = nn.Identity() + self.fpn_norms.append(fpn_norm) + + def forward(self, inputs, fpn_masks): + # inputs must be a list / tuple + assert len(inputs) == len(self.in_channels) + assert len(fpn_masks) == len(self.in_channels) + + # apply norms, fpn_masks will remain the same with 1x1 convs + fpn_feats = tuple() + for i in range(len(self.fpn_norms)): + x = self.fpn_norms[i](inputs[i + self.start_level]) + fpn_feats += (x, ) + + return fpn_feats, fpn_masks diff --git a/Downstream/Temporal-Action-Localization/libs/modeling/weight_init.py b/Downstream/Temporal-Action-Localization/libs/modeling/weight_init.py new file mode 100644 index 0000000..3e5c8b7 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/modeling/weight_init.py @@ -0,0 +1,61 @@ +# from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py +import torch +import math +import warnings + + +def _no_grad_trunc_normal_(tensor, mean, std, a, b): + # Cut & paste from PyTorch official master until it's in a few official releases - RW + # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf + def norm_cdf(x): + # Computes standard normal cumulative distribution function + return (1. + math.erf(x / math.sqrt(2.))) / 2. + + if (mean < a - 2 * std) or (mean > b + 2 * std): + warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " + "The distribution of values may be incorrect.", + stacklevel=2) + + with torch.no_grad(): + # Values are generated by using a truncated uniform distribution and + # then using the inverse CDF for the normal distribution. + # Get upper and lower cdf values + l = norm_cdf((a - mean) / std) + u = norm_cdf((b - mean) / std) + + # Uniformly fill tensor with values from [l, u], then translate to + # [2l-1, 2u-1]. 
+ tensor.uniform_(2 * l - 1, 2 * u - 1) + + # Use inverse cdf transform for normal distribution to get truncated + # standard normal + tensor.erfinv_() + + # Transform to proper mean, std + tensor.mul_(std * math.sqrt(2.)) + tensor.add_(mean) + + # Clamp to ensure it's in the proper range + tensor.clamp_(min=a, max=b) + return tensor + + +def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): + # type: (Tensor, float, float, float, float) -> Tensor + r"""Fills the input Tensor with values drawn from a truncated + normal distribution. The values are effectively drawn from the + normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` + with values outside :math:`[a, b]` redrawn until they are within + the bounds. The method used for generating the random values works + best when :math:`a \leq \text{mean} \leq b`. + Args: + tensor: an n-dimensional `torch.Tensor` + mean: the mean of the normal distribution + std: the standard deviation of the normal distribution + a: the minimum cutoff value + b: the maximum cutoff value + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.trunc_normal_(w) + """ + return _no_grad_trunc_normal_(tensor, mean, std, a, b) diff --git a/Downstream/Temporal-Action-Localization/libs/utils/__init__.py b/Downstream/Temporal-Action-Localization/libs/utils/__init__.py new file mode 100644 index 0000000..00f24e8 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/utils/__init__.py @@ -0,0 +1,10 @@ +from .nms import batched_nms +from .metrics import ANETdetection, remove_duplicate_annotations +from .train_utils import (make_optimizer, make_scheduler, save_checkpoint, + AverageMeter, train_one_epoch, valid_one_epoch, + fix_random_seed, ModelEma) +from .postprocessing import postprocess_results + +__all__ = ['batched_nms', 'make_optimizer', 'make_scheduler', 'save_checkpoint', + 'AverageMeter', 'train_one_epoch', 'valid_one_epoch', 'ANETdetection', + 'postprocess_results', 'fix_random_seed', 'ModelEma', 'remove_duplicate_annotations'] diff --git a/Downstream/Temporal-Action-Localization/libs/utils/__pycache__/__init__.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/utils/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..bdb2f4e Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/utils/__pycache__/__init__.cpython-36.pyc differ diff --git a/Downstream/Temporal-Action-Localization/libs/utils/__pycache__/lr_schedulers.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/utils/__pycache__/lr_schedulers.cpython-36.pyc new file mode 100644 index 0000000..98ddd3e Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/utils/__pycache__/lr_schedulers.cpython-36.pyc differ diff --git a/Downstream/Temporal-Action-Localization/libs/utils/__pycache__/metrics.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/utils/__pycache__/metrics.cpython-36.pyc new file mode 100644 index 0000000..73d5bd6 Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/utils/__pycache__/metrics.cpython-36.pyc differ diff --git a/Downstream/Temporal-Action-Localization/libs/utils/__pycache__/nms.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/utils/__pycache__/nms.cpython-36.pyc new file mode 100644 index 0000000..57db431 Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/utils/__pycache__/nms.cpython-36.pyc differ diff --git a/Downstream/Temporal-Action-Localization/libs/utils/__pycache__/postprocessing.cpython-36.pyc 
b/Downstream/Temporal-Action-Localization/libs/utils/__pycache__/postprocessing.cpython-36.pyc new file mode 100644 index 0000000..b264a43 Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/utils/__pycache__/postprocessing.cpython-36.pyc differ diff --git a/Downstream/Temporal-Action-Localization/libs/utils/__pycache__/train_utils.cpython-36.pyc b/Downstream/Temporal-Action-Localization/libs/utils/__pycache__/train_utils.cpython-36.pyc new file mode 100644 index 0000000..d398df5 Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/utils/__pycache__/train_utils.cpython-36.pyc differ diff --git a/Downstream/Temporal-Action-Localization/libs/utils/build/lib.linux-x86_64-3.6/nms_1d_cpu.cpython-36m-x86_64-linux-gnu.so b/Downstream/Temporal-Action-Localization/libs/utils/build/lib.linux-x86_64-3.6/nms_1d_cpu.cpython-36m-x86_64-linux-gnu.so new file mode 100644 index 0000000..b1d2786 Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/utils/build/lib.linux-x86_64-3.6/nms_1d_cpu.cpython-36m-x86_64-linux-gnu.so differ diff --git a/Downstream/Temporal-Action-Localization/libs/utils/build/temp.linux-x86_64-3.6/.ninja_deps b/Downstream/Temporal-Action-Localization/libs/utils/build/temp.linux-x86_64-3.6/.ninja_deps new file mode 100644 index 0000000..0960d71 Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/utils/build/temp.linux-x86_64-3.6/.ninja_deps differ diff --git a/Downstream/Temporal-Action-Localization/libs/utils/build/temp.linux-x86_64-3.6/.ninja_log b/Downstream/Temporal-Action-Localization/libs/utils/build/temp.linux-x86_64-3.6/.ninja_log new file mode 100644 index 0000000..c95ac79 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/utils/build/temp.linux-x86_64-3.6/.ninja_log @@ -0,0 +1,6 @@ +# ninja log v5 +2 45777 1652892450000000000 /mnt/cache/liuyi1.vendor/code/actionformer/libs/utils/build/temp.linux-x86_64-3.6/csrc/nms_cpu.o 4893a0e4d1d5ce29 +4 41020 1658216087000000000 /mnt/cache/liuyi/code/af_base/libs/utils/build/temp.linux-x86_64-3.6/csrc/nms_cpu.o a76e449256e9b2bc +346 44757 1658307921000000000 /mnt/cache/liuyi/code/af_base/libs/utils/build/temp.linux-x86_64-3.6/csrc/nms_cpu.o 9614a295e4a5dc48 +207 31780 1658319734000000000 /mnt/cache/liuyi/code/af_base/libs/utils/build/temp.linux-x86_64-3.6/csrc/nms_cpu.o cbd2a8e06e6c1852 +47 38716 1658321354000000000 /mnt/cache/liuyi/code/af_base/libs/utils/build/temp.linux-x86_64-3.6/csrc/nms_cpu.o fb767b58cb7282e7 diff --git a/Downstream/Temporal-Action-Localization/libs/utils/build/temp.linux-x86_64-3.6/build.ninja b/Downstream/Temporal-Action-Localization/libs/utils/build/temp.linux-x86_64-3.6/build.ninja new file mode 100644 index 0000000..9d023d6 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/utils/build/temp.linux-x86_64-3.6/build.ninja @@ -0,0 +1,20 @@ +ninja_required_version = 1.3 +cxx = c++ + +cflags = -pthread -B /mnt/cache/liuyi/miniconda3/envs/torch/compiler_compat -Wl,--sysroot=/ -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes -fPIC -I/mnt/cache/liuyi/miniconda3/envs/torch/lib/python3.6/site-packages/torch/include -I/mnt/cache/liuyi/miniconda3/envs/torch/lib/python3.6/site-packages/torch/include/torch/csrc/api/include -I/mnt/cache/liuyi/miniconda3/envs/torch/lib/python3.6/site-packages/torch/include/TH -I/mnt/cache/liuyi/miniconda3/envs/torch/lib/python3.6/site-packages/torch/include/THC -I/mnt/cache/liuyi/miniconda3/envs/torch/include/python3.6m -c +post_cflags = -fopenmp 
-DTORCH_API_INCLUDE_EXTENSION_H '-DPYBIND11_COMPILER_TYPE="_gcc"' '-DPYBIND11_STDLIB="_libstdcpp"' '-DPYBIND11_BUILD_ABI="_cxxabi1011"' -DTORCH_EXTENSION_NAME=nms_1d_cpu -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++14 +ldflags = + +rule compile + command = $cxx -MMD -MF $out.d $cflags -c $in -o $out $post_cflags + depfile = $out.d + deps = gcc + + + +build /mnt/cache/liuyi/code/af_base/libs/utils/build/temp.linux-x86_64-3.6/./csrc/nms_cpu.o: compile /mnt/cache/liuyi/code/af_base/libs/utils/csrc/nms_cpu.cpp + + + + + diff --git a/Downstream/Temporal-Action-Localization/libs/utils/build/temp.linux-x86_64-3.6/csrc/nms_cpu.o b/Downstream/Temporal-Action-Localization/libs/utils/build/temp.linux-x86_64-3.6/csrc/nms_cpu.o new file mode 100644 index 0000000..9a510ee Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/utils/build/temp.linux-x86_64-3.6/csrc/nms_cpu.o differ diff --git a/Downstream/Temporal-Action-Localization/libs/utils/csrc/nms_cpu.cpp b/Downstream/Temporal-Action-Localization/libs/utils/csrc/nms_cpu.cpp new file mode 100644 index 0000000..d8e8cad --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/utils/csrc/nms_cpu.cpp @@ -0,0 +1,182 @@ +#include +#include +#include +#include + +// 1D NMS (CPU) helper functions, ported from +// https://github.com/open-mmlab/mmcv/blob/master/mmcv/ops/csrc/pytorch/nms.cpp + +using namespace at; + +#define CHECK_CPU(x) \ + TORCH_CHECK(!x.device().is_cuda(), #x " must be a CPU tensor") +#define CHECK_CONTIGUOUS(x) \ + TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") +#define CHECK_CPU_INPUT(x) \ + CHECK_CPU(x); \ + CHECK_CONTIGUOUS(x) + +Tensor nms_1d_cpu(Tensor segs, Tensor scores, float iou_threshold) { + if (segs.numel() == 0) { + return at::empty({0}, segs.options().dtype(at::kLong)); + } + auto x1_t = segs.select(1, 0).contiguous(); + auto x2_t = segs.select(1, 1).contiguous(); + + Tensor areas_t = x2_t - x1_t + 1e-6; + + auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); + + auto nsegs = segs.size(0); + Tensor select_t = at::ones({nsegs}, segs.options().dtype(at::kBool)); + + auto select = select_t.data_ptr(); + auto order = order_t.data_ptr(); + auto x1 = x1_t.data_ptr(); + auto x2 = x2_t.data_ptr(); + auto areas = areas_t.data_ptr(); + + for (int64_t _i = 0; _i < nsegs; _i++) { + if (select[_i] == false) continue; + auto i = order[_i]; + auto ix1 = x1[i]; + auto ix2 = x2[i]; + auto iarea = areas[i]; + + for (int64_t _j = _i + 1; _j < nsegs; _j++) { + if (select[_j] == false) continue; + auto j = order[_j]; + auto xx1 = std::max(ix1, x1[j]); + auto xx2 = std::min(ix2, x2[j]); + + auto inter = std::max(0.f, xx2 - xx1); + auto ovr = inter / (iarea + areas[j] - inter); + if (ovr >= iou_threshold) select[_j] = false; + } + } + return order_t.masked_select(select_t); +} + +Tensor nms_1d(Tensor segs, Tensor scores, float iou_threshold) { + CHECK_CPU_INPUT(segs); + CHECK_CPU_INPUT(scores); + return nms_1d_cpu(segs, scores, iou_threshold); + +} + +Tensor softnms_1d_cpu(Tensor segs, Tensor scores, Tensor dets, float iou_threshold, + float sigma, float min_score, int method) { + if (segs.numel() == 0) { + return at::empty({0}, segs.options().dtype(at::kLong)); + } + + auto x1_t = segs.select(1, 0).contiguous(); + auto x2_t = segs.select(1, 1).contiguous(); + auto scores_t = scores.clone(); + + Tensor areas_t = x2_t - x1_t + 1e-6; + + auto nsegs = segs.size(0); + auto x1 = x1_t.data_ptr(); + auto x2 = x2_t.data_ptr(); + auto sc = scores_t.data_ptr(); + auto areas = areas_t.data_ptr(); + auto de = 
dets.data_ptr(); + + int64_t pos = 0; + Tensor inds_t = at::arange(nsegs, segs.options().dtype(at::kLong)); + auto inds = inds_t.data_ptr(); + + for (int64_t i = 0; i < nsegs; i++) { + auto max_score = sc[i]; + auto max_pos = i; + + // get seg with max score + pos = i + 1; + while (pos < nsegs) { + if (max_score < sc[pos]) { + max_score = sc[pos]; + max_pos = pos; + } + pos = pos + 1; + } + // swap the current seg (i) and the seg with max score (max_pos) + auto ix1 = de[i * 3 + 0] = x1[max_pos]; + auto ix2 = de[i * 3 + 1] = x2[max_pos]; + auto iscore = de[i * 3 + 2] = sc[max_pos]; + auto iarea = areas[max_pos]; + auto iind = inds[max_pos]; + + x1[max_pos] = x1[i]; + x2[max_pos] = x2[i]; + sc[max_pos] = sc[i]; + areas[max_pos] = areas[i]; + inds[max_pos] = inds[i]; + + x1[i] = ix1; + x2[i] = ix2; + sc[i] = iscore; + areas[i] = iarea; + inds[i] = iind; + + // reset pos + pos = i + 1; + while (pos < nsegs) { + auto xx1 = std::max(ix1, x1[pos]); + auto xx2 = std::min(ix2, x2[pos]); + + auto inter = std::max(0.f, xx2 - xx1); + auto ovr = inter / (iarea + areas[pos] - inter); + + float weight = 1.; + if (method == 0) { + // vanilla nms + if (ovr >= iou_threshold) weight = 0; + } else if (method == 1) { + // linear + if (ovr >= iou_threshold) weight = 1 - ovr; + } else if (method == 2) { + // gaussian + weight = std::exp(-(ovr * ovr) / sigma); + } + sc[pos] *= weight; + + // if the score falls below threshold, discard the segment by + // swapping with last seg update N + if (sc[pos] < min_score) { + x1[pos] = x1[nsegs - 1]; + x2[pos] = x2[nsegs - 1]; + sc[pos] = sc[nsegs - 1]; + areas[pos] = areas[nsegs - 1]; + inds[pos] = inds[nsegs - 1]; + nsegs = nsegs - 1; + pos = pos - 1; + } + + pos = pos + 1; + } + } + return inds_t.slice(0, 0, nsegs); +} + +Tensor softnms_1d(Tensor segs, Tensor scores, Tensor dets, float iou_threshold, + float sigma, float min_score, int method) { + // softnms is not implemented on GPU + CHECK_CPU_INPUT(segs) + CHECK_CPU_INPUT(scores) + CHECK_CPU_INPUT(dets) + return softnms_1d_cpu(segs, scores, dets, iou_threshold, sigma, min_score, method); +} + +// bind to torch interface +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def( + "nms", &nms_1d, "nms (CPU) ", + py::arg("segs"), py::arg("scores"), py::arg("iou_threshold") + ); + m.def( + "softnms", &softnms_1d, "softnms (CPU) ", + py::arg("segs"), py::arg("scores"), py::arg("dets"), py::arg("iou_threshold"), + py::arg("sigma"), py::arg("min_score"), py::arg("method") + ); +} diff --git a/Downstream/Temporal-Action-Localization/libs/utils/dist/nms_1d_cpu-0.0.0-py3.6-linux-x86_64.egg b/Downstream/Temporal-Action-Localization/libs/utils/dist/nms_1d_cpu-0.0.0-py3.6-linux-x86_64.egg new file mode 100644 index 0000000..6bb8eed Binary files /dev/null and b/Downstream/Temporal-Action-Localization/libs/utils/dist/nms_1d_cpu-0.0.0-py3.6-linux-x86_64.egg differ diff --git a/Downstream/Temporal-Action-Localization/libs/utils/lr_schedulers.py b/Downstream/Temporal-Action-Localization/libs/utils/lr_schedulers.py new file mode 100644 index 0000000..3290fd8 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/utils/lr_schedulers.py @@ -0,0 +1,211 @@ +import math +import warnings +from collections import Counter +from bisect import bisect_right + +import torch +from torch.optim.lr_scheduler import _LRScheduler + + +class LinearWarmupCosineAnnealingLR(_LRScheduler): + """ + Sets the learning rate of each parameter group to follow a linear warmup schedule + between warmup_start_lr and base_lr followed by a cosine annealing 
schedule between + base_lr and eta_min. + + .. warning:: + It is recommended to call :func:`.step()` for :class:`LinearWarmupCosineAnnealingLR` + after each iteration as calling it after each epoch will keep the starting lr at + warmup_start_lr for the first epoch which is 0 in most cases. + + .. warning:: + passing epoch to :func:`.step()` is being deprecated and comes with an EPOCH_DEPRECATION_WARNING. + It calls the :func:`_get_closed_form_lr()` method for this scheduler instead of + :func:`get_lr()`. Though this does not change the behavior of the scheduler, when passing + epoch param to :func:`.step()`, the user should call the :func:`.step()` function before calling + train and validation methods. + + Example: + >>> layer = nn.Linear(10, 1) + >>> optimizer = Adam(layer.parameters(), lr=0.02) + >>> scheduler = LinearWarmupCosineAnnealingLR(optimizer, warmup_epochs=10, max_epochs=40) + >>> # + >>> # the default case + >>> for epoch in range(40): + ... # train(...) + ... # validate(...) + ... scheduler.step() + >>> # + >>> # passing epoch param case + >>> for epoch in range(40): + ... scheduler.step(epoch) + ... # train(...) + ... # validate(...) + """ + + def __init__( + self, + optimizer, + warmup_epochs, + max_epochs, + warmup_start_lr = 0.0, + eta_min = 1e-8, + last_epoch = -1, + ): + """ + Args: + optimizer (Optimizer): Wrapped optimizer. + warmup_epochs (int): Maximum number of iterations for linear warmup + max_epochs (int): Maximum number of iterations + warmup_start_lr (float): Learning rate to start the linear warmup. Default: 0. + eta_min (float): Minimum learning rate. Default: 0. + last_epoch (int): The index of last epoch. Default: -1. + """ + self.warmup_epochs = warmup_epochs + self.max_epochs = max_epochs + self.warmup_start_lr = warmup_start_lr + self.eta_min = eta_min + + super(LinearWarmupCosineAnnealingLR, self).__init__(optimizer, last_epoch) + + def get_lr(self): + """ + Compute learning rate using chainable form of the scheduler + """ + if not self._get_lr_called_within_step: + warnings.warn( + "To get the last learning rate computed by the scheduler, " + "please use `get_last_lr()`.", + UserWarning, + ) + + if self.last_epoch == 0: + return [self.warmup_start_lr] * len(self.base_lrs) + elif self.last_epoch < self.warmup_epochs: + return [ + group["lr"] + (base_lr - self.warmup_start_lr) / (self.warmup_epochs - 1) + for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups) + ] + elif self.last_epoch == self.warmup_epochs: + return self.base_lrs + elif (self.last_epoch - 1 - self.max_epochs) % (2 * (self.max_epochs - self.warmup_epochs)) == 0: + return [ + group["lr"] + (base_lr - self.eta_min) * + (1 - math.cos(math.pi / (self.max_epochs - self.warmup_epochs))) / 2 + for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups) + ] + + return [ + (1 + math.cos(math.pi * (self.last_epoch - self.warmup_epochs) / (self.max_epochs - self.warmup_epochs))) / + ( + 1 + + math.cos(math.pi * (self.last_epoch - self.warmup_epochs - 1) / (self.max_epochs - self.warmup_epochs)) + ) * (group["lr"] - self.eta_min) + self.eta_min for group in self.optimizer.param_groups + ] + + def _get_closed_form_lr(self): + """ + Called when epoch is passed as a param to the `step` function of the scheduler. 
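+ Returns the learning rate of each parameter group as a direct function of
+ self.last_epoch (linear warmup followed by cosine annealing), without relying
+ on the previously computed learning rates.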
+ """ + if self.last_epoch < self.warmup_epochs: + return [ + self.warmup_start_lr + self.last_epoch * (base_lr - self.warmup_start_lr) / (self.warmup_epochs - 1) + for base_lr in self.base_lrs + ] + + return [ + self.eta_min + 0.5 * (base_lr - self.eta_min) * + (1 + math.cos(math.pi * (self.last_epoch - self.warmup_epochs) / (self.max_epochs - self.warmup_epochs))) + for base_lr in self.base_lrs + ] + + +class LinearWarmupMultiStepLR(_LRScheduler): + """ + Sets the learning rate of each parameter group to follow a linear warmup schedule + between warmup_start_lr and base_lr followed by a multi-step schedule that decays + the learning rate of each parameter group by gamma once the + number of epoch reaches one of the milestones. + + .. warning:: + It is recommended to call :func:`.step()` for :class:`LinearWarmupCosineAnnealingLR` + after each iteration as calling it after each epoch will keep the starting lr at + warmup_start_lr for the first epoch which is 0 in most cases. + + .. warning:: + passing epoch to :func:`.step()` is being deprecated and comes with an EPOCH_DEPRECATION_WARNING. + It calls the :func:`_get_closed_form_lr()` method for this scheduler instead of + :func:`get_lr()`. Though this does not change the behavior of the scheduler, when passing + epoch param to :func:`.step()`, the user should call the :func:`.step()` function before calling + train and validation methods. + """ + + def __init__( + self, + optimizer, + warmup_epochs, + milestones, + warmup_start_lr = 0.0, + gamma = 0.1, + last_epoch = -1, + ): + """ + Args: + optimizer (Optimizer): Wrapped optimizer. + warmup_epochs (int): Maximum number of iterations for linear warmup + max_epochs (int): Maximum number of iterations + milestones (list): List of epoch indices. Must be increasing. + warmup_start_lr (float): Learning rate to start the linear warmup. Default: 0. + gamma (float): Multiplicative factor of learning rate decay. + Default: 0.1. + last_epoch (int): The index of last epoch. Default: -1. + """ + self.warmup_epochs = warmup_epochs + self.warmup_start_lr = warmup_start_lr + self.milestones = Counter(milestones) + self.gamma = gamma + + super(LinearWarmupMultiStepLR, self).__init__(optimizer, last_epoch) + + def get_lr(self): + """ + Compute learning rate using chainable form of the scheduler + """ + if not self._get_lr_called_within_step: + warnings.warn("To get the last learning rate computed by the scheduler, " + "please use `get_last_lr()`.", UserWarning) + + if self.last_epoch == 0: + # starting warm up + return [self.warmup_start_lr] * len(self.base_lrs) + elif self.last_epoch < self.warmup_epochs: + # linear warm up (0 ~ self.warmup_epochs -1) + return [ + group["lr"] + (base_lr - self.warmup_start_lr) / (self.warmup_epochs - 1) + for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups) + ] + elif self.last_epoch == self.warmup_epochs: + # end of warm up (reset to base lrs) + return self.base_lrs + elif (self.last_epoch - self.warmup_epochs) not in self.milestones: + # in between the steps + return [group['lr'] for group in self.optimizer.param_groups] + + return [ + group['lr'] * self.gamma ** self.milestones[self.last_epoch - self.warmup_epochs] + for group in self.optimizer.param_groups + ] + + def _get_closed_form_lr(self): + """ + Called when epoch is passed as a param to the `step` function of the scheduler. 
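+        During warmup this is the same linear interpolation; afterwards the base lr is decayed by gamma once
+        for every milestone that has already passed (located with bisect over the sorted milestones).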
+ """ + if self.last_epoch < self.warmup_epochs: + return [ + self.warmup_start_lr + self.last_epoch * (base_lr - self.warmup_start_lr) / (self.warmup_epochs - 1) + for base_lr in self.base_lrs + ] + + milestones = list(sorted(self.milestones.elements())) + return [base_lr * self.gamma ** bisect_right(milestones, self.last_epoch - self.warmup_epochs) + for base_lr in self.base_lrs] diff --git a/Downstream/Temporal-Action-Localization/libs/utils/metrics.py b/Downstream/Temporal-Action-Localization/libs/utils/metrics.py new file mode 100644 index 0000000..3cdaea5 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/utils/metrics.py @@ -0,0 +1,340 @@ +# Modified from official EPIC-Kitchens action detection evaluation code +# see https://github.com/epic-kitchens/C2-Action-Detection/blob/master/EvaluationCode/evaluate_detection_json_ek100.py +import os +import json +import pandas as pd +import numpy as np +from joblib import Parallel, delayed +from typing import List +from typing import Tuple +from typing import Dict + + +def remove_duplicate_annotations(ants, tol=1e-3): + # remove duplicate annotations (same category and starting/ending time) + valid_events = [] + for event in ants: + s, e, l = event['segment'][0], event['segment'][1], event['label_id'] + valid = True + for p_event in valid_events: + if ((abs(s-p_event['segment'][0]) <= tol) + and (abs(e-p_event['segment'][1]) <= tol) + and (l == p_event['label_id']) + ): + valid = False + break + if valid: + valid_events.append(event) + return valid_events + + +def load_gt_seg_from_json(json_file, split=None, label='label_id', label_offset=0): + # load json file + with open(json_file, "r", encoding="utf8") as f: + json_db = json.load(f) + json_db = json_db['database'] + + vids, starts, stops, labels = [], [], [], [] + for k, v in json_db.items(): + + # filter based on split + if (split is not None) and v['subset'].lower() != split: + continue + # remove duplicated instances + ants = remove_duplicate_annotations(v['annotations']) + # video id + vids += [k] * len(ants) + # for each event, grab the start/end time and label + for event in ants: + starts += [float(event['segment'][0])] + stops += [float(event['segment'][1])] + if isinstance(event[label], (Tuple, List)): + # offset the labels by label_offset + label_id = 0 + for i, x in enumerate(event[label][::-1]): + label_id += label_offset**i + int(x) + else: + # load label_id directly + label_id = int(event[label]) + labels += [label_id] + + # move to pd dataframe + gt_base = pd.DataFrame({ + 'video-id' : vids, + 't-start' : starts, + 't-end': stops, + 'label': labels + }) + + return gt_base + + +def load_pred_seg_from_json(json_file, label='label_id', label_offset=0): + # load json file + with open(json_file, "r", encoding="utf8") as f: + json_db = json.load(f) + json_db = json_db['database'] + + vids, starts, stops, labels, scores = [], [], [], [], [] + for k, v, in json_db.items(): + # video id + vids += [k] * len(v) + # for each event + for event in v: + starts += [float(event['segment'][0])] + stops += [float(event['segment'][1])] + if isinstance(event[label], (Tuple, List)): + # offset the labels by label_offset + label_id = 0 + for i, x in enumerate(event[label][::-1]): + label_id += label_offset**i + int(x) + else: + # load label_id directly + label_id = int(event[label]) + labels += [label_id] + scores += [float(event['scores'])] + + # move to pd dataframe + pred_base = pd.DataFrame({ + 'video-id' : vids, + 't-start' : starts, + 't-end': stops, + 'label': labels, + 
'score': scores + }) + + return pred_base + + +class ANETdetection(object): + """Adapted from https://github.com/activitynet/ActivityNet/blob/master/Evaluation/eval_detection.py""" + + def __init__( + self, + ant_file, + split=None, + tiou_thresholds=np.linspace(0.1, 0.5, 5), + label='label_id', + label_offset=0, + num_workers=8, + dataset_name=None, + ): + + self.tiou_thresholds = tiou_thresholds + self.ap = None + self.num_workers = num_workers + if dataset_name is not None: + self.dataset_name = dataset_name + else: + self.dataset_name = os.path.basename(ant_file).replace('.json', '') + + # Import ground truth and predictions + self.split = split + self.ground_truth = load_gt_seg_from_json( + ant_file, split=self.split, label=label, label_offset=label_offset) + + # remove labels that does not exists in gt + self.activity_index = {j: i for i, j in enumerate(sorted(self.ground_truth['label'].unique()))} + self.ground_truth['label']=self.ground_truth['label'].replace(self.activity_index) + + def _get_predictions_with_label(self, prediction_by_label, label_name, cidx): + """Get all predicitons of the given label. Return empty DataFrame if there + is no predcitions with the given label. + """ + try: + res = prediction_by_label.get_group(cidx).reset_index(drop=True) + return res + except: + print('Warning: No predictions of label \'%s\' were provdied.' % label_name) + return pd.DataFrame() + + def wrapper_compute_average_precision(self, preds): + """Computes average precision for each class in the subset. + """ + ap = np.zeros((len(self.tiou_thresholds), len(self.activity_index))) + + # Adaptation to query faster + ground_truth_by_label = self.ground_truth.groupby('label') + prediction_by_label = preds.groupby('label') + + results = Parallel(n_jobs=self.num_workers)( + delayed(compute_average_precision_detection)( + ground_truth=ground_truth_by_label.get_group(cidx).reset_index(drop=True), + prediction=self._get_predictions_with_label(prediction_by_label, label_name, cidx), + tiou_thresholds=self.tiou_thresholds, + ) for label_name, cidx in self.activity_index.items()) + + for i, cidx in enumerate(self.activity_index.values()): + ap[:,cidx] = results[i] + + return ap + + def evaluate(self, preds, verbose=True): + """Evaluates a prediction file. For the detection task we measure the + interpolated mean average precision to measure the performance of a + method. 
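+        AP is computed at every tIoU threshold supplied at construction time and then averaged into a single mAP.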
+ preds can be (1) a pd.DataFrame; or (2) a json file where the data will be loaded; + or (3) a python dict item with numpy arrays as the values + """ + + if isinstance(preds, pd.DataFrame): + assert 'label' in preds + elif isinstance(preds, str) and os.path.isfile(preds): + preds = load_pred_seg_from_json(preds) + elif isinstance(preds, Dict): + # move to pd dataframe + # did not check dtype here, can accept both numpy / pytorch tensors + preds = pd.DataFrame({ + 'video-id' : preds['video-id'], + 't-start' : preds['t-start'].tolist(), + 't-end': preds['t-end'].tolist(), + 'label': preds['label'].tolist(), + 'score': preds['score'].tolist() + }) + # always reset ap + self.ap = None + + # make the label ids consistent + preds['label'] = preds['label'].replace(self.activity_index) + + # compute mAP + self.ap = self.wrapper_compute_average_precision(preds) + mAP = self.ap.mean(axis=1) + average_mAP = mAP.mean() + + # print results + if verbose: + # print the results + print('[RESULTS] Action detection results on {:s}.'.format( + self.dataset_name) + ) + block = '' + for tiou, tiou_mAP in zip(self.tiou_thresholds, mAP): + block += '\n|tIoU = {:.2f}: mAP = {:.2f} (%)'.format(tiou, tiou_mAP*100) + print(block) + print('Avearge mAP: {:.2f} (%)'.format(average_mAP*100)) + + # return the results + return mAP, average_mAP + + +def compute_average_precision_detection( + ground_truth, + prediction, + tiou_thresholds=np.linspace(0.1, 0.5, 5) +): + """Compute average precision (detection task) between ground truth and + predictions data frames. If multiple predictions occurs for the same + predicted segment, only the one with highest score is matches as + true positive. This code is greatly inspired by Pascal VOC devkit. + Parameters + ---------- + ground_truth : df + Data frame containing the ground truth instances. + Required fields: ['video-id', 't-start', 't-end'] + prediction : df + Data frame containing the prediction instances. + Required fields: ['video-id, 't-start', 't-end', 'score'] + tiou_thresholds : 1darray, optional + Temporal intersection over union threshold. + Outputs + ------- + ap : float + Average precision score. + """ + ap = np.zeros(len(tiou_thresholds)) + if prediction.empty: + return ap + + npos = float(len(ground_truth)) + lock_gt = np.ones((len(tiou_thresholds),len(ground_truth))) * -1 + # Sort predictions by decreasing score order. + sort_idx = prediction['score'].values.argsort()[::-1] + prediction = prediction.loc[sort_idx].reset_index(drop=True) + + # Initialize true positive and false positive vectors. + tp = np.zeros((len(tiou_thresholds), len(prediction))) + fp = np.zeros((len(tiou_thresholds), len(prediction))) + + # Adaptation to query faster + ground_truth_gbvn = ground_truth.groupby('video-id') + + # Assigning true positive to truly ground truth instances. + for idx, this_pred in prediction.iterrows(): + + try: + # Check if there is at least one ground truth in the video associated. + ground_truth_videoid = ground_truth_gbvn.get_group(this_pred['video-id']) + except Exception as e: + fp[:, idx] = 1 + continue + + this_gt = ground_truth_videoid.reset_index() + tiou_arr = segment_iou(this_pred[['t-start', 't-end']].values, + this_gt[['t-start', 't-end']].values) + # We would like to retrieve the predictions with highest tiou score. 
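+        # Predictions are visited in descending score order; for each one the candidate
+        # ground-truth segments are scanned by decreasing tIoU. A ground-truth segment can
+        # be matched at most once per tIoU threshold (tracked in lock_gt), so a lower-scored
+        # prediction that only overlaps already-matched segments ends up as a false positive.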
+ tiou_sorted_idx = tiou_arr.argsort()[::-1] + for tidx, tiou_thr in enumerate(tiou_thresholds): + for jdx in tiou_sorted_idx: + if tiou_arr[jdx] < tiou_thr: + fp[tidx, idx] = 1 + break + if lock_gt[tidx, this_gt.loc[jdx]['index']] >= 0: + continue + # Assign as true positive after the filters above. + tp[tidx, idx] = 1 + lock_gt[tidx, this_gt.loc[jdx]['index']] = idx + break + + if fp[tidx, idx] == 0 and tp[tidx, idx] == 0: + fp[tidx, idx] = 1 + + tp_cumsum = np.cumsum(tp, axis=1).astype(np.float) + fp_cumsum = np.cumsum(fp, axis=1).astype(np.float) + recall_cumsum = tp_cumsum / npos + + precision_cumsum = tp_cumsum / (tp_cumsum + fp_cumsum) + + for tidx in range(len(tiou_thresholds)): + ap[tidx] = interpolated_prec_rec(precision_cumsum[tidx,:], recall_cumsum[tidx,:]) + + return ap + + +def segment_iou(target_segment, candidate_segments): + """Compute the temporal intersection over union between a + target segment and all the test segments. + Parameters + ---------- + target_segment : 1d array + Temporal target segment containing [starting, ending] times. + candidate_segments : 2d array + Temporal candidate segments containing N x [starting, ending] times. + Outputs + ------- + tiou : 1d array + Temporal intersection over union score of the N's candidate segments. + """ + tt1 = np.maximum(target_segment[0], candidate_segments[:, 0]) + tt2 = np.minimum(target_segment[1], candidate_segments[:, 1]) + # Intersection including Non-negative overlap score. + segments_intersection = (tt2 - tt1).clip(0) + # Segment union. + segments_union = (candidate_segments[:, 1] - candidate_segments[:, 0]) \ + + (target_segment[1] - target_segment[0]) - segments_intersection + # Compute overlap as the ratio of the intersection + # over union of two segments. + tIoU = segments_intersection.astype(float) / segments_union + return tIoU + + +def interpolated_prec_rec(prec, rec): + """Interpolated AP - VOCdevkit from VOC 2011. 
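+    Precision is first made monotonically non-increasing from right to left; AP is then the
+    area under this precision envelope, accumulated over the recall steps where recall changes.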
+ """ + mprec = np.hstack([[0], prec, [0]]) + mrec = np.hstack([[0], rec, [1]]) + for i in range(len(mprec) - 1)[::-1]: + mprec[i] = max(mprec[i], mprec[i + 1]) + idx = np.where(mrec[1::] != mrec[0:-1])[0] + 1 + ap = np.sum((mrec[idx] - mrec[idx - 1]) * mprec[idx]) + return ap diff --git a/Downstream/Temporal-Action-Localization/libs/utils/nms.py b/Downstream/Temporal-Action-Localization/libs/utils/nms.py new file mode 100644 index 0000000..4693563 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/utils/nms.py @@ -0,0 +1,190 @@ +# Functions for 1D NMS, modified from: +# https://github.com/open-mmlab/mmcv/blob/master/mmcv/ops/nms.py +import torch + +import nms_1d_cpu + + +class NMSop(torch.autograd.Function): + @staticmethod + def forward( + ctx, segs, scores, cls_idxs, + iou_threshold, min_score, max_num + ): + # vanilla nms will not change the score, so we can filter segs first + is_filtering_by_score = (min_score > 0) + if is_filtering_by_score: + valid_mask = scores > min_score + segs, scores = segs[valid_mask], scores[valid_mask] + cls_idxs = cls_idxs[valid_mask] + valid_inds = torch.nonzero( + valid_mask, as_tuple=False).squeeze(dim=1) + + # nms op; return inds that is sorted by descending order + inds = nms_1d_cpu.nms( + segs.contiguous().cpu(), + scores.contiguous().cpu(), + iou_threshold=float(iou_threshold)) + # cap by max number + if max_num > 0: + inds = inds[:min(max_num, len(inds))] + # return the sorted segs / scores + sorted_segs = segs[inds] + sorted_scores = scores[inds] + sorted_cls_idxs = cls_idxs[inds] + return sorted_segs.clone(), sorted_scores.clone(), sorted_cls_idxs.clone() + + +class SoftNMSop(torch.autograd.Function): + @staticmethod + def forward( + ctx, segs, scores, cls_idxs, + iou_threshold, sigma, min_score, method, max_num + ): + # pre allocate memory for sorted results + dets = segs.new_empty((segs.size(0), 3), device='cpu') + # softnms op, return dets that stores the sorted segs / scores + inds = nms_1d_cpu.softnms( + segs.cpu(), + scores.cpu(), + dets.cpu(), + iou_threshold=float(iou_threshold), + sigma=float(sigma), + min_score=float(min_score), + method=int(method)) + # cap by max number + if max_num > 0: + n_segs = min(len(inds), max_num) + else: + n_segs = len(inds) + sorted_segs = dets[:n_segs, :2] + sorted_scores = dets[:n_segs, 2] + sorted_cls_idxs = cls_idxs[inds] + sorted_cls_idxs = sorted_cls_idxs[:n_segs] + return sorted_segs.clone(), sorted_scores.clone(), sorted_cls_idxs.clone() + + +def seg_voting(nms_segs, all_segs, all_scores, iou_threshold, score_offset=1.5): + """ + blur localization results by incorporating side segs. + this is known as bounding box voting in object detection literature. 
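+    each NMS-kept segment is replaced by the score-weighted average of all candidate segments
+    whose tIoU with it reaches iou_threshold; this can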
+ slightly boost the performance around iou_threshold + """ + + # *_segs : N_i x 2, all_scores: N, + # apply offset + offset_scores = all_scores + score_offset + + # computer overlap between nms and all segs + # construct the distance matrix of # N_nms x # N_all + num_nms_segs, num_all_segs = nms_segs.shape[0], all_segs.shape[0] + ex_nms_segs = nms_segs[:, None].expand(num_nms_segs, num_all_segs, 2) + ex_all_segs = all_segs[None, :].expand(num_nms_segs, num_all_segs, 2) + + # compute intersection + left = torch.maximum(ex_nms_segs[:, :, 0], ex_all_segs[:, :, 0]) + right = torch.minimum(ex_nms_segs[:, :, 1], ex_all_segs[:, :, 1]) + inter = (right-left).clamp(min=0) + + # lens of all segments + nms_seg_lens = ex_nms_segs[:, :, 1] - ex_nms_segs[:, :, 0] + all_seg_lens = ex_all_segs[:, :, 1] - ex_all_segs[:, :, 0] + + # iou + iou = inter / (nms_seg_lens + all_seg_lens - inter) + + # get neighbors (# N_nms x # N_all) / weights + seg_weights = (iou >= iou_threshold).to(all_scores.dtype) * all_scores[None, :] + seg_weights /= torch.sum(seg_weights, dim=1, keepdim=True) + refined_segs = seg_weights @ all_segs + + return refined_segs + +def batched_nms( + segs, + scores, + cls_idxs, + iou_threshold, + min_score, + max_seg_num, + use_soft_nms=True, + multiclass=True, + sigma=0.5, + voting_thresh=0.75, +): + # Based on Detectron2 implementation, + num_segs = segs.shape[0] + # corner case, no prediction outputs + if num_segs == 0: + return torch.zeros([0, 2]),\ + torch.zeros([0,]),\ + torch.zeros([0,], dtype=cls_idxs.dtype) + + if multiclass: + # multiclass nms: apply nms on each class independently + new_segs, new_scores, new_cls_idxs = [], [], [] + for class_id in torch.unique(cls_idxs): + curr_indices = torch.where(cls_idxs == class_id)[0] + # soft_nms vs nms + if use_soft_nms: + sorted_segs, sorted_scores, sorted_cls_idxs = SoftNMSop.apply( + segs[curr_indices], + scores[curr_indices], + cls_idxs[curr_indices], + iou_threshold, + sigma, + min_score, + 2, + max_seg_num + ) + else: + sorted_segs, sorted_scores, sorted_cls_idxs = NMSop.apply( + segs[curr_indices], + scores[curr_indices], + cls_idxs[curr_indices], + iou_threshold, + min_score, + max_seg_num + ) + # disable seg voting for multiclass nms, no sufficient segs + + # fill in the class index + new_segs.append(sorted_segs) + new_scores.append(sorted_scores) + new_cls_idxs.append(sorted_cls_idxs) + + # cat the results + new_segs = torch.cat(new_segs) + new_scores = torch.cat(new_scores) + new_cls_idxs = torch.cat(new_cls_idxs) + + else: + # class agnostic + if use_soft_nms: + new_segs, new_scores, new_cls_idxs = SoftNMSop.apply( + segs, scores, cls_idxs, iou_threshold, + sigma, min_score, 2, max_seg_num + ) + else: + new_segs, new_scores, new_cls_idxs = NMSop.apply( + segs, scores, cls_idxs, iou_threshold, + min_score, max_seg_num + ) + # seg voting + if voting_thresh > 0: + new_segs = seg_voting( + new_segs, + segs, + scores, + voting_thresh + ) + + # sort based on scores and return + # truncate the results based on max_seg_num + _, idxs = new_scores.sort(descending=True) + max_seg_num = min(max_seg_num, new_segs.shape[0]) + # needed for multiclass NMS + new_segs = new_segs[idxs[:max_seg_num]] + new_scores = new_scores[idxs[:max_seg_num]] + new_cls_idxs = new_cls_idxs[idxs[:max_seg_num]] + return new_segs, new_scores, new_cls_idxs diff --git a/Downstream/Temporal-Action-Localization/libs/utils/nms_1d_cpu.egg-info/PKG-INFO b/Downstream/Temporal-Action-Localization/libs/utils/nms_1d_cpu.egg-info/PKG-INFO new file mode 100644 index 
0000000..a08a53a --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/utils/nms_1d_cpu.egg-info/PKG-INFO @@ -0,0 +1,10 @@ +Metadata-Version: 1.0 +Name: nms-1d-cpu +Version: 0.0.0 +Summary: UNKNOWN +Home-page: UNKNOWN +Author: UNKNOWN +Author-email: UNKNOWN +License: UNKNOWN +Description: UNKNOWN +Platform: UNKNOWN diff --git a/Downstream/Temporal-Action-Localization/libs/utils/nms_1d_cpu.egg-info/SOURCES.txt b/Downstream/Temporal-Action-Localization/libs/utils/nms_1d_cpu.egg-info/SOURCES.txt new file mode 100644 index 0000000..026b3b7 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/utils/nms_1d_cpu.egg-info/SOURCES.txt @@ -0,0 +1,6 @@ +setup.py +./csrc/nms_cpu.cpp +nms_1d_cpu.egg-info/PKG-INFO +nms_1d_cpu.egg-info/SOURCES.txt +nms_1d_cpu.egg-info/dependency_links.txt +nms_1d_cpu.egg-info/top_level.txt \ No newline at end of file diff --git a/Downstream/Temporal-Action-Localization/libs/utils/nms_1d_cpu.egg-info/dependency_links.txt b/Downstream/Temporal-Action-Localization/libs/utils/nms_1d_cpu.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/utils/nms_1d_cpu.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/Downstream/Temporal-Action-Localization/libs/utils/nms_1d_cpu.egg-info/top_level.txt b/Downstream/Temporal-Action-Localization/libs/utils/nms_1d_cpu.egg-info/top_level.txt new file mode 100644 index 0000000..5c65e3a --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/utils/nms_1d_cpu.egg-info/top_level.txt @@ -0,0 +1 @@ +nms_1d_cpu diff --git a/Downstream/Temporal-Action-Localization/libs/utils/postprocessing.py b/Downstream/Temporal-Action-Localization/libs/utils/postprocessing.py new file mode 100644 index 0000000..fb10f43 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/utils/postprocessing.py @@ -0,0 +1,166 @@ +import os +import shutil +import time +import json +import pickle +from typing import Dict + +import numpy as np +import pdb +import torch +from scipy.special import softmax +from .metrics import ANETdetection + + +# def load_results_from_pkl(filename): +# # load from pickle file +# assert os.path.isfile(filename) +# with open(filename, "rb") as f: +# results = pickle.load(f) +# return results + +def load_results_from_json(filename): + assert os.path.isfile(filename) + with open(filename, "r") as f: + results = json.load(f) + # for activity net external classification scores + if 'results' in results: + results = results['results'] + return results + +def results_to_dict(results): + """convert result arrays into dict used by json files""" + # video ids and allocate the dict + vidxs = sorted(list(set(results['video-id']))) + results_dict = {} + for vidx in vidxs: + results_dict[vidx] = [] + + # fill in the dict + for vidx, start, end, label, score in zip( + results['video-id'], + results['t-start'], + results['t-end'], + results['label'], + results['score'] + ): + results_dict[vidx].append( + { + "label" : int(label), + "score" : float(score), + "segment": [float(start), float(end)], + } + ) + return results_dict + + +def results_to_array(results, num_pred): + # video ids and allocate the dict + vidxs = sorted(list(set(results['video-id']))) + results_dict = {} + for vidx in vidxs: + results_dict[vidx] = { + 'label' : [], + 'score' : [], + 'segment' : [], + } + + # fill in the dict + for vidx, start, end, label, score in zip( + results['video-id'], + results['t-start'], + results['t-end'], + results['label'], + 
results['score'] + ): + results_dict[vidx]['label'].append(int(label)) + results_dict[vidx]['score'].append(float(score)) + results_dict[vidx]['segment'].append( + [float(start), float(end)] + ) + + for vidx in vidxs: + label = np.asarray(results_dict[vidx]['label']) + score = np.asarray(results_dict[vidx]['score']) + segment = np.asarray(results_dict[vidx]['segment']) + + # the score should be already sorted, just for safety + inds = np.argsort(score)[::-1][:num_pred] + label, score, segment = label[inds], score[inds], segment[inds] + results_dict[vidx]['label'] = label + results_dict[vidx]['score'] = score + results_dict[vidx]['segment'] = segment + + return results_dict + + +def postprocess_results(results, cls_score_file, num_pred=200, topk=2): + + # load results and convert to dict + # if isinstance(results, str): + # results = load_results_from_pkl(results) + # array -> dict + results = results_to_array(results, num_pred) + + # load external classification scores + if '.json' in cls_score_file: + cls_scores = load_results_from_json(cls_score_file) + # else: + # cls_scores = load_results_from_pkl(cls_score_file) + + # dict for processed results + processed_results = { + 'video-id': [], + 't-start' : [], + 't-end': [], + 'label': [], + 'score': [] + } + + # process each video + for vid, result in results.items(): + + # pick top k cls scores and idx + if len(cls_scores[vid])==1: + curr_cls_scores = np.asarray(cls_scores[vid][0]) + else: + curr_cls_scores = np.asarray(cls_scores[vid]) + + if max(curr_cls_scores)>1 or min(curr_cls_scores)<0: + curr_cls_scores=softmax(curr_cls_scores) + + topk_cls_idx = np.argsort(curr_cls_scores)[::-1][:topk] + topk_cls_score = curr_cls_scores[topk_cls_idx] + + # model outputs + pred_score, pred_segment, pred_label = \ + result['score'], result['segment'], result['label'] + num_segs = min(num_pred, len(pred_score)) + + # duplicate all segment and assign the topk labels + # K x 1 @ 1 N -> K x N -> KN + # multiply the scores + # temp = np.abs(topk_cls_score[:, None] @ pred_score[None, :]) + # new_pred_score = np.sqrt(temp).flatten() + new_pred_score = np.sqrt(topk_cls_score[:, None] @ pred_score[None, :]).flatten() + new_pred_segment = np.tile(pred_segment, (topk, 1)) + new_pred_label = np.tile(topk_cls_idx[:, None], (1, num_segs)).flatten() + + # add to result + processed_results['video-id'].extend([vid]*num_segs*topk) + processed_results['t-start'].append(new_pred_segment[:, 0]) + processed_results['t-end'].append(new_pred_segment[:, 1]) + processed_results['label'].append(new_pred_label) + processed_results['score'].append(new_pred_score) + # pdb.set_trace() + + processed_results['t-start'] = np.concatenate( + processed_results['t-start'], axis=0) + processed_results['t-end'] = np.concatenate( + processed_results['t-end'], axis=0) + processed_results['label'] = np.concatenate( + processed_results['label'],axis=0) + processed_results['score'] = np.concatenate( + processed_results['score'], axis=0) + + return processed_results diff --git a/Downstream/Temporal-Action-Localization/libs/utils/setup.py b/Downstream/Temporal-Action-Localization/libs/utils/setup.py new file mode 100644 index 0000000..3606fdf --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/utils/setup.py @@ -0,0 +1,19 @@ +import torch + +from setuptools import setup, Extension +from torch.utils.cpp_extension import BuildExtension, CppExtension + + +setup( + name='nms_1d_cpu', + ext_modules=[ + CppExtension( + name = 'nms_1d_cpu', + sources = ['./csrc/nms_cpu.cpp'], + 
extra_compile_args=['-fopenmp'] + ) + ], + cmdclass={ + 'build_ext': BuildExtension + } +) diff --git a/Downstream/Temporal-Action-Localization/libs/utils/train_utils.py b/Downstream/Temporal-Action-Localization/libs/utils/train_utils.py new file mode 100644 index 0000000..4a3d351 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/libs/utils/train_utils.py @@ -0,0 +1,439 @@ +import os +import shutil +import time +import pickle + +import numpy as np +import random +from copy import deepcopy + +import torch +import torch.optim as optim +import torch.backends.cudnn as cudnn + +from .lr_schedulers import LinearWarmupMultiStepLR, LinearWarmupCosineAnnealingLR +from .postprocessing import postprocess_results +from ..modeling import MaskedConv1D, Scale, AffineDropPath, LayerNorm +from tqdm import tqdm +import json + +################################################################################ +def fix_random_seed(seed, include_cuda=True): + rng_generator = torch.manual_seed(seed) + np.random.seed(seed) + random.seed(seed) + os.environ["PYTHONHASHSEED"] = str(seed) + if include_cuda: + # training: disable cudnn benchmark to ensure the reproducibility + cudnn.enabled = True + cudnn.benchmark = False + cudnn.deterministic = True + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + # this is needed for CUDA >= 10.2 + os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8" + # torch.use_deterministic_algorithms(True, warn_only=True) + torch.use_deterministic_algorithms(True) + else: + cudnn.enabled = True + cudnn.benchmark = True + return rng_generator + + +def save_checkpoint(state, is_best, file_folder, + file_name='checkpoint.pth.tar'): + """save checkpoint to file""" + if not os.path.exists(file_folder): + os.mkdir(file_folder) + torch.save(state, os.path.join(file_folder, file_name)) + if is_best: + # skip the optimization / scheduler state + state.pop('optimizer', None) + state.pop('scheduler', None) + torch.save(state, os.path.join(file_folder, 'model_best.pth.tar')) + + +def print_model_params(model): + for name, param in model.named_parameters(): + print(name, param.min().item(), param.max().item(), param.mean().item()) + return + + +def make_optimizer(model, optimizer_config): + """create optimizer + return a supported optimizer + """ + # separate out all parameters that with / without weight decay + # see https://github.com/karpathy/minGPT/blob/master/mingpt/model.py#L134 + decay = set() + no_decay = set() + whitelist_weight_modules = (torch.nn.Linear, torch.nn.Conv1d, MaskedConv1D) + blacklist_weight_modules = (LayerNorm, torch.nn.GroupNorm) + + # loop over all modules / params + for mn, m in model.named_modules(): + for pn, p in m.named_parameters(): + fpn = '%s.%s' % (mn, pn) if mn else pn # full param name + if pn.endswith('bias'): + # all biases will not be decayed + no_decay.add(fpn) + elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules): + # weights of whitelist modules will be weight decayed + decay.add(fpn) + elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules): + # weights of blacklist modules will NOT be weight decayed + no_decay.add(fpn) + elif pn.endswith('scale') and isinstance(m, (Scale, AffineDropPath)): + # corner case of our scale layer + no_decay.add(fpn) + elif pn.endswith('rel_pe'): + # corner case for relative position encoding + no_decay.add(fpn) + + # validate that we considered every parameter + param_dict = {pn: p for pn, p in model.named_parameters()} + inter_params = decay & no_decay + union_params = decay 
| no_decay + assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), ) + assert len(param_dict.keys() - union_params) == 0, \ + "parameters %s were not separated into either decay/no_decay set!" \ + % (str(param_dict.keys() - union_params), ) + + # create the pytorch optimizer object + optim_groups = [ + {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": optimizer_config['weight_decay']}, + {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0}, + ] + + if optimizer_config["type"] == "SGD": + optimizer = optim.SGD( + optim_groups, + lr=optimizer_config["learning_rate"], + momentum=optimizer_config["momentum"] + ) + elif optimizer_config["type"] == "AdamW": + optimizer = optim.AdamW( + optim_groups, + lr=optimizer_config["learning_rate"] + ) + else: + raise TypeError("Unsupported optimizer!") + + return optimizer + + +def make_scheduler( + optimizer, + optimizer_config, + num_iters_per_epoch, + last_epoch=-1 +): + """create scheduler + return a supported scheduler + All scheduler returned by this function should step every iteration + """ + if optimizer_config["warmup"]: + max_epochs = optimizer_config["epochs"] + optimizer_config["warmup_epochs"] + max_steps = max_epochs * num_iters_per_epoch + + # get warmup params + warmup_epochs = optimizer_config["warmup_epochs"] + warmup_steps = warmup_epochs * num_iters_per_epoch + + # with linear warmup: call our custom schedulers + if optimizer_config["schedule_type"] == "cosine": + # Cosine + scheduler = LinearWarmupCosineAnnealingLR( + optimizer, + warmup_steps, + max_steps, + last_epoch=last_epoch + ) + + elif optimizer_config["schedule_type"] == "multistep": + # Multi step + steps = [num_iters_per_epoch * step for step in optimizer_config["schedule_steps"]] + scheduler = LinearWarmupMultiStepLR( + optimizer, + warmup_steps, + steps, + gamma=optimizer_config["schedule_gamma"], + last_epoch=last_epoch + ) + else: + raise TypeError("Unsupported scheduler!") + + else: + max_epochs = optimizer_config["epochs"] + max_steps = max_epochs * num_iters_per_epoch + + # without warmup: call default schedulers + if optimizer_config["schedule_type"] == "cosine": + # step per iteration + scheduler = optim.lr_scheduler.CosineAnnealingLR( + optimizer, + max_steps, + last_epoch=last_epoch + ) + + elif optimizer_config["schedule_type"] == "multistep": + # step every some epochs + steps = [num_iters_per_epoch * step for step in optimizer_config["schedule_steps"]] + scheduler = optim.lr_scheduler.MultiStepLR( + optimizer, + steps, + gamma=schedule_config["gamma"], + last_epoch=last_epoch + ) + else: + raise TypeError("Unsupported scheduler!") + + return scheduler + + +class AverageMeter(object): + """Computes and stores the average and current value. 
+ Used to compute dataset stats from mini-batches + """ + def __init__(self): + self.initialized = False + self.val = None + self.avg = None + self.sum = None + self.count = 0.0 + + def initialize(self, val, n): + self.val = val + self.avg = val + self.sum = val * n + self.count = n + self.initialized = True + + def update(self, val, n=1): + if not self.initialized: + self.initialize(val, n) + else: + self.add(val, n) + + def add(self, val, n): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + +class ModelEma(torch.nn.Module): + def __init__(self, model, decay=0.999, device=None): + super().__init__() + # make a copy of the model for accumulating moving average of weights + self.module = deepcopy(model) + self.module.eval() + self.decay = decay + self.device = device # perform ema on different device from model if set + if self.device is not None: + self.module.to(device=device) + + def _update(self, model, update_fn): + with torch.no_grad(): + for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()): + if self.device is not None: + model_v = model_v.to(device=self.device) + ema_v.copy_(update_fn(ema_v, model_v)) + + def update(self, model): + self._update(model, update_fn=lambda e, m: self.decay * e + (1. - self.decay) * m) + + def set(self, model): + self._update(model, update_fn=lambda e, m: m) + + +################################################################################ +def train_one_epoch( + train_loader, + model, + optimizer, + scheduler, + curr_epoch, + model_ema = None, + clip_grad_l2norm = -1, + tb_writer = None, + print_freq = 20 +): + """Training the model for one epoch""" + # set up meters + batch_time = AverageMeter() + losses_tracker = {} + # number of iterations per epoch + num_iters = len(train_loader) + # switch to train mode + model.train() + + # main training loop + print("\n[Train]: Epoch {:d} started".format(curr_epoch)) + start = time.time() + for iter_idx, video_list in (enumerate(train_loader, 0)): + # zero out optim + optimizer.zero_grad(set_to_none=True) + # forward / backward the model + losses = model(video_list) + losses['final_loss'].backward() + # gradient cliping (to stabilize training if necessary) + if clip_grad_l2norm > 0.0: + torch.nn.utils.clip_grad_norm_( + model.parameters(), + clip_grad_l2norm + ) + # step optimizer / scheduler + optimizer.step() + scheduler.step() + + if model_ema is not None: + model_ema.update(model) + + # printing (only check the stats when necessary to avoid extra cost) + if (iter_idx != 0) and (iter_idx % print_freq) == 0: + # measure elapsed time (sync all kernels) + torch.cuda.synchronize() + batch_time.update((time.time() - start) / print_freq) + start = time.time() + + # track all losses + for key, value in losses.items(): + # init meter if necessary + if key not in losses_tracker: + losses_tracker[key] = AverageMeter() + # update + losses_tracker[key].update(value.item()) + + # log to tensor board + lr = scheduler.get_last_lr()[0] + global_step = curr_epoch * num_iters + iter_idx + if tb_writer is not None: + # learning rate (after stepping) + tb_writer.add_scalar( + 'train/learning_rate', + lr, + global_step + ) + # all losses + tag_dict = {} + for key, value in losses_tracker.items(): + if key != "final_loss": + tag_dict[key] = value.val + tb_writer.add_scalars( + 'train/all_losses', + tag_dict, + global_step + ) + # final loss + tb_writer.add_scalar( + 'train/final_loss', + losses_tracker['final_loss'].val, + global_step + ) + + # 
print to terminal + block1 = 'Epoch: [{:03d}][{:05d}/{:05d}]'.format( + curr_epoch, iter_idx, num_iters + ) + block2 = 'Time {:.2f} ({:.2f})'.format( + batch_time.val, batch_time.avg + ) + block3 = 'Loss {:.2f} ({:.2f})\n'.format( + losses_tracker['final_loss'].val, + losses_tracker['final_loss'].avg + ) + block4 = '' + for key, value in losses_tracker.items(): + if key != "final_loss": + block4 += '\t{:s} {:.2f} ({:.2f})'.format( + key, value.val, value.avg + ) + + # print('\t'.join([block1, block2, block3, block4])) + + # finish up and print + lr = scheduler.get_last_lr()[0] + # print("[Train]: Epoch {:d} finished with lr={:.8f}\n".format(curr_epoch, lr)) + return + + +def valid_one_epoch( + val_loader, + model, + curr_epoch, + ext_score_file = None, + evaluator = None, + output_file = None, + tb_writer = None, + print_freq = 20 +): + """Test the model on the validation set""" + # either evaluate the results or save the results + # assert (evaluator is not None) or (output_file is not None) + + # set up meters + batch_time = AverageMeter() + # switch to evaluate mode + model.eval() + # dict for results (for our evaluation code) + results = { + 'video-id': [], + 't-start' : [], + 't-end': [], + 'label': [], + 'score': [] + } + + # loop over validation set + start = time.time() + for iter_idx, video_list in enumerate(val_loader, 0): + # forward the model (wo. grad) + with torch.no_grad(): + output = model(video_list) + + # upack the results into ANet format + num_vids = len(output) + for vid_idx in range(num_vids): + if output[vid_idx]['segments'].shape[0] > 0: + results['video-id'].extend( + [output[vid_idx]['video_id']] * + output[vid_idx]['segments'].shape[0] + ) + results['t-start'].append(output[vid_idx]['segments'][:, 0]) + results['t-end'].append(output[vid_idx]['segments'][:, 1]) + results['label'].append(output[vid_idx]['labels']) + results['score'].append(output[vid_idx]['scores']) + + # printing + # if (iter_idx != 0) and iter_idx % (print_freq) == 0: + # # measure elapsed time (sync all kernels) + # torch.cuda.synchronize() + # batch_time.update((time.time() - start) / print_freq) + # start = time.time() + + # # print timing + # print('Test: [{0:05d}/{1:05d}]\t' + # 'Time {batch_time.val:.2f} ({batch_time.avg:.2f})'.format( + # iter_idx, len(val_loader), batch_time=batch_time)) + + # gather all stats and evaluate + results['t-start'] = torch.cat(results['t-start']).numpy() + results['t-end'] = torch.cat(results['t-end']).numpy() + results['label'] = torch.cat(results['label']).numpy() + results['score'] = torch.cat(results['score']).numpy() + + if evaluator is not None: + if (ext_score_file is not None) and isinstance(ext_score_file, str): + + results = postprocess_results(results, ext_score_file) + # call the evaluator + + _, mAP = evaluator.evaluate(results, verbose=True) + + + # log mAP to tb_writer + if tb_writer is not None: + tb_writer.add_scalar('validation/mAP', mAP, curr_epoch) + + return mAP diff --git a/Downstream/Temporal-Action-Localization/th14_run.sh b/Downstream/Temporal-Action-Localization/th14_run.sh new file mode 100644 index 0000000..dd32396 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/th14_run.sh @@ -0,0 +1 @@ +python -u ./train_eval.py ./configs/thumos.yaml --output th14_mae_h 2>&1 | tee th14_mae_h.log \ No newline at end of file diff --git a/Downstream/Temporal-Action-Localization/tools/run_all_exps.sh b/Downstream/Temporal-Action-Localization/tools/run_all_exps.sh new file mode 100644 index 0000000..d6e7642 --- /dev/null +++ 
b/Downstream/Temporal-Action-Localization/tools/run_all_exps.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +# THUMOS14 with I3D features +python ./train.py ./configs/thumos_i3d.yaml --output final 2>&1 | tee ./ckpt/thumos_log.txt +python ./eval.py ./configs/thumos_i3d.yaml ./ckpt/thumos_i3d_final/ 2>&1 | tee ./ckpt/thumos_results.txt +sleep 5 + +# EPIC-Kitchens verb with slowfast features +python ./train.py ./configs/epic_slowfast_verb.yaml --output final 2>&1 | tee ./ckpt/epic_verb_log.txt +python ./eval.py ./configs/epic_slowfast_verb.yaml ./ckpt/epic_slowfast_verb_final/ 2>&1 | tee ./ckpt/epic_verb_results.txt +sleep 5 + +# EPIC-Kitchens noun with slowfast features +python ./train.py ./configs/epic_slowfast_noun.yaml --output final 2>&1 | tee ./ckpt/epic_noun_log.txt +python ./eval.py ./configs/epic_slowfast_noun.yaml ./ckpt/epic_slowfast_noun_final/ 2>&1 | tee ./ckpt/epic_noun_results.txt +sleep 5 + +# ActivityNet 1.3 with TSP features (r(2+1d)-34) +python ./train.py ./configs/anet_tsp.yaml --output final 2>&1 | tee ./ckpt/anet_tsp_log.txt +python ./eval.py ./configs/anet_tsp.yaml ./ckpt/anet_tsp_final/ 2>&1 | tee ./ckpt/anet_tsp_results.txt diff --git a/Downstream/Temporal-Action-Localization/train_eval.py b/Downstream/Temporal-Action-Localization/train_eval.py new file mode 100644 index 0000000..8e3fb51 --- /dev/null +++ b/Downstream/Temporal-Action-Localization/train_eval.py @@ -0,0 +1,230 @@ +# python imports +import argparse +import os +import time +import datetime +from pprint import pprint + +# torch imports +import torch +import torch.nn as nn +import torch.utils.data +# for visualization +from torch.utils.tensorboard import SummaryWriter + +# our code +from libs.core import load_config +from libs.datasets import make_dataset, make_data_loader +from libs.modeling import make_meta_arch +from libs.utils import (train_one_epoch, valid_one_epoch, ANETdetection, + save_checkpoint, make_optimizer, make_scheduler, + fix_random_seed, ModelEma) + + +################################################################################ +def main(args): + """main function that handles training / inference""" + + """1. setup parameters / folders""" + # parse args + args.start_epoch = 0 + if os.path.isfile(args.config): + cfg = load_config(args.config) + else: + raise ValueError("Config file does not exist.") + pprint(cfg) + + # prep for output folder (based on time stamp) + if not os.path.exists(cfg['output_folder']): + os.makedirs(cfg['output_folder']) + cfg_filename = os.path.basename(args.config).replace('.yaml', '') + if len(args.output) == 0: + ts = datetime.datetime.fromtimestamp(int(time.time())) + ckpt_folder = os.path.join( + cfg['output_folder'], cfg_filename + '_' + str(ts)) + else: + ckpt_folder = os.path.join( + cfg['output_folder'], cfg_filename + '_' + str(args.output)+'_'+str(cfg['loader']['batch_size'])+'_'+str(cfg['opt']['learning_rate'])) + if not os.path.exists(ckpt_folder): + os.mkdir(ckpt_folder) + # tensorboard writer + tb_writer = SummaryWriter(os.path.join(ckpt_folder, 'logs')) + + # fix the random seeds (this will fix everything) + rng_generator = fix_random_seed(cfg['init_rand_seed'], include_cuda=True) + + # re-scale learning rate / # workers based on number of GPUs + cfg['opt']["learning_rate"] *= len(cfg['devices']) + cfg['loader']['num_workers'] *= len(cfg['devices']) + + """2. 
create dataset / dataloader""" + train_dataset = make_dataset( + cfg['dataset_name'], True, cfg['train_split'], **cfg['dataset'] + ) + # update cfg based on dataset attributes (fix to epic-kitchens) + train_db_vars = train_dataset.get_attributes() + cfg['model']['train_cfg']['head_empty_cls'] = train_db_vars['empty_label_ids'] + + # data loaders + train_loader = make_data_loader( + train_dataset, True, rng_generator, **cfg['loader']) + + """3. create model, optimizer, and scheduler""" + # model + model = make_meta_arch(cfg['model_name'], **cfg['model']) + # not ideal for multi GPU training, ok for now + model = nn.DataParallel(model, device_ids=cfg['devices']) + # optimizer + optimizer = make_optimizer(model, cfg['opt']) + # schedule + num_iters_per_epoch = len(train_loader) + scheduler = make_scheduler(optimizer, cfg['opt'], num_iters_per_epoch) + + # enable model EMA + print("Using model EMA ...") + model_ema = ModelEma(model) + + """4. Resume from model / Misc""" + # resume from a checkpoint? + if args.resume: + if os.path.isfile(args.resume): + # load ckpt, reset epoch / best rmse + checkpoint = torch.load(args.resume, + map_location = lambda storage, loc: storage.cuda( + cfg['devices'][0])) + args.start_epoch = checkpoint['epoch'] + 1 + model.load_state_dict(checkpoint['state_dict']) + model_ema.module.load_state_dict(checkpoint['state_dict_ema']) + # also load the optimizer / scheduler if necessary + optimizer.load_state_dict(checkpoint['optimizer']) + scheduler.load_state_dict(checkpoint['scheduler']) + print("=> loaded checkpoint '{:s}' (epoch {:d}".format( + args.resume, checkpoint['epoch'] + )) + del checkpoint + else: + print("=> no checkpoint found at '{}'".format(args.resume)) + return + + # save the current config + with open(os.path.join(ckpt_folder, 'config.txt'), 'w') as fid: + pprint(cfg, stream=fid) + fid.flush() + + """4. training / validation loop""" + print("\nStart training model {:s} ...".format(cfg['model_name'])) + + # start training + max_epochs = cfg['opt'].get( + 'early_stop_epochs', + cfg['opt']['epochs'] + cfg['opt']['warmup_epochs'] + ) + + + """2. create dataset / dataloader""" + val_dataset = make_dataset( + cfg['dataset_name'], False, cfg['val_split'], **cfg['dataset'] + ) + # set bs = 1, and disable shuffle + val_loader = make_data_loader( + val_dataset, False, None, 1, cfg['loader']['num_workers'] + ) + + + for epoch in range(args.start_epoch, max_epochs): + # train for one epoch + train_one_epoch( + train_loader, + model, + optimizer, + scheduler, + epoch, + model_ema = model_ema, + clip_grad_l2norm = cfg['train_cfg']['clip_grad_l2norm'], + tb_writer=tb_writer, + print_freq=args.print_freq + ) + + if epoch>=5:#(max_epochs//4): + + + # if epoch>1:#(max_epochs//3): + + # model + model_eval = make_meta_arch(cfg['model_name'], **cfg['model']) + # not ideal for multi GPU training, ok for now + model_eval = nn.DataParallel(model_eval, device_ids=cfg['devices']) + + + model_eval.load_state_dict(model_ema.module.state_dict()) + + + # set up evaluator + det_eval, output_file = None, None + # if not args.saveonly: + val_db_vars = val_dataset.get_attributes() + det_eval = ANETdetection( + val_dataset.json_file, + val_dataset.split[0], + tiou_thresholds = val_db_vars['tiou_thresholds'] + ) + # else: + # output_file = os.path.join('eval_results.pkl') + + """5. 
Test the model""" + print("\nStart testing model {:s} ...".format(cfg['model_name'])) + start = time.time() + mAP = valid_one_epoch( + val_loader, + model_eval, + -1, + evaluator=det_eval, + output_file=output_file, + ext_score_file=cfg['test_cfg']['ext_score_file'], + tb_writer=None, + print_freq=999999 #args.print_freq + ) + end = time.time() + # print("All done! Total time: {:0.2f} sec".format(end - start)) + print(epoch,mAP) + + save_states = { + 'epoch': epoch, + 'state_dict': model.state_dict(), + 'scheduler': scheduler.state_dict(), + 'optimizer': optimizer.state_dict(), + } + + save_states['state_dict_ema'] = model_ema.module.state_dict() + save_checkpoint( + save_states, + False, + file_folder=ckpt_folder, + file_name='epoch_{:03d}_{:.5f}.pth.tar'.format(epoch,mAP) + ) + + + + # wrap up + tb_writer.close() + print("All done!") + return + +################################################################################ +if __name__ == '__main__': + """Entry Point""" + # the arg parser + parser = argparse.ArgumentParser( + description='Train a point-based transformer for action localization') + parser.add_argument('config', metavar='DIR', + help='path to a config file') + parser.add_argument('-p', '--print-freq', default=10, type=int, + help='print frequency (default: 10 iterations)') + parser.add_argument('-c', '--ckpt-freq', default=5, type=int, + help='checkpoint frequency (default: every 5 epochs)') + parser.add_argument('--output', default='', type=str, + help='name of exp folder (default: none)') + parser.add_argument('--resume', default='', type=str, metavar='PATH', + help='path to a checkpoint (default: none)') + args = parser.parse_args() + main(args) diff --git a/Downstream/Video-Text-Retrieval/LICENSE b/Downstream/Video-Text-Retrieval/LICENSE new file mode 100644 index 0000000..c8ff084 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 ArrowLuo + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/Downstream/Video-Text-Retrieval/README.md b/Downstream/Video-Text-Retrieval/README.md new file mode 100644 index 0000000..01d6804 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/README.md @@ -0,0 +1,107 @@ +# Video-Text-Retrieval + +## Introduction + +A model achieves the state-of-the-art video-text retrieval performance on two settings, six datasets, and ten metrics. 
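+
+The ten metrics are recall at rank K (R@1/R@5/R@10, higher is better), median rank (MedR) and mean rank (MeanR, lower is better), reported for both text-to-video and video-to-text retrieval. For reference only, below is a minimal sketch (not part of this codebase; the function name is illustrative) of how such metrics are typically computed from a text-video similarity matrix, assuming ground-truth pairs lie on the diagonal; the video-to-text direction uses the transposed matrix.
+
+```python
+import numpy as np
+
+def retrieval_metrics(sim):
+    # sim: (N, N) text-video similarity matrix with ground-truth pairs on the diagonal,
+    # e.g. the cosine similarity between text and video embeddings.
+    order = np.argsort(-sim, axis=1)                             # videos ranked per text query
+    ranks = np.where(order == np.arange(len(sim))[:, None])[1]   # 0-based rank of the true video
+    return {
+        "R@1": 100.0 * np.mean(ranks < 1),
+        "R@5": 100.0 * np.mean(ranks < 5),
+        "R@10": 100.0 * np.mean(ranks < 10),
+        "MedR": float(np.median(ranks)) + 1,                     # ranks are reported 1-based
+        "MeanR": float(np.mean(ranks)) + 1,
+    }
+```
+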
+ +### Our Results +**Zero-Shot Video Retrieval** +|Dataset| Setting | R@1↑ | R@5↑ | R@10↑ | MedR↓ | MeanR↓ | +| :---: | :---: | :---: | :---: | :---: | :---: | :---: | +| MSRVTT | video-to-text | 37.5 | 63.3 | 71.3 | 3.0 | 24.2 | +| | text-to-video | 40 | 65.3 | 74.1 | 2.0 | 23.9 | +| MSVD | video-to-text | 67.6 | 90.6 | 94.6 | 1.0 | 2.9 | +| | text-to-video | 43.4|69.9|79.1|2.0|17.0| +| LSMDC | video-to-text | 13.2|27.8|34.9|33.0|113.6| +| | text-to-video | 17.6|32.4|40.2|23.0|101.7| +| ActivityNet | video-to-text | 31.4|59.4|73.1|3.0|15.6| +| | text-to-video | 30.7 | 57.4 | 70.2| 4.0 | 23.0| +| DiDeMo | video-to-text | 33.5 | 60.3|71.1|3.0|21.5| +| | text-to-video | 31.5 | 57.6 | 68.2 | 3.0 | 35.7 | +| VATEX | video-to-text | 69.5 | 95|98.1|1.0|2.1| +| | text-to-video | 49.5|79.7|87|2.0|9.7| + + + +**Video Retrieval with Full Finetuning** +|Dataset| Setting | R@1↑ | R@5↑ | R@10↑ | MedR↓ | MeanR↓ | +| :---: | :---: | :---: | :---: | :---: | :---: | :---: | +| MSRVTT | video-to-text | 57.9|79.2|86.4|1.0|7.5| +| | text-to-video | 55.2|79.6|87.5|1.0|10.7| +| MSVD | video-to-text | 76.3|96.8|98.7 |1.0|2.1| +| | text-to-video | 58.4|84.5|90.4|1.0|8.2| +| LSMDC | video-to-text | 34.9|54.6|63.1|4.0|32.9| +| | text-to-video | 34.0|53.7|62.9|4.0|38.7| +| ActivityNet | video-to-text | 62.8|86.2|93.3|1.0|3.5| +| | text-to-video | 62.2|85.9|93.2|1.0|3.9| +| DiDeMo | video-to-text | 59.1|81.8|89.0|1.0|7.2| +| | text-to-video | 57.9|82.4|88.9|1.0|9.2 | +| VATEX | video-to-text | 86.0|99.2|99.6|1.0|1.3| +| | text-to-video | 72.0|95.1|97.8|1.0|2.4| + +## Main Dependencies + +- CUDA Version 11.1 +- PyTorch 1.8.1 +- torchvision 0.9.0 +- python 3.6.9 + +## Usage + +### Data Preparation + +Download Original Dataset: [MSR-VTT](http://ms-multimedia-challenge.com/2017/dataset), [MSVD](https://www.cs.utexas.edu/users/ml/clamp/videoDescription/), [LSMDC](https://sites.google.com/site/describingmovies/download), [DiDeMo](https://github.com/LisaAnne/LocalizingMoments), [ActivityNet](http://activity-net.org/download.html), [VATEX](https://eric-xw.github.io/vatex-website/about.html). + +All annotation files can be downloaded from [here](https://pjlab-my.sharepoint.cn/:u:/g/personal/wangyi_pjlab_org_cn/EREJFyTbpwFPppzv3tBlHp4BMUHu2wveRamzqDPF2AdhQQ?e=VmmP4p). Unzip and put them in the *data/* folder. + +### Pre-processing (optional) + +`python preprocess/compress_video.py --input_root [raw_video_path] --output_root [compressed_video_path]` + +This script will compress the video to 3fps with width 224 (or height 224). Modify the variables for your customization. + +### Pre-trained weights + +The pre-trained ViT weights of [CLIP](https://openai.com/blog/clip/) can be found here: [ViT-B/32](https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt), [ViT-B/16](https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt), [ViT-L/14](https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt). + +Our fine-tuned retrieval checkpoint on each dataset will be released soon. + +### Zeroshot evaluation + +All zero-shot scripts are provided in the *zeroshot_scripts/* folder. Be free to try different hyper-parameters. +```sh +./zeroshot_scripts/eval_msrvtt.sh +``` + +### Fine-Tune + +All fine-tune scripts are provided in the *finetune_scripts/* folder, and the **train_${dataset_name}.sh** files are used to fine-tune our InternVideo model. 
Be free to try different hyper-parameters. +```sh +./finetune_scripts/train_msrvtt.sh +``` + +### Evaluate the finetuned model + +All the test scripts for evaluating the finetuned checkpoints are provided in the *eval_finetuned_scripts/* folder. The scripts are slightly different from the zero-shot evaluation scripts. +```sh +./eval_finetuned_scripts/eval_finetuned_msrvtt.sh +``` + + +## License + +This project is released under the MIT license. + +## Acknowledgments + +Our codebase is based on [CLIP4clip](https://github.com/ArrowLuo/CLIP4Clip). + diff --git a/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/data_dataloaders.cpython-36.pyc b/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/data_dataloaders.cpython-36.pyc new file mode 100644 index 0000000..e6d5b9b Binary files /dev/null and b/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/data_dataloaders.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/dataloader_activitynet_retrieval.cpython-36.pyc b/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/dataloader_activitynet_retrieval.cpython-36.pyc new file mode 100644 index 0000000..c43cf20 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/dataloader_activitynet_retrieval.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/dataloader_didemo_retrieval.cpython-36.pyc b/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/dataloader_didemo_retrieval.cpython-36.pyc new file mode 100644 index 0000000..fb9dfa1 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/dataloader_didemo_retrieval.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/dataloader_ego4d.cpython-36.pyc b/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/dataloader_ego4d.cpython-36.pyc new file mode 100644 index 0000000..d1e3955 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/dataloader_ego4d.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/dataloader_lsmdc_retrieval.cpython-36.pyc b/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/dataloader_lsmdc_retrieval.cpython-36.pyc new file mode 100644 index 0000000..f0a6570 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/dataloader_lsmdc_retrieval.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/dataloader_msrvtt_retrieval.cpython-36.pyc b/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/dataloader_msrvtt_retrieval.cpython-36.pyc new file mode 100644 index 0000000..abcac47 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/dataloader_msrvtt_retrieval.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/dataloader_msvd_retrieval.cpython-36.pyc b/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/dataloader_msvd_retrieval.cpython-36.pyc new file mode 100644 index 0000000..0928e22 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/dataloader_msvd_retrieval.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/dataloader_vatex_retrieval.cpython-36.pyc b/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/dataloader_vatex_retrieval.cpython-36.pyc new file mode 100644 index 0000000..3fbda1c Binary files /dev/null and 
b/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/dataloader_vatex_retrieval.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/rawvideo_util.cpython-36.pyc b/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/rawvideo_util.cpython-36.pyc new file mode 100644 index 0000000..4bbdf35 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/dataloaders/__pycache__/rawvideo_util.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/dataloaders/data_dataloaders.py b/Downstream/Video-Text-Retrieval/dataloaders/data_dataloaders.py new file mode 100644 index 0000000..586cbc1 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/dataloaders/data_dataloaders.py @@ -0,0 +1,315 @@ +import torch +from torch.utils.data import DataLoader +from dataloaders.dataloader_msrvtt_retrieval import MSRVTT_DataLoader +from dataloaders.dataloader_msrvtt_retrieval import MSRVTT_TrainDataLoader +from dataloaders.dataloader_msvd_retrieval import MSVD_DataLoader +from dataloaders.dataloader_lsmdc_retrieval import LSMDC_DataLoader +from dataloaders.dataloader_activitynet_retrieval import ActivityNet_DataLoader +from dataloaders.dataloader_didemo_retrieval import DiDeMo_DataLoader +from dataloaders.dataloader_vatex_retrieval import VATEX_DataLoader + +def dataloader_msrvtt_train(args, tokenizer): + msrvtt_dataset = MSRVTT_TrainDataLoader( + csv_path=args.train_csv, + json_path=args.data_path, + features_path=args.features_path, + max_words=args.max_words, + feature_framerate=args.feature_framerate, + tokenizer=tokenizer, + max_frames=args.max_frames, + unfold_sentences=args.expand_msrvtt_sentences, + frame_order=args.train_frame_order, + slice_framepos=args.slice_framepos, + ) + + train_sampler = torch.utils.data.distributed.DistributedSampler(msrvtt_dataset) + dataloader = DataLoader( + msrvtt_dataset, + batch_size=args.batch_size // args.n_gpu, + num_workers=args.num_thread_reader, + pin_memory=False, + shuffle=(train_sampler is None), + sampler=train_sampler, + drop_last=True, + persistent_workers=True, + ) + + return dataloader, len(msrvtt_dataset), train_sampler + +def dataloader_msrvtt_test(args, tokenizer, subset="test"): + msrvtt_testset = MSRVTT_DataLoader( + csv_path=args.val_csv, + features_path=args.features_path, + max_words=args.max_words, + feature_framerate=args.feature_framerate, + tokenizer=tokenizer, + max_frames=args.max_frames, + frame_order=args.eval_frame_order, + slice_framepos=args.slice_framepos, + ) + dataloader_msrvtt = DataLoader( + msrvtt_testset, + batch_size=args.batch_size_val, + num_workers=args.num_thread_reader, + shuffle=False, + drop_last=False, + persistent_workers=True, + ) + return dataloader_msrvtt, len(msrvtt_testset) + + +def dataloader_msvd_train(args, tokenizer): + msvd_dataset = MSVD_DataLoader( + subset="train", + data_path=args.data_path, + features_path=args.features_path, + max_words=args.max_words, + feature_framerate=args.feature_framerate, + tokenizer=tokenizer, + max_frames=args.max_frames, + frame_order=args.train_frame_order, + slice_framepos=args.slice_framepos, + ) + + train_sampler = torch.utils.data.distributed.DistributedSampler(msvd_dataset) + dataloader = DataLoader( + msvd_dataset, + batch_size=args.batch_size // args.n_gpu, + num_workers=args.num_thread_reader, + pin_memory=False, + shuffle=(train_sampler is None), + sampler=train_sampler, + drop_last=True, + persistent_workers=True, + ) + + return dataloader, len(msvd_dataset), train_sampler + +def dataloader_msvd_test(args, tokenizer, 
subset="test"): + msvd_testset = MSVD_DataLoader( + subset=subset, + data_path=args.data_path, + features_path=args.features_path, + max_words=args.max_words, + feature_framerate=args.feature_framerate, + tokenizer=tokenizer, + max_frames=args.max_frames, + frame_order=args.eval_frame_order, + slice_framepos=args.slice_framepos, + ) + dataloader_msrvtt = DataLoader( + msvd_testset, + batch_size=args.batch_size_val, + num_workers=args.num_thread_reader, + shuffle=False, + drop_last=False, + persistent_workers=True, + ) + return dataloader_msrvtt, len(msvd_testset) + + +def dataloader_lsmdc_train(args, tokenizer): + lsmdc_dataset = LSMDC_DataLoader( + subset="train", + data_path=args.data_path, + features_path=args.features_path, + max_words=args.max_words, + feature_framerate=args.feature_framerate, + tokenizer=tokenizer, + max_frames=args.max_frames, + frame_order=args.train_frame_order, + slice_framepos=args.slice_framepos, + + ) + + train_sampler = torch.utils.data.distributed.DistributedSampler(lsmdc_dataset) + dataloader = DataLoader( + lsmdc_dataset, + batch_size=args.batch_size // args.n_gpu, + num_workers=args.num_thread_reader, + pin_memory=False, + shuffle=(train_sampler is None), + sampler=train_sampler, + drop_last=True, + persistent_workers=True, + ) + + return dataloader, len(lsmdc_dataset), train_sampler + +def dataloader_lsmdc_test(args, tokenizer, subset="test"): + lsmdc_testset = LSMDC_DataLoader( + subset=subset, + data_path=args.data_path, + features_path=args.features_path, + max_words=args.max_words, + feature_framerate=args.feature_framerate, + tokenizer=tokenizer, + max_frames=args.max_frames, + frame_order=args.eval_frame_order, + slice_framepos=args.slice_framepos, + ) + dataloader_msrvtt = DataLoader( + lsmdc_testset, + batch_size=args.batch_size_val, + num_workers=args.num_thread_reader, + shuffle=False, + drop_last=False, + persistent_workers=True, + ) + return dataloader_msrvtt, len(lsmdc_testset) + + +def dataloader_activity_train(args, tokenizer): + activity_dataset = ActivityNet_DataLoader( + subset="train", + data_path=args.data_path, + features_path=args.features_path, + max_words=args.max_words, + feature_framerate=args.feature_framerate, + tokenizer=tokenizer, + max_frames=args.max_frames, + frame_order=args.train_frame_order, + slice_framepos=args.slice_framepos, + ) + + train_sampler = torch.utils.data.distributed.DistributedSampler(activity_dataset) + dataloader = DataLoader( + activity_dataset, + batch_size=args.batch_size // args.n_gpu, + num_workers=args.num_thread_reader, + pin_memory=False, + shuffle=(train_sampler is None), + sampler=train_sampler, + drop_last=True, + persistent_workers=True, + ) + + return dataloader, len(activity_dataset), train_sampler + +def dataloader_activity_test(args, tokenizer, subset="test"): + activity_testset = ActivityNet_DataLoader( + subset=subset, + data_path=args.data_path, + features_path=args.features_path, + max_words=args.max_words, + feature_framerate=args.feature_framerate, + tokenizer=tokenizer, + max_frames=args.max_frames, + frame_order=args.eval_frame_order, + slice_framepos=args.slice_framepos, + ) + dataloader_msrvtt = DataLoader( + activity_testset, + batch_size=args.batch_size_val, + num_workers=args.num_thread_reader, + shuffle=False, + drop_last=False, + persistent_workers=True, + ) + return dataloader_msrvtt, len(activity_testset) + + +def dataloader_didemo_train(args, tokenizer): + didemo_dataset = DiDeMo_DataLoader( + subset="train", + data_path=args.data_path, + 
features_path=args.features_path, + max_words=args.max_words, + feature_framerate=args.feature_framerate, + tokenizer=tokenizer, + max_frames=args.max_frames, + frame_order=args.train_frame_order, + slice_framepos=args.slice_framepos, + ) + + train_sampler = torch.utils.data.distributed.DistributedSampler(didemo_dataset) + dataloader = DataLoader( + didemo_dataset, + batch_size=args.batch_size // args.n_gpu, + num_workers=args.num_thread_reader, + pin_memory=False, + shuffle=(train_sampler is None), + sampler=train_sampler, + drop_last=True, + persistent_workers=True, + ) + + return dataloader, len(didemo_dataset), train_sampler + +def dataloader_didemo_test(args, tokenizer, subset="test"): + didemo_testset = DiDeMo_DataLoader( + subset=subset, + data_path=args.data_path, + features_path=args.features_path, + max_words=args.max_words, + feature_framerate=args.feature_framerate, + tokenizer=tokenizer, + max_frames=args.max_frames, + frame_order=args.eval_frame_order, + slice_framepos=args.slice_framepos, + ) + dataloader_didemo = DataLoader( + didemo_testset, + batch_size=args.batch_size_val, + num_workers=args.num_thread_reader, + shuffle=False, + drop_last=False, + persistent_workers=True, + ) + return dataloader_didemo, len(didemo_testset) + +def dataloader_vatex_train(args, tokenizer): + vatex_dataset = VATEX_DataLoader( + subset="train", + data_path=args.data_path, + features_path=args.features_path, + max_words=args.max_words, + feature_framerate=args.feature_framerate, + tokenizer=tokenizer, + max_frames=args.max_frames, + frame_order=args.train_frame_order, + slice_framepos=args.slice_framepos, + ) + + train_sampler = torch.utils.data.distributed.DistributedSampler(vatex_dataset) + dataloader = DataLoader( + vatex_dataset, + batch_size=args.batch_size // args.n_gpu, + num_workers=args.num_thread_reader, + pin_memory=False, + shuffle=(train_sampler is None), + sampler=train_sampler, + drop_last=True, + ) + + return dataloader, len(vatex_dataset), train_sampler + +def dataloader_vatex_test(args, tokenizer, subset="test"): + vatex_testset = VATEX_DataLoader( + subset=subset, + data_path=args.data_path, + features_path=args.features_path, + max_words=args.max_words, + feature_framerate=args.feature_framerate, + tokenizer=tokenizer, + max_frames=args.max_frames, + frame_order=args.eval_frame_order, + slice_framepos=args.slice_framepos, + ) + dataloader_msrvtt = DataLoader( + vatex_testset, + batch_size=args.batch_size_val, + num_workers=args.num_thread_reader, + shuffle=False, + drop_last=False, + ) + return dataloader_msrvtt, len(vatex_testset) + + +DATALOADER_DICT = {} +DATALOADER_DICT["msrvtt"] = {"train":dataloader_msrvtt_train, "val":dataloader_msrvtt_test, "test":None} +DATALOADER_DICT["msvd"] = {"train":dataloader_msvd_train, "val":dataloader_msvd_test, "test":dataloader_msvd_test} +DATALOADER_DICT["lsmdc"] = {"train":dataloader_lsmdc_train, "val":dataloader_lsmdc_test, "test":dataloader_lsmdc_test} +DATALOADER_DICT["activity"] = {"train":dataloader_activity_train, "val":dataloader_activity_test, "test":None} +DATALOADER_DICT["didemo"] = {"train":dataloader_didemo_train, "val":dataloader_didemo_test, "test":dataloader_didemo_test} +DATALOADER_DICT["vatex"] = {"train":dataloader_vatex_train, "val":dataloader_vatex_test, "test":dataloader_vatex_test} \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/dataloaders/dataloader_activitynet_retrieval.py b/Downstream/Video-Text-Retrieval/dataloaders/dataloader_activitynet_retrieval.py new file mode 100644 index 
0000000..9de7664 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/dataloaders/dataloader_activitynet_retrieval.py @@ -0,0 +1,330 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import unicode_literals +from __future__ import print_function + +import io +import os +from torch.utils.data import Dataset +import numpy as np +import json +import math +from dataloaders.rawvideo_util import RawVideoExtractor +from decord import VideoReader, cpu +import torch +from PIL import Image +from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize, InterpolationMode + +try: + from petrel_client.client import Client + client = Client() + + # Disable boto logger + import logging + logging.getLogger('boto3').setLevel(logging.WARNING) + logging.getLogger('botocore').setLevel(logging.WARNING) + logging.getLogger('nose').setLevel(logging.WARNING) +except: + client = None + +class ActivityNet_DataLoader(Dataset): + def __init__( + self, + subset, + data_path, + features_path, + tokenizer, + max_words=30, + feature_framerate=1.0, + max_frames=100, + image_resolution=224, + frame_order=0, + slice_framepos=0, + ): + self.data_path = data_path + self.features_path = features_path + self.feature_framerate = feature_framerate + self.image_resolution = image_resolution + self.max_words = max_words + self.max_frames = max_frames + self.tokenizer = tokenizer + # 0: ordinary order; 1: reverse order; 2: random order. + self.frame_order = frame_order + assert self.frame_order in [0, 1, 2] + # 0: cut from head frames; 1: cut from tail frames; 2: extract frames uniformly. + self.slice_framepos = slice_framepos + assert self.slice_framepos in [0, 1, 2] + + self.subset = subset + assert self.subset in ["train", "val"] + + video_id_path_dict = {} + video_id_path_dict["train"] = os.path.join(self.data_path, "train_ids.json") + video_id_path_dict["val"] = os.path.join(self.data_path, "val_ids.json") + + video_json_path_dict = {} + video_json_path_dict["train"] = os.path.join(self.data_path, "train.json") + video_json_path_dict["val"] = os.path.join(self.data_path, "val_1.json") + + pseudo_video_id_list, video_id_list = self._get_video_id_single(video_id_path_dict[self.subset]) + pseudo_caption_dict = self._get_captions_single(video_json_path_dict[self.subset]) + + # print("video id list: {}".format(len(video_id_list))) + # print("pseudo caption dict: {}".format(len(pseudo_caption_dict.keys()))) + + video_dict = {} + for content in client.list(self.features_path): + if content.endswith('/'): + video_cur_path = os.path.join(self.features_path, content) + video_files = client.list(video_cur_path) + for video_file in video_files: + video_id_ = ".".join(video_file.split(".")[:-1]) + if video_id_ not in pseudo_video_id_list: + continue + file_path_ = os.path.join(video_cur_path, video_file) + video_dict[video_id_] = file_path_ + self.video_dict = video_dict + # print("video dict: {}".format(len(video_dict))) + self.pseudo_video_id_list = pseudo_video_id_list + self.video_id_list = video_id_list + self.pseudo_caption_dict = pseudo_caption_dict + # Get iterator video ids + self.video_id2idx_dict = {pseudo_video_id: id for id, pseudo_video_id in enumerate(self.pseudo_video_id_list)} + # Get all captions + self.iter2video_pairs_dict = {} + for pseudo_video_id in self.pseudo_video_id_list: + if pseudo_video_id not in self.pseudo_caption_dict: + continue + caption = self.pseudo_caption_dict[pseudo_video_id] + n_caption = len(caption['start']) + for sub_id in 
range(n_caption): + self.iter2video_pairs_dict[len(self.iter2video_pairs_dict)] = (pseudo_video_id, sub_id) + # print(f"{subset} iter2video pairs dict: {len(self.iter2video_pairs_dict)}") + # print(f"{subset} pseudo caption dict: {len(pseudo_caption_dict)}") + self.rawVideoExtractor = RawVideoExtractor(framerate=feature_framerate, size=image_resolution) + self.SPECIAL_TOKEN = {"CLS_TOKEN": "<|startoftext|>", "SEP_TOKEN": "<|endoftext|>", + "MASK_TOKEN": "[MASK]", "UNK_TOKEN": "[UNK]", "PAD_TOKEN": "[PAD]"} + self.transform = Compose([ + Resize(image_resolution, interpolation=InterpolationMode.BICUBIC), + CenterCrop(image_resolution), + lambda image: image.convert("RGB"), + ToTensor(), + Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), + # Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ]) + + def __len__(self): + return len(self.iter2video_pairs_dict) + + def _get_video_id_from_pseduo(self, pseudo_video_id): + video_id = pseudo_video_id[2:] + return video_id + + def _get_video_id_single(self, path): + pseudo_video_id_list = [] + video_id_list = [] + # print('Loading json: {}'.format(path)) + with open(path, 'r') as f: + json_data = json.load(f) + + for pseudo_video_id in json_data: + if pseudo_video_id in pseudo_video_id_list: + # print("reduplicate.") + pass + else: + video_id = self._get_video_id_from_pseduo(pseudo_video_id) + pseudo_video_id_list.append(pseudo_video_id) + video_id_list.append(video_id) + return pseudo_video_id_list, video_id_list + + def _get_captions_single(self, path): + pseudo_caption_dict = {} + with open(path, 'r') as f: + json_data = json.load(f) + + for pseudo_video_id, v_ in json_data.items(): + pseudo_caption_dict[pseudo_video_id] = {} + duration = v_["duration"] + pseudo_caption_dict[pseudo_video_id]["start"] = np.array([0], dtype=object) + pseudo_caption_dict[pseudo_video_id]["end"] = np.array([int(math.ceil(float(duration)))], dtype=object) + pseudo_caption_dict[pseudo_video_id]["text"] = np.array([" ".join(v_["sentences"])], dtype=object) + return pseudo_caption_dict + + def _get_text(self, pseudo_video_id, sub_id): + caption = self.pseudo_caption_dict[pseudo_video_id] + k = 1 + r_ind = [sub_id] + + starts = np.zeros(k, dtype=np.long) + ends = np.zeros(k, dtype=np.long) + pairs_text = np.zeros((k, self.max_words), dtype=np.long) + pairs_mask = np.zeros((k, self.max_words), dtype=np.long) + pairs_segment = np.zeros((k, self.max_words), dtype=np.long) + + for i in range(k): + ind = r_ind[i] + start_, end_ = caption['start'][ind], caption['end'][ind] + words = self.tokenizer.tokenize(caption['text'][ind]) + starts[i], ends[i] = start_, end_ + + words = [self.SPECIAL_TOKEN["CLS_TOKEN"]] + words + total_length_with_CLS = self.max_words - 1 + if len(words) > total_length_with_CLS: + words = words[:total_length_with_CLS] + words = words + [self.SPECIAL_TOKEN["SEP_TOKEN"]] + + input_ids = self.tokenizer.convert_tokens_to_ids(words) + input_mask = [1] * len(input_ids) + segment_ids = [0] * len(input_ids) + while len(input_ids) < self.max_words: + input_ids.append(0) + input_mask.append(0) + segment_ids.append(0) + assert len(input_ids) == self.max_words + assert len(input_mask) == self.max_words + assert len(segment_ids) == self.max_words + + pairs_text[i] = np.array(input_ids) + pairs_mask[i] = np.array(input_mask) + pairs_segment[i] = np.array(segment_ids) + + return pairs_text, pairs_mask, pairs_segment, starts, ends + + def _get_rawvideo(self, idx, s, e): + video_mask = np.zeros((len(s), self.max_frames), 
dtype=np.long) + max_video_length = [0] * len(s) + + # Pair x L x T x 3 x H x W + video = np.zeros((len(s), self.max_frames, 1, 3, + self.rawVideoExtractor.size, self.rawVideoExtractor.size), dtype=np.float) + # video_path = self.video_dict[idx] + video_path = os.path.join(self.features_path, self.subset, "{}.mp4".format("v_"+idx)) + try: + for i in range(len(s)): + start_time = int(s[i]) + end_time = int(e[i]) + start_time = start_time if start_time >= 0. else 0. + end_time = end_time if end_time >= 0. else 0. + if start_time > end_time: + start_time, end_time = end_time, start_time + elif start_time == end_time: + end_time = end_time + 1 + + # Should be optimized by gathering all asking of this video + raw_video_data = self.rawVideoExtractor.get_video_data(video_path, start_time, end_time) + raw_video_data = raw_video_data['video'] + + if len(raw_video_data.shape) > 3: + raw_video_data_clip = raw_video_data + # L x T x 3 x H x W + raw_video_slice = self.rawVideoExtractor.process_raw_data(raw_video_data_clip) + if self.max_frames < raw_video_slice.shape[0]: + if self.slice_framepos == 0: + video_slice = raw_video_slice[:self.max_frames, ...] + elif self.slice_framepos == 1: + video_slice = raw_video_slice[-self.max_frames:, ...] + else: + sample_indx = np.linspace(0, raw_video_slice.shape[0] - 1, num=self.max_frames, dtype=int) + video_slice = raw_video_slice[sample_indx, ...] + else: + video_slice = raw_video_slice + + video_slice = self.rawVideoExtractor.process_frame_order(video_slice, frame_order=self.frame_order) + + slice_len = video_slice.shape[0] + max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_len else slice_len + if slice_len < 1: + pass + else: + video[i][:slice_len, ...] = video_slice + else: + print("video path: {} error. video id: {}, start: {}, end: {}".format(video_path, idx, start_time, end_time)) + except Exception as excep: + print("video path: {} error. video id: {}, start: {}, end: {}, Error: {}".format(video_path, idx, s, e, excep)) + raise excep + + for i, v_length in enumerate(max_video_length): + video_mask[i][:v_length] = [1] * v_length + + return video, video_mask + + def _get_rawvideo_dec(self, choice_video_ids, s=None, e=None): + # speed up video decode via decord. + # video_mask = np.zeros(self.max_frames, dtype=np.long) + choice_video_ids = [choice_video_ids] + video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.long) + + # max_video_length = 0 + max_video_length = [0] * len(choice_video_ids) + + # T x 3 x H x W + # video = np.zeros((self.max_frames, 3, self.image_resolution, self.image_resolution), dtype=np.float) + video = np.zeros((len(choice_video_ids), self.max_frames, 1, 3, + self.image_resolution, self.image_resolution), dtype=np.float) + + if s is None: + start_time, end_time = None, None + else: + start_time = int(s) + end_time = int(e) + start_time = start_time if start_time >= 0. else 0. + end_time = end_time if end_time >= 0. else 0. 
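+            # Sampling logic below, with illustrative numbers: for fps = 30 and feature_framerate = 1,
+            # t_stride = round(30 / 1) = 30, i.e. roughly one frame per second; with start_time = 2,
+            # end_time = 10 and a long enough clip, f_start = 60, f_end = 300, so
+            # all_pos = [60, 90, ..., 300]. If more than max_frames positions remain,
+            # they are thinned uniformly with np.linspace before decoding.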
+ if start_time > end_time: + start_time, end_time = end_time, start_time + elif start_time == end_time: + end_time = start_time + 1 + # video_path = self.video_dict[video_id] + for i, video_id in enumerate(choice_video_ids): + video_path = os.path.join(self.features_path, self.subset, "{}.mp4".format("v_"+video_id)) + if video_path.startswith("s3://"): + video_bytes = client.get(video_path) + assert video_bytes is not None, "Get video failed from {}".format(video_path) + video_path = io.BytesIO(video_bytes) + vreader = VideoReader(video_path, ctx=cpu(0)) + fps = vreader.get_avg_fps() + f_start = 0 if start_time is None else int(start_time * fps) + f_end = int(min(1000000000 if end_time is None else end_time * fps, len(vreader) - 1)) + num_frames = f_end - f_start + 1 + if num_frames > 0: + # T x 3 x H x W + # sample_fps = int(self.video_framerate) + sample_fps = int(self.feature_framerate) + t_stride = int(round(float(fps) / sample_fps)) + + all_pos = list(range(f_start, f_end + 1, t_stride)) + if len(all_pos) > self.max_frames: + sample_pos = [all_pos[_] for _ in np.linspace(0, len(all_pos) - 1, num=self.max_frames, dtype=int)] + else: + sample_pos = all_pos + + patch_images = [Image.fromarray(f) for f in vreader.get_batch(sample_pos).asnumpy()] + patch_images = torch.stack([self.transform(img) for img in patch_images]) + + patch_images = patch_images.unsqueeze(1) + + slice_len = patch_images.shape[0] + # max_video_length = max_video_length if max_video_length > slice_len else slice_len + max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_len else slice_len + if slice_len < 1: + pass + else: + video[i][:slice_len, ...] = patch_images + else: + print("video path: {} error. video id: {}".format(video_path, video_id)) + + # video_mask[:max_video_length] = [1] * max_video_length + for i, v_length in enumerate(max_video_length): + video_mask[i][:v_length] = [1] * v_length + + # print(video.shape, video_mask.shape) + return video, video_mask + + def __getitem__(self, feature_idx): + # print(f"cur idx: {feature_idx}") + pseudo_video_id, sub_id = self.iter2video_pairs_dict[feature_idx] + idx = self.video_id2idx_dict[pseudo_video_id] + + pairs_text, pairs_mask, pairs_segment, starts, ends = self._get_text(pseudo_video_id, sub_id) + # video, video_mask = self._get_rawvideo_dec(self.video_id_list, starts, ends) + video, video_mask = self._get_rawvideo(self.video_id_list[idx], starts, ends) + return pairs_text, pairs_mask, pairs_segment, video, video_mask diff --git a/Downstream/Video-Text-Retrieval/dataloaders/dataloader_didemo_retrieval.py b/Downstream/Video-Text-Retrieval/dataloaders/dataloader_didemo_retrieval.py new file mode 100644 index 0000000..af80f9b --- /dev/null +++ b/Downstream/Video-Text-Retrieval/dataloaders/dataloader_didemo_retrieval.py @@ -0,0 +1,336 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import unicode_literals +from __future__ import print_function + +import os +from torch.utils.data import Dataset +import numpy as np +import json +from dataloaders.rawvideo_util import RawVideoExtractor +import io +from decord import VideoReader, cpu +import torch +from PIL import Image +from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize, InterpolationMode + +try: + from petrel_client.client import Client + client = Client() + + # Disable boto logger + import logging + logging.getLogger('boto3').setLevel(logging.WARNING) + logging.getLogger('botocore').setLevel(logging.WARNING) + 
logging.getLogger('nose').setLevel(logging.WARNING) +except: + client = None + + +class DiDeMo_DataLoader(Dataset): + def __init__( + self, + subset, + data_path, + features_path, + tokenizer, + max_words=30, + feature_framerate=1.0, + max_frames=100, + image_resolution=224, + frame_order=0, + slice_framepos=0, + ): + self.data_path = data_path + self.features_path = features_path + self.feature_framerate = feature_framerate + self.max_words = max_words + self.max_frames = max_frames + self.image_resolution = image_resolution + self.tokenizer = tokenizer + # 0: ordinary order; 1: reverse order; 2: random order. + self.frame_order = frame_order + assert self.frame_order in [0, 1, 2] + # 0: cut from head frames; 1: cut from tail frames; 2: extract frames uniformly. + self.slice_framepos = slice_framepos + assert self.slice_framepos in [0, 1, 2] + + self.subset = subset + assert self.subset in ["train", "val", "test"] + + video_id_path_dict = {} + video_id_path_dict["train"] = os.path.join(self.data_path, "train_list.txt") + video_id_path_dict["val"] = os.path.join(self.data_path, "val_list.txt") + video_id_path_dict["test"] = os.path.join(self.data_path, "test_list.txt") + + with open(video_id_path_dict[self.subset], 'r') as fp: + video_ids = [itm.strip()+".mp4" for itm in fp.readlines()] + + video_json_path_dict = {} + video_json_path_dict["train"] = os.path.join(self.data_path, "train_data.json") + video_json_path_dict["val"] = os.path.join(self.data_path, "val_data.json") + video_json_path_dict["test"] = os.path.join(self.data_path, "test_data.json") + + caption_dict = {} + with open(video_json_path_dict[self.subset], 'r') as f: + json_data = json.load(f) + for itm in json_data: + description = itm["description"] + times = itm["times"] + video = itm["video"] + ".mp4" + if video not in video_ids: + continue + + # each video is split into 5-second temporal chunks + # average the points from each annotator + start_ = np.mean([t_[0] for t_ in times]) * 5 + end_ = (np.mean([t_[1] for t_ in times]) + 1) * 5 + if video in caption_dict: + caption_dict[video]["start"].append(start_) + caption_dict[video]["end"].append(end_) + caption_dict[video]["text"].append(description) + else: + caption_dict[video] = {} + caption_dict[video]["start"] = [start_] + caption_dict[video]["end"] = [end_] + caption_dict[video]["text"] = [description] + + for k_ in caption_dict.keys(): + caption_dict[k_]["start"] = [0] + # trick to save time on obtaining each video length + # [https://github.com/LisaAnne/LocalizingMoments/blob/master/README.md]: + # Some videos are longer than 30 seconds. These videos were truncated to 30 seconds during annotation. 
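+            # a fixed [0, 31] second window is therefore guaranteed to cover the whole annotated video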
+ caption_dict[k_]["end"] = [31] + caption_dict[k_]["text"] = [" ".join(caption_dict[k_]["text"])] + + video_dict = {} + # for root, dub_dir, video_files in os.walk(self.features_path): + # for video_file in video_files: + # video_id_ = video_file + # if video_id_ not in video_ids: + # continue + # file_path_ = os.path.join(root, video_file) + # video_dict[video_id_] = file_path_ + for video_file in client.list(self.features_path): + video_id_ = video_file + if video_id_ not in video_ids: + continue + file_path_ = os.path.join(self.features_path, video_file) + video_dict[video_id_] = file_path_ + + self.caption_dict = caption_dict + self.video_dict = video_dict + video_ids = list(set(video_ids) & set(self.caption_dict.keys()) & set(self.video_dict.keys())) + + # Get all captions + self.iter2video_pairs_dict = {} + for video_id in self.caption_dict.keys(): + if video_id not in video_ids: + continue + caption = self.caption_dict[video_id] + n_caption = len(caption['start']) + for sub_id in range(n_caption): + self.iter2video_pairs_dict[len(self.iter2video_pairs_dict)] = (video_id, sub_id) + + print(f"caption dict len: {len(self.caption_dict)}") + print(f"video ids len: {len(video_ids)}") + print(f"iter2video pairs dict len: {len(self.iter2video_pairs_dict)}") + print(f"video dict len: {len(self.video_dict)}") + + self.rawVideoExtractor = RawVideoExtractor(framerate=feature_framerate, size=image_resolution) + self.SPECIAL_TOKEN = {"CLS_TOKEN": "<|startoftext|>", "SEP_TOKEN": "<|endoftext|>", + "MASK_TOKEN": "[MASK]", "UNK_TOKEN": "[UNK]", "PAD_TOKEN": "[PAD]"} + + self.transform = Compose([ + Resize(image_resolution, interpolation=InterpolationMode.BICUBIC), + CenterCrop(image_resolution), + lambda image: image.convert("RGB"), + ToTensor(), + Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), + # Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ]) + + + def __len__(self): + return len(self.iter2video_pairs_dict) + + def _get_text(self, video_id, sub_id): + caption = self.caption_dict[video_id] + k = 1 + r_ind = [sub_id] + + starts = np.zeros(k, dtype=np.long) + ends = np.zeros(k, dtype=np.long) + pairs_text = np.zeros((k, self.max_words), dtype=np.long) + pairs_mask = np.zeros((k, self.max_words), dtype=np.long) + pairs_segment = np.zeros((k, self.max_words), dtype=np.long) + + for i in range(k): + ind = r_ind[i] + start_, end_ = caption['start'][ind], caption['end'][ind] + words = self.tokenizer.tokenize(caption['text'][ind]) + starts[i], ends[i] = start_, end_ + + words = [self.SPECIAL_TOKEN["CLS_TOKEN"]] + words + total_length_with_CLS = self.max_words - 1 + if len(words) > total_length_with_CLS: + words = words[:total_length_with_CLS] + words = words + [self.SPECIAL_TOKEN["SEP_TOKEN"]] + + input_ids = self.tokenizer.convert_tokens_to_ids(words) + input_mask = [1] * len(input_ids) + segment_ids = [0] * len(input_ids) + while len(input_ids) < self.max_words: + input_ids.append(0) + input_mask.append(0) + segment_ids.append(0) + assert len(input_ids) == self.max_words + assert len(input_mask) == self.max_words + assert len(segment_ids) == self.max_words + + pairs_text[i] = np.array(input_ids) + pairs_mask[i] = np.array(input_mask) + pairs_segment[i] = np.array(segment_ids) + + return pairs_text, pairs_mask, pairs_segment, starts, ends + + def _get_rawvideo(self, idx, s, e): + video_mask = np.zeros((len(s), self.max_frames), dtype=np.long) + max_video_length = [0] * len(s) + + # Pair x L x T x 3 x H x W + video = np.zeros((len(s), 
self.max_frames, 1, 3, + self.rawVideoExtractor.size, self.rawVideoExtractor.size), dtype=np.float) + video_path = self.video_dict[idx] + + try: + for i in range(len(s)): + start_time = int(s[i]) + end_time = int(e[i]) + start_time = start_time if start_time >= 0. else 0. + end_time = end_time if end_time >= 0. else 0. + if start_time > end_time: + start_time, end_time = end_time, start_time + elif start_time == end_time: + end_time = end_time + 1 + + cache_id = "{}_{}_{}".format(video_path, start_time, end_time) + # Should be optimized by gathering all asking of this video + raw_video_data = self.rawVideoExtractor.get_video_data(video_path, start_time, end_time) + raw_video_data = raw_video_data['video'] + # print(f"video path: {video_path}, raw video data shape: {raw_video_data.shape}") + if len(raw_video_data.shape) > 3: + raw_video_data_clip = raw_video_data + # L x T x 3 x H x W + raw_video_slice = self.rawVideoExtractor.process_raw_data(raw_video_data_clip) + if self.max_frames < raw_video_slice.shape[0]: + if self.slice_framepos == 0: + video_slice = raw_video_slice[:self.max_frames, ...] + elif self.slice_framepos == 1: + video_slice = raw_video_slice[-self.max_frames:, ...] + else: + sample_indx = np.linspace(0, raw_video_slice.shape[0] - 1, num=self.max_frames, dtype=int) + video_slice = raw_video_slice[sample_indx, ...] + else: + video_slice = raw_video_slice + + video_slice = self.rawVideoExtractor.process_frame_order(video_slice, frame_order=self.frame_order) + + slice_len = video_slice.shape[0] + max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_len else slice_len + if slice_len < 1: + pass + else: + video[i][:slice_len, ...] = video_slice + else: + print("Dimension error! video path: {} error. video id: {}, start: {}, end: {}".format(video_path, idx, start_time, end_time)) + except Exception as excep: + print("video path: {} error. video id: {}, start: {}, end: {}, Error: {}".format(video_path, idx, s, e, excep)) + pass + # raise e + + for i, v_length in enumerate(max_video_length): + video_mask[i][:v_length] = [1] * v_length + + return video, video_mask + + def _get_rawvideo_dec(self, choice_video_ids, s=None, e=None): + # speed up video decode via decord. + video_mask = np.zeros((1, self.max_frames), dtype=np.long) + max_video_length = [0] + + # T x 3 x H x W + # video = np.zeros((self.max_frames, 3, self.image_resolution, self.image_resolution), dtype=np.float) + video = np.zeros((len(s), self.max_frames, 1, 3, + self.image_resolution, self.image_resolution), dtype=np.float) + + if s is None: + start_time, end_time = None, None + else: + start_time = int(s) + end_time = int(e) + start_time = start_time if start_time >= 0. else 0. + end_time = end_time if end_time >= 0. else 0. 
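+        # Note: unlike the other loaders, choice_video_ids is a single video id string here,
+        # so video_dict is indexed with it directly below and only video[0] / video_mask[0] are filled.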
+ if start_time > end_time: + start_time, end_time = end_time, start_time + elif start_time == end_time: + end_time = start_time + 1 + + video_path = self.video_dict[choice_video_ids] + + try: + if video_path.startswith("s3://"): + video_bytes = client.get(video_path) + assert video_bytes is not None, "Get video failed from {}".format(video_path) + video_path = io.BytesIO(video_bytes) + + vreader = VideoReader(video_path, ctx=cpu(0)) + fps = vreader.get_avg_fps() + f_start = 0 if start_time is None else int(start_time * fps) + f_end = int(min(1000000000 if end_time is None else end_time * fps, len(vreader) - 1)) + num_frames = f_end - f_start + 1 + if num_frames > 0: + # T x 3 x H x W + # sample_fps = int(self.video_framerate) + sample_fps = int(self.feature_framerate) + t_stride = int(round(float(fps) / sample_fps)) + + all_pos = list(range(f_start, f_end + 1, t_stride)) + if len(all_pos) > self.max_frames: + sample_pos = [all_pos[_] for _ in np.linspace(0, len(all_pos) - 1, num=self.max_frames, dtype=int)] + else: + sample_pos = all_pos + + patch_images = [Image.fromarray(f) for f in vreader.get_batch(sample_pos).asnumpy()] + patch_images = torch.stack([self.transform(img) for img in patch_images]) + + patch_images = patch_images.unsqueeze(1) + + slice_len = patch_images.shape[0] + # max_video_length = max_video_length if max_video_length > slice_len else slice_len + max_video_length[0] = max_video_length[0] if max_video_length[0] > slice_len else slice_len + if slice_len < 1: + pass + else: + video[0][:slice_len, ...] = patch_images + else: + print("Error. video id: {}".format(choice_video_ids)) + except Exception as excep: + print("Error. video id: {}, start: {}, end: {}, Error: {}".format(choice_video_ids, s, e, excep)) + print(video.shape) + pass + # raise e + + v_length = max_video_length[0] + video_mask[0][:v_length] = [1] * v_length + + # print(video.shape, video_mask.shape) + return video, video_mask + + def __getitem__(self, feature_idx): + video_id, sub_id = self.iter2video_pairs_dict[feature_idx] + + pairs_text, pairs_mask, pairs_segment, starts, ends = self._get_text(video_id, sub_id) + video, video_mask = self._get_rawvideo_dec(video_id, starts, ends) + # print(video.shape, video_mask.shape) + return pairs_text, pairs_mask, pairs_segment, video, video_mask diff --git a/Downstream/Video-Text-Retrieval/dataloaders/dataloader_lsmdc_retrieval.py b/Downstream/Video-Text-Retrieval/dataloaders/dataloader_lsmdc_retrieval.py new file mode 100644 index 0000000..21fa443 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/dataloaders/dataloader_lsmdc_retrieval.py @@ -0,0 +1,314 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import unicode_literals +from __future__ import print_function + +import os +from torch.utils.data import Dataset +import numpy as np +import json +import math +from dataloaders.rawvideo_util import RawVideoExtractor +import io +from decord import VideoReader, cpu +import torch +from PIL import Image +from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize, InterpolationMode + +try: + from petrel_client.client import Client + client = Client() + + # Disable boto logger + import logging + logging.getLogger('boto3').setLevel(logging.WARNING) + logging.getLogger('botocore').setLevel(logging.WARNING) + logging.getLogger('nose').setLevel(logging.WARNING) +except: + client = None + + +class LSMDC_DataLoader(Dataset): + """LSMDC dataset loader.""" + def __init__( + self, + subset, + data_path, + 
features_path, + tokenizer, + max_words=30, + feature_framerate=1.0, + max_frames=100, + image_resolution=224, + frame_order=0, + slice_framepos=0, + ): + self.data_path = data_path + self.features_path = features_path + self.feature_framerate = feature_framerate + self.max_words = max_words + self.max_frames = max_frames + self.image_resolution = image_resolution + self.tokenizer = tokenizer + # 0: ordinary order; 1: reverse order; 2: random order. + self.frame_order = frame_order + assert self.frame_order in [0, 1, 2] + # 0: cut from head frames; 1: cut from tail frames; 2: extract frames uniformly. + self.slice_framepos = slice_framepos + assert self.slice_framepos in [0, 1, 2] + + self.subset = subset + assert self.subset in ["train", "val", "test"] + + video_json_path_dict = {} + video_json_path_dict["train"] = os.path.join(self.data_path, "LSMDC16_annos_training.csv") + video_json_path_dict["val"] = os.path.join(self.data_path, "LSMDC16_annos_val.csv") + video_json_path_dict["test"] = os.path.join(self.data_path, "LSMDC16_challenge_1000_publictect.csv") + + # \t\t\t\t\t + # is not a unique identifier, i.e. the same can be associated with multiple sentences. + # However, LSMDC16_challenge_1000_publictect.csv has no repeat instances + video_id_list = [] + caption_dict = {} + + with open(video_json_path_dict[self.subset], 'r') as fp: + for line in fp: + line = line.strip() + line_split = line.split("\t") + assert len(line_split) == 6 + clip_id, start_aligned, end_aligned, start_extracted, end_extracted, sentence = line_split + caption_dict[len(caption_dict)] = (clip_id, sentence) + if clip_id not in video_id_list: video_id_list.append(clip_id) + + if subset=='train': + video_id_list = video_id_list[:58200] + video_id_list[58300:] + + video_dict = {} + for content in client.list(self.features_path): + if content.endswith('/'): + video_cur_path = os.path.join(self.features_path, content) + video_files = client.list(video_cur_path) + for video_file in video_files: + video_id_ = ".".join(video_file.split(".")[:-1]) + if video_id_ not in video_id_list: + continue + file_path_ = os.path.join(video_cur_path, video_file) + video_dict[video_id_] = file_path_ + + self.video_dict = video_dict + + # Get all captions + self.iter2video_pairs_dict = {} + for clip_id, sentence in caption_dict.values(): + if clip_id not in self.video_dict: + continue + self.iter2video_pairs_dict[len(self.iter2video_pairs_dict)] = (clip_id, sentence) + + self.rawVideoExtractor = RawVideoExtractor(framerate=feature_framerate, size=image_resolution) + self.SPECIAL_TOKEN = {"CLS_TOKEN": "<|startoftext|>", "SEP_TOKEN": "<|endoftext|>", + "MASK_TOKEN": "[MASK]", "UNK_TOKEN": "[UNK]", "PAD_TOKEN": "[PAD]"} + self.transform = Compose([ + Resize(image_resolution, interpolation=InterpolationMode.BICUBIC), + CenterCrop(image_resolution), + lambda image: image.convert("RGB"), + ToTensor(), + Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), + # Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ]) + + + def __len__(self): + return len(self.iter2video_pairs_dict) + + def _get_video_id_from_pseduo(self, pseudo_video_id): + video_id = pseudo_video_id[2:] + return video_id + + def _get_video_id_single(self, path): + pseudo_video_id_list = [] + video_id_list = [] + print('Loading json: {}'.format(path)) + with open(path, 'r') as f: + json_data = json.load(f) + + for pseudo_video_id in json_data: + if pseudo_video_id in pseudo_video_id_list: + print("reduplicate.") + else: + video_id = 
self._get_video_id_from_pseduo(pseudo_video_id) + pseudo_video_id_list.append(pseudo_video_id) + video_id_list.append(video_id) + return pseudo_video_id_list, video_id_list + + def _get_captions_single(self, path): + pseudo_caption_dict = {} + with open(path, 'r') as f: + json_data = json.load(f) + + for pseudo_video_id, v_ in json_data.items(): + pseudo_caption_dict[pseudo_video_id] = {} + timestamps = v_["timestamps"] + pseudo_caption_dict[pseudo_video_id]["start"] = \ + np.array([int(math.floor(float(itm[0]))) for itm in timestamps], dtype=object) + pseudo_caption_dict[pseudo_video_id]["end"] = \ + np.array([int(math.ceil(float(itm[1]))) for itm in timestamps], dtype=object) + pseudo_caption_dict[pseudo_video_id]["text"] = np.array(v_["sentences"], dtype=object) + return pseudo_caption_dict + + def _get_text(self, video_id, caption): + k = 1 + choice_video_ids = [video_id] + pairs_text = np.zeros((k, self.max_words), dtype=np.long) + pairs_mask = np.zeros((k, self.max_words), dtype=np.long) + pairs_segment = np.zeros((k, self.max_words), dtype=np.long) + + for i, video_id in enumerate(choice_video_ids): + words = self.tokenizer.tokenize(caption) + + words = [self.SPECIAL_TOKEN["CLS_TOKEN"]] + words + total_length_with_CLS = self.max_words - 1 + if len(words) > total_length_with_CLS: + words = words[:total_length_with_CLS] + words = words + [self.SPECIAL_TOKEN["SEP_TOKEN"]] + + input_ids = self.tokenizer.convert_tokens_to_ids(words) + input_mask = [1] * len(input_ids) + segment_ids = [0] * len(input_ids) + while len(input_ids) < self.max_words: + input_ids.append(0) + input_mask.append(0) + segment_ids.append(0) + assert len(input_ids) == self.max_words + assert len(input_mask) == self.max_words + assert len(segment_ids) == self.max_words + + pairs_text[i] = np.array(input_ids) + pairs_mask[i] = np.array(input_mask) + pairs_segment[i] = np.array(segment_ids) + + return pairs_text, pairs_mask, pairs_segment, choice_video_ids + + def _get_rawvideo(self, choice_video_ids): + video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.long) + max_video_length = [0] * len(choice_video_ids) + + # Pair x L x T x 3 x H x W + video = np.zeros((len(choice_video_ids), self.max_frames, 1, 3, + self.rawVideoExtractor.size, self.rawVideoExtractor.size), dtype=np.float) + + try: + for i, video_id in enumerate(choice_video_ids): + video_path = self.video_dict[video_id] + + raw_video_data = self.rawVideoExtractor.get_video_data(video_path) + raw_video_data = raw_video_data['video'] + + if len(raw_video_data.shape) > 3: + raw_video_data_clip = raw_video_data + # L x T x 3 x H x W + raw_video_slice = self.rawVideoExtractor.process_raw_data(raw_video_data_clip) + if self.max_frames < raw_video_slice.shape[0]: + if self.slice_framepos == 0: + video_slice = raw_video_slice[:self.max_frames, ...] + elif self.slice_framepos == 1: + video_slice = raw_video_slice[-self.max_frames:, ...] + else: + sample_indx = np.linspace(0, raw_video_slice.shape[0]-1, num=self.max_frames, dtype=int) + video_slice = raw_video_slice[sample_indx, ...] + else: + video_slice = raw_video_slice + + video_slice = self.rawVideoExtractor.process_frame_order(video_slice, frame_order=self.frame_order) + + slice_len = video_slice.shape[0] + max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_len else slice_len + if slice_len < 1: + pass + else: + video[i][:slice_len, ...] = video_slice + else: + print("video path: {} error. 
video id: {}".format(video_path, video_id)) + except Exception as excep: + print("Video ids: {}".format(choice_video_ids)) + raise excep + + for i, v_length in enumerate(max_video_length): + video_mask[i][:v_length] = [1] * v_length + return video, video_mask + + def _get_rawvideo_dec(self, choice_video_ids, s=None, e=None): + # speed up video decode via decord. + # video_mask = np.zeros(self.max_frames, dtype=np.long) + video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.long) + + # max_video_length = 0 + max_video_length = [0] * len(choice_video_ids) + + # T x 3 x H x W + # video = np.zeros((self.max_frames, 3, self.image_resolution, self.image_resolution), dtype=np.float) + video = np.zeros((len(choice_video_ids), self.max_frames, 1, 3, + self.image_resolution, self.image_resolution), dtype=np.float) + + if s is None: + start_time, end_time = None, None + else: + start_time = int(s) + end_time = int(e) + start_time = start_time if start_time >= 0. else 0. + end_time = end_time if end_time >= 0. else 0. + if start_time > end_time: + start_time, end_time = end_time, start_time + elif start_time == end_time: + end_time = start_time + 1 + for i, video_id in enumerate(choice_video_ids): + video_path = self.video_dict[video_id] + # video_path = os.path.join(self.features_path, "{}.avi".format(video_id)) + if video_path.startswith("s3://"): + video_bytes = client.get(video_path) + assert video_bytes is not None, "Get video failed from {}".format(video_path) + video_path = io.BytesIO(video_bytes) + vreader = VideoReader(video_path, ctx=cpu(0)) + + fps = vreader.get_avg_fps() + f_start = 0 if start_time is None else int(start_time * fps) + f_end = int(min(1000000000 if end_time is None else end_time * fps, len(vreader) - 1)) + num_frames = f_end - f_start + 1 + if num_frames > 0: + # T x 3 x H x W + # sample_fps = int(self.video_framerate) + sample_fps = int(self.feature_framerate) + t_stride = int(round(float(fps) / sample_fps)) + + all_pos = list(range(f_start, f_end + 1, t_stride)) + if len(all_pos) > self.max_frames: + sample_pos = [all_pos[_] for _ in np.linspace(0, len(all_pos) - 1, num=self.max_frames, dtype=int)] + else: + sample_pos = all_pos + + patch_images = [Image.fromarray(f) for f in vreader.get_batch(sample_pos).asnumpy()] + patch_images = torch.stack([self.transform(img) for img in patch_images]) + + patch_images = patch_images.unsqueeze(1) + + slice_len = patch_images.shape[0] + # max_video_length = max_video_length if max_video_length > slice_len else slice_len + max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_len else slice_len + if slice_len < 1: + pass + else: + video[i][:slice_len, ...] = patch_images + else: + print("video path: {} error. 
video id: {}".format(video_path, video_id)) + + # video_mask[:max_video_length] = [1] * max_video_length + for i, v_length in enumerate(max_video_length): + video_mask[i][:v_length] = [1] * v_length + + #print(video.shape, video_mask.shape) + return video, video_mask + + def __getitem__(self, feature_idx): + clip_id, sentence = self.iter2video_pairs_dict[feature_idx] + pairs_text, pairs_mask, pairs_segment, choice_video_ids = self._get_text(clip_id, sentence) + video, video_mask = self._get_rawvideo_dec(choice_video_ids) + return pairs_text, pairs_mask, pairs_segment, video, video_mask diff --git a/Downstream/Video-Text-Retrieval/dataloaders/dataloader_msrvtt_retrieval.py b/Downstream/Video-Text-Retrieval/dataloaders/dataloader_msrvtt_retrieval.py new file mode 100644 index 0000000..8a68ce1 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/dataloaders/dataloader_msrvtt_retrieval.py @@ -0,0 +1,492 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import unicode_literals +from __future__ import print_function + +import os +import io +from torch.utils.data import Dataset +import numpy as np +import pandas as pd +from collections import defaultdict +import json +import random +from dataloaders.rawvideo_util import RawVideoExtractor +try: + from petrel_client.client import Client + + client = Client() + + # Disable boto logger + import logging + logging.getLogger('boto3').setLevel(logging.WARNING) + logging.getLogger('botocore').setLevel(logging.WARNING) + logging.getLogger('nose').setLevel(logging.WARNING) +except: + client = None + +from decord import VideoReader, cpu +import torch +from PIL import Image +from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize, InterpolationMode + +class MSRVTT_DataLoader(Dataset): + """MSRVTT dataset loader.""" + def __init__( + self, + csv_path, + features_path, + tokenizer, + max_words=30, + feature_framerate=1.0, + max_frames=100, + image_resolution=224, + frame_order=0, + slice_framepos=0, + ): + self.data = pd.read_csv(csv_path) + self.features_path = features_path + self.feature_framerate = feature_framerate + self.max_words = max_words + self.max_frames = max_frames + self.tokenizer = tokenizer + self.image_resolution = image_resolution + # 0: ordinary order; 1: reverse order; 2: random order. + self.frame_order = frame_order + assert self.frame_order in [0, 1, 2] + # 0: cut from head frames; 1: cut from tail frames; 2: extract frames uniformly. 
+ self.slice_framepos = slice_framepos + assert self.slice_framepos in [0, 1, 2] + + self.rawVideoExtractor = RawVideoExtractor(framerate=feature_framerate, size=image_resolution) + self.SPECIAL_TOKEN = {"CLS_TOKEN": "<|startoftext|>", "SEP_TOKEN": "<|endoftext|>", + "MASK_TOKEN": "[MASK]", "UNK_TOKEN": "[UNK]", "PAD_TOKEN": "[PAD]"} + + self.transform = Compose([ + Resize(image_resolution, interpolation=InterpolationMode.BICUBIC), + CenterCrop(image_resolution), + lambda image: image.convert("RGB"), + ToTensor(), + Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), + # Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ]) + + def __len__(self): + return len(self.data) + + def _get_text(self, video_id, sentence): + choice_video_ids = [video_id] + n_caption = len(choice_video_ids) + + k = n_caption + pairs_text = np.zeros((k, self.max_words), dtype=np.long) + pairs_mask = np.zeros((k, self.max_words), dtype=np.long) + pairs_segment = np.zeros((k, self.max_words), dtype=np.long) + + for i, video_id in enumerate(choice_video_ids): + words = self.tokenizer.tokenize(sentence) + + words = [self.SPECIAL_TOKEN["CLS_TOKEN"]] + words + total_length_with_CLS = self.max_words - 1 + if len(words) > total_length_with_CLS: + words = words[:total_length_with_CLS] + words = words + [self.SPECIAL_TOKEN["SEP_TOKEN"]] + + input_ids = self.tokenizer.convert_tokens_to_ids(words) + input_mask = [1] * len(input_ids) + segment_ids = [0] * len(input_ids) + while len(input_ids) < self.max_words: + input_ids.append(0) + input_mask.append(0) + segment_ids.append(0) + assert len(input_ids) == self.max_words + assert len(input_mask) == self.max_words + assert len(segment_ids) == self.max_words + + pairs_text[i] = np.array(input_ids) + pairs_mask[i] = np.array(input_mask) + pairs_segment[i] = np.array(segment_ids) + + return pairs_text, pairs_mask, pairs_segment, choice_video_ids + + def _get_rawvideo_dec(self, choice_video_ids, s=None, e=None): + # speed up video decode via decord. + # video_mask = np.zeros(self.max_frames, dtype=np.long) + video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.long) + + # max_video_length = 0 + max_video_length = [0] * len(choice_video_ids) + + # T x 3 x H x W + # video = np.zeros((self.max_frames, 3, self.image_resolution, self.image_resolution), dtype=np.float) + video = np.zeros((len(choice_video_ids), self.max_frames, 1, 3, + self.image_resolution, self.image_resolution), dtype=np.float) + + if s is None: + start_time, end_time = None, None + else: + start_time = int(s) + end_time = int(e) + start_time = start_time if start_time >= 0. else 0. + end_time = end_time if end_time >= 0. else 0. 
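+        # In the loop below, s3:// paths are remapped from 'videos' to 'MSRVTT_Videos' and the
+        # bytes are streamed through the petrel client into an in-memory buffer before decord opens them.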
+ if start_time > end_time: + start_time, end_time = end_time, start_time + elif start_time == end_time: + end_time = start_time + 1 + # video_path = self.video_dict[video_id] + for i, video_id in enumerate(choice_video_ids): + + video_path = os.path.join(self.features_path, "{}.mp4".format(video_id)) + + if video_path.startswith("s3://"): + video_path = video_path.replace('videos', 'MSRVTT_Videos') + video_bytes = client.get(video_path, enable_stream=True) + assert video_bytes is not None, "Get video failed from {}".format(video_path) + video_path = video_bytes + if isinstance(video_path, bytes): + video_path = io.BytesIO(video_bytes) + vreader = VideoReader(video_path, ctx=cpu(0)) + + fps = vreader.get_avg_fps() + f_start = 0 if start_time is None else int(start_time * fps) + f_end = int(min(1000000000 if end_time is None else end_time * fps, len(vreader) - 1)) + num_frames = f_end - f_start + 1 + if num_frames > 0: + # T x 3 x H x W + # sample_fps = int(self.video_framerate) + sample_fps = int(self.feature_framerate) + t_stride = int(round(float(fps) / sample_fps)) + + all_pos = list(range(f_start, f_end + 1, t_stride)) + if len(all_pos) > self.max_frames: + sample_pos = [all_pos[_] for _ in np.linspace(0, len(all_pos) - 1, num=self.max_frames, dtype=int)] + else: + sample_pos = all_pos + + patch_images = [Image.fromarray(f) for f in vreader.get_batch(sample_pos).asnumpy()] + patch_images = torch.stack([self.transform(img) for img in patch_images]) + + patch_images = patch_images.unsqueeze(1) + + slice_len = patch_images.shape[0] + # max_video_length = max_video_length if max_video_length > slice_len else slice_len + max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_len else slice_len + if slice_len < 1: + pass + else: + video[i][:slice_len, ...] = patch_images + else: + print("video path: {} error. video id: {}".format(video_path, video_id)) + + # video_mask[:max_video_length] = [1] * max_video_length + for i, v_length in enumerate(max_video_length): + video_mask[i][:v_length] = [1] * v_length + + #print(video.shape, video_mask.shape) + return video, video_mask + + def _get_rawvideo(self, choice_video_ids): + video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.long) + max_video_length = [0] * len(choice_video_ids) + + # Pair x L x T x 3 x H x W + video = np.zeros((len(choice_video_ids), self.max_frames, 1, 3, + self.rawVideoExtractor.size, self.rawVideoExtractor.size), dtype=np.float) + + for i, video_id in enumerate(choice_video_ids): + # Individual for YoucokII dataset, due to it video format + video_path = os.path.join(self.features_path, "{}.mp4".format(video_id)) + #if os.path.exists(video_path) is False: + # video_path = video_path.replace(".mp4", ".webm") + + raw_video_data = self.rawVideoExtractor.get_video_data(video_path) + raw_video_data = raw_video_data['video'] + if len(raw_video_data.shape) > 3: + raw_video_data_clip = raw_video_data + # L x T x 3 x H x W + raw_video_slice = self.rawVideoExtractor.process_raw_data(raw_video_data_clip) + if self.max_frames < raw_video_slice.shape[0]: + if self.slice_framepos == 0: + video_slice = raw_video_slice[:self.max_frames, ...] + elif self.slice_framepos == 1: + video_slice = raw_video_slice[-self.max_frames:, ...] + else: + sample_indx = np.linspace(0, raw_video_slice.shape[0] - 1, num=self.max_frames, dtype=int) + video_slice = raw_video_slice[sample_indx, ...] 
+ else: + video_slice = raw_video_slice + + video_slice = self.rawVideoExtractor.process_frame_order(video_slice, frame_order=self.frame_order) + + slice_len = video_slice.shape[0] + max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_len else slice_len + if slice_len < 1: + pass + else: + video[i][:slice_len, ...] = video_slice + else: + print("video path: {} error. video id: {}".format(video_path, video_id)) + + for i, v_length in enumerate(max_video_length): + video_mask[i][:v_length] = [1] * v_length + + return video, video_mask + + def __getitem__(self, idx): + video_id = self.data['video_id'].values[idx] + sentence = self.data['sentence'].values[idx] + + pairs_text, pairs_mask, pairs_segment, choice_video_ids = self._get_text(video_id, sentence) + # video, video_mask = self._get_rawvideo(choice_video_ids) + video, video_mask = self._get_rawvideo_dec(choice_video_ids) + return pairs_text, pairs_mask, pairs_segment, video, video_mask + +class MSRVTT_TrainDataLoader(Dataset): + """MSRVTT train dataset loader.""" + def __init__( + self, + csv_path, + json_path, + features_path, + tokenizer, + max_words=30, + feature_framerate=1.0, + max_frames=100, + unfold_sentences=False, + image_resolution=224, + frame_order=0, + slice_framepos=0, + ): + self.csv = pd.read_csv(csv_path) + self.data = json.load(open(json_path, 'r')) + self.features_path = features_path + self.feature_framerate = feature_framerate + self.max_words = max_words + self.max_frames = max_frames + self.tokenizer = tokenizer + self.image_resolution = image_resolution + # 0: ordinary order; 1: reverse order; 2: random order. + self.frame_order = frame_order + assert self.frame_order in [0, 1, 2] + # 0: cut from head frames; 1: cut from tail frames; 2: extract frames uniformly. 
+ self.slice_framepos = slice_framepos + assert self.slice_framepos in [0, 1, 2] + + self.unfold_sentences = unfold_sentences + self.sample_len = 0 + if self.unfold_sentences: + train_video_ids = list(self.csv['video_id'].values) + self.sentences_dict = {} + for itm in self.data['sentences']: + if itm['video_id'] in train_video_ids: + self.sentences_dict[len(self.sentences_dict)] = (itm['video_id'], itm['caption']) + self.sample_len = len(self.sentences_dict) + else: + num_sentences = 0 + self.sentences = defaultdict(list) + s_video_id_set = set() + for itm in self.data['sentences']: + self.sentences[itm['video_id']].append(itm['caption']) + num_sentences += 1 + s_video_id_set.add(itm['video_id']) + + # Use to find the clips in the same video + self.parent_ids = {} + self.children_video_ids = defaultdict(list) + for itm in self.data['videos']: + vid = itm["video_id"] + url_posfix = itm["url"].split("?v=")[-1] + self.parent_ids[vid] = url_posfix + self.children_video_ids[url_posfix].append(vid) + self.sample_len = len(self.csv) + + self.rawVideoExtractor = RawVideoExtractor(framerate=feature_framerate, size=image_resolution) + self.SPECIAL_TOKEN = {"CLS_TOKEN": "<|startoftext|>", "SEP_TOKEN": "<|endoftext|>", + "MASK_TOKEN": "[MASK]", "UNK_TOKEN": "[UNK]", "PAD_TOKEN": "[PAD]"} + self.transform = Compose([ + Resize(image_resolution, interpolation=InterpolationMode.BICUBIC), + CenterCrop(image_resolution), + lambda image: image.convert("RGB"), + ToTensor(), + Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), + # Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ]) + + def __len__(self): + return self.sample_len + + def _get_text(self, video_id, caption=None): + k = 1 + choice_video_ids = [video_id] + pairs_text = np.zeros((k, self.max_words), dtype=np.long) + pairs_mask = np.zeros((k, self.max_words), dtype=np.long) + pairs_segment = np.zeros((k, self.max_words), dtype=np.long) + + for i, video_id in enumerate(choice_video_ids): + if caption is not None: + words = self.tokenizer.tokenize(caption) + else: + words = self._get_single_text(video_id) + + words = [self.SPECIAL_TOKEN["CLS_TOKEN"]] + words + total_length_with_CLS = self.max_words - 1 + if len(words) > total_length_with_CLS: + words = words[:total_length_with_CLS] + words = words + [self.SPECIAL_TOKEN["SEP_TOKEN"]] + + input_ids = self.tokenizer.convert_tokens_to_ids(words) + input_mask = [1] * len(input_ids) + segment_ids = [0] * len(input_ids) + while len(input_ids) < self.max_words: + input_ids.append(0) + input_mask.append(0) + segment_ids.append(0) + assert len(input_ids) == self.max_words + assert len(input_mask) == self.max_words + assert len(segment_ids) == self.max_words + + pairs_text[i] = np.array(input_ids) + pairs_mask[i] = np.array(input_mask) + pairs_segment[i] = np.array(segment_ids) + + return pairs_text, pairs_mask, pairs_segment, choice_video_ids + + def _get_single_text(self, video_id): + rind = random.randint(0, len(self.sentences[video_id]) - 1) + caption = self.sentences[video_id][rind] + words = self.tokenizer.tokenize(caption) + return words + + + def _get_rawvideo_dec(self, choice_video_ids, s=None, e=None): + # speed up video decode via decord. 
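+        # Decoding sketch: a decord VideoReader loads the clip, a frame stride is
+        # derived from the container fps and self.feature_framerate, and the chosen
+        # indices are uniformly thinned to at most self.max_frames. Each sampled frame
+        # is passed through the CLIP-style transform; video_mask marks the valid frames.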
+ # video_mask = np.zeros(self.max_frames, dtype=np.long) + video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.long) + + # max_video_length = 0 + max_video_length = [0] * len(choice_video_ids) + + # T x 3 x H x W + # video = np.zeros((self.max_frames, 3, self.image_resolution, self.image_resolution), dtype=np.float) + video = np.zeros((len(choice_video_ids), self.max_frames, 1, 3, + self.image_resolution, self.image_resolution), dtype=np.float) + + if s is None: + start_time, end_time = None, None + else: + start_time = int(s) + end_time = int(e) + start_time = start_time if start_time >= 0. else 0. + end_time = end_time if end_time >= 0. else 0. + if start_time > end_time: + start_time, end_time = end_time, start_time + elif start_time == end_time: + end_time = start_time + 1 + # video_path = self.video_dict[video_id] + for i, video_id in enumerate(choice_video_ids): + + video_path = os.path.join(self.features_path, "{}.mp4".format(video_id)) + + if video_path.startswith("s3://"): + video_path = video_path.replace('videos', 'MSRVTT_Videos') + video_bytes = client.get(video_path, enable_stream=True) + assert video_bytes is not None, "Get video failed from {}".format(video_path) + video_path = video_bytes + if isinstance(video_path, bytes): + video_path = io.BytesIO(video_bytes) + vreader = VideoReader(video_path, ctx=cpu(0)) + + fps = vreader.get_avg_fps() + f_start = 0 if start_time is None else int(start_time * fps) + f_end = int(min(1000000000 if end_time is None else end_time * fps, len(vreader) - 1)) + num_frames = f_end - f_start + 1 + if num_frames > 0: + # T x 3 x H x W + # sample_fps = int(self.video_framerate) + sample_fps = int(self.feature_framerate) + t_stride = int(round(float(fps) / sample_fps)) + + all_pos = list(range(f_start, f_end + 1, t_stride)) + if len(all_pos) > self.max_frames: + sample_pos = [all_pos[_] for _ in np.linspace(0, len(all_pos) - 1, num=self.max_frames, dtype=int)] + else: + sample_pos = all_pos + + patch_images = [Image.fromarray(f) for f in vreader.get_batch(sample_pos).asnumpy()] + patch_images = torch.stack([self.transform(img) for img in patch_images]) + + patch_images = patch_images.unsqueeze(1) + + slice_len = patch_images.shape[0] + # max_video_length = max_video_length if max_video_length > slice_len else slice_len + max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_len else slice_len + if slice_len < 1: + pass + else: + video[i][:slice_len, ...] = patch_images + else: + print("video path: {} error. 
video id: {}".format(video_path, video_id)) + + # video_mask[:max_video_length] = [1] * max_video_length + for i, v_length in enumerate(max_video_length): + video_mask[i][:v_length] = [1] * v_length + + #print(video.shape, video_mask.shape) + return video, video_mask + + def _get_rawvideo(self, choice_video_ids): + video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.long) + max_video_length = [0] * len(choice_video_ids) + + # Pair x L x T x 3 x H x W + video = np.zeros((len(choice_video_ids), self.max_frames, 1, 3, + self.rawVideoExtractor.size, self.rawVideoExtractor.size), dtype=np.float) + + for i, video_id in enumerate(choice_video_ids): + # Individual for YoucokII dataset, due to it video format + video_path = os.path.join(self.features_path, "{}.mp4".format(video_id)) + if os.path.exists(video_path) is False: + video_path = video_path.replace(".mp4", ".webm") + + raw_video_data = self.rawVideoExtractor.get_video_data(video_path) + raw_video_data = raw_video_data['video'] + if len(raw_video_data.shape) > 3: + raw_video_data_clip = raw_video_data + # L x T x 3 x H x W + raw_video_slice = self.rawVideoExtractor.process_raw_data(raw_video_data_clip) + if self.max_frames < raw_video_slice.shape[0]: + if self.slice_framepos == 0: + video_slice = raw_video_slice[:self.max_frames, ...] + elif self.slice_framepos == 1: + video_slice = raw_video_slice[-self.max_frames:, ...] + else: + sample_indx = np.linspace(0, raw_video_slice.shape[0] - 1, num=self.max_frames, dtype=int) + video_slice = raw_video_slice[sample_indx, ...] + else: + video_slice = raw_video_slice + + video_slice = self.rawVideoExtractor.process_frame_order(video_slice, frame_order=self.frame_order) + + slice_len = video_slice.shape[0] + max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_len else slice_len + if slice_len < 1: + pass + else: + video[i][:slice_len, ...] = video_slice + else: + print("video path: {} error. 
video id: {}".format(video_path, video_id)) + + for i, v_length in enumerate(max_video_length): + video_mask[i][:v_length] = [1] * v_length + + return video, video_mask + + def __getitem__(self, idx): + if self.unfold_sentences: + video_id, caption = self.sentences_dict[idx] + else: + video_id, caption = self.csv['video_id'].values[idx], None + pairs_text, pairs_mask, pairs_segment, choice_video_ids = self._get_text(video_id, caption) + # video, video_mask = self._get_rawvideo(choice_video_ids) + video, video_mask = self._get_rawvideo_dec(choice_video_ids) + return pairs_text, pairs_mask, pairs_segment, video, video_mask diff --git a/Downstream/Video-Text-Retrieval/dataloaders/dataloader_msvd_retrieval.py b/Downstream/Video-Text-Retrieval/dataloaders/dataloader_msvd_retrieval.py new file mode 100644 index 0000000..f96c531 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/dataloaders/dataloader_msvd_retrieval.py @@ -0,0 +1,281 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import unicode_literals +from __future__ import print_function + +import os +import io +from torch.utils.data import Dataset +import numpy as np +import pickle +from dataloaders.rawvideo_util import RawVideoExtractor +from decord import VideoReader, cpu +import torch +from PIL import Image +from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize, InterpolationMode + +try: + from petrel_client.client import Client + client = Client() + + # Disable boto logger + import logging + logging.getLogger('boto3').setLevel(logging.WARNING) + logging.getLogger('botocore').setLevel(logging.WARNING) + logging.getLogger('nose').setLevel(logging.WARNING) +except: + client = None + +class MSVD_DataLoader(Dataset): + """MSVD dataset loader.""" + def __init__( + self, + subset, + data_path, + features_path, + tokenizer, + max_words=30, + feature_framerate=1.0, + max_frames=100, + image_resolution=224, + frame_order=0, + slice_framepos=0, + ): + self.data_path = data_path + self.features_path = features_path + self.feature_framerate = feature_framerate + self.image_resolution = image_resolution + self.max_words = max_words + self.max_frames = max_frames + self.tokenizer = tokenizer + # 0: ordinary order; 1: reverse order; 2: random order. + self.frame_order = frame_order + assert self.frame_order in [0, 1, 2] + # 0: cut from head frames; 1: cut from tail frames; 2: extract frames uniformly. 
+ self.slice_framepos = slice_framepos + assert self.slice_framepos in [0, 1, 2] + + self.subset = subset + assert self.subset in ["train", "val", "test"] + video_id_path_dict = {} + video_id_path_dict["train"] = os.path.join(self.data_path, "train_list.txt") + video_id_path_dict["val"] = os.path.join(self.data_path, "val_list.txt") + video_id_path_dict["test"] = os.path.join(self.data_path, "test_list.txt") + caption_file = os.path.join(self.data_path, "raw-captions.pkl") + + with open(video_id_path_dict[self.subset], 'r') as fp: + video_ids = [itm.strip() for itm in fp.readlines()] + + with open(caption_file, 'rb') as f: + captions = pickle.load(f) + + video_dict = {} + for video_file in client.list(self.features_path): + video_id_ = ".".join(video_file.split(".")[:-1]) + if video_id_ not in video_ids: + continue + file_path_ = os.path.join(self.features_path, video_file) + video_dict[video_id_] = file_path_ + self.video_dict = video_dict + + self.sample_len = 0 + self.sentences_dict = {} + self.cut_off_points = [] + for video_id in video_ids: + assert video_id in captions + for cap in captions[video_id]: + cap_txt = " ".join(cap) + self.sentences_dict[len(self.sentences_dict)] = (video_id, cap_txt) + self.cut_off_points.append(len(self.sentences_dict)) + + ## below variables are used to multi-sentences retrieval + # self.cut_off_points: used to tag the label when calculate the metric + # self.sentence_num: used to cut the sentence representation + # self.video_num: used to cut the video representation + self.multi_sentence_per_video = True # !!! important tag for eval + if self.subset == "val" or self.subset == "test": + self.sentence_num = len(self.sentences_dict) + self.video_num = len(video_ids) + assert len(self.cut_off_points) == self.video_num + print("For {}, sentence number: {}".format(self.subset, self.sentence_num)) + print("For {}, video number: {}".format(self.subset, self.video_num)) + + print("Video number: {}".format(len(self.video_dict))) + print("Total Paire: {}".format(len(self.sentences_dict))) + + self.sample_len = len(self.sentences_dict) + self.rawVideoExtractor = RawVideoExtractor(framerate=feature_framerate, size=image_resolution) + self.SPECIAL_TOKEN = {"CLS_TOKEN": "<|startoftext|>", "SEP_TOKEN": "<|endoftext|>", + "MASK_TOKEN": "[MASK]", "UNK_TOKEN": "[UNK]", "PAD_TOKEN": "[PAD]"} + + self.transform = Compose([ + Resize(image_resolution, interpolation=InterpolationMode.BICUBIC), + CenterCrop(image_resolution), + lambda image: image.convert("RGB"), + ToTensor(), + Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), + # Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ]) + + + def __len__(self): + return self.sample_len + + def _get_text(self, video_id, caption): + k = 1 + choice_video_ids = [video_id] + pairs_text = np.zeros((k, self.max_words), dtype=np.long) + pairs_mask = np.zeros((k, self.max_words), dtype=np.long) + pairs_segment = np.zeros((k, self.max_words), dtype=np.long) + + for i, video_id in enumerate(choice_video_ids): + words = self.tokenizer.tokenize(caption) + + words = [self.SPECIAL_TOKEN["CLS_TOKEN"]] + words + total_length_with_CLS = self.max_words - 1 + if len(words) > total_length_with_CLS: + words = words[:total_length_with_CLS] + words = words + [self.SPECIAL_TOKEN["SEP_TOKEN"]] + + input_ids = self.tokenizer.convert_tokens_to_ids(words) + input_mask = [1] * len(input_ids) + segment_ids = [0] * len(input_ids) + while len(input_ids) < self.max_words: + input_ids.append(0) + 
input_mask.append(0) + segment_ids.append(0) + assert len(input_ids) == self.max_words + assert len(input_mask) == self.max_words + assert len(segment_ids) == self.max_words + + pairs_text[i] = np.array(input_ids) + pairs_mask[i] = np.array(input_mask) + pairs_segment[i] = np.array(segment_ids) + + return pairs_text, pairs_mask, pairs_segment, choice_video_ids + + def _get_rawvideo_dec(self, choice_video_ids, s=None, e=None): + # speed up video decode via decord. + # video_mask = np.zeros(self.max_frames, dtype=np.long) + video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.long) + + # max_video_length = 0 + max_video_length = [0] * len(choice_video_ids) + + # T x 3 x H x W + # video = np.zeros((self.max_frames, 3, self.image_resolution, self.image_resolution), dtype=np.float) + video = np.zeros((len(choice_video_ids), self.max_frames, 1, 3, + self.image_resolution, self.image_resolution), dtype=np.float) + + if s is None: + start_time, end_time = None, None + else: + start_time = int(s) + end_time = int(e) + start_time = start_time if start_time >= 0. else 0. + end_time = end_time if end_time >= 0. else 0. + if start_time > end_time: + start_time, end_time = end_time, start_time + elif start_time == end_time: + end_time = start_time + 1 + # video_path = self.video_dict[video_id] + for i, video_id in enumerate(choice_video_ids): + + video_path = os.path.join(self.features_path, "{}.avi".format(video_id)) + if video_path.startswith("s3://"): + video_bytes = client.get(video_path) + assert video_bytes is not None, "Get video failed from {}".format(video_path) + video_path = io.BytesIO(video_bytes) + vreader = VideoReader(video_path, ctx=cpu(0)) + + + fps = vreader.get_avg_fps() + f_start = 0 if start_time is None else int(start_time * fps) + f_end = int(min(1000000000 if end_time is None else end_time * fps, len(vreader) - 1)) + num_frames = f_end - f_start + 1 + if num_frames > 0: + # T x 3 x H x W + # sample_fps = int(self.video_framerate) + sample_fps = int(self.feature_framerate) + t_stride = int(round(float(fps) / sample_fps)) + + all_pos = list(range(f_start, f_end + 1, t_stride)) + if len(all_pos) > self.max_frames: + sample_pos = [all_pos[_] for _ in np.linspace(0, len(all_pos) - 1, num=self.max_frames, dtype=int)] + else: + sample_pos = all_pos + + patch_images = [Image.fromarray(f) for f in vreader.get_batch(sample_pos).asnumpy()] + patch_images = torch.stack([self.transform(img) for img in patch_images]) + + patch_images = patch_images.unsqueeze(1) + + slice_len = patch_images.shape[0] + # max_video_length = max_video_length if max_video_length > slice_len else slice_len + max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_len else slice_len + if slice_len < 1: + pass + else: + video[i][:slice_len, ...] = patch_images + else: + print("video path: {} error. 
video id: {}".format(video_path, video_id)) + + # video_mask[:max_video_length] = [1] * max_video_length + for i, v_length in enumerate(max_video_length): + video_mask[i][:v_length] = [1] * v_length + + #print(video.shape, video_mask.shape) + return video, video_mask + + def _get_rawvideo(self, choice_video_ids): + video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.long) + max_video_length = [0] * len(choice_video_ids) + + # Pair x L x T x 3 x H x W + video = np.zeros((len(choice_video_ids), self.max_frames, 1, 3, + self.rawVideoExtractor.size, self.rawVideoExtractor.size), dtype=np.float) + + for i, video_id in enumerate(choice_video_ids): + video_path = self.video_dict[video_id] + + raw_video_data = self.rawVideoExtractor.get_video_data(video_path) + raw_video_data = raw_video_data['video'] + + if len(raw_video_data.shape) > 3: + raw_video_data_clip = raw_video_data + # L x T x 3 x H x W + raw_video_slice = self.rawVideoExtractor.process_raw_data(raw_video_data_clip) + if self.max_frames < raw_video_slice.shape[0]: + if self.slice_framepos == 0: + video_slice = raw_video_slice[:self.max_frames, ...] + elif self.slice_framepos == 1: + video_slice = raw_video_slice[-self.max_frames:, ...] + else: + sample_indx = np.linspace(0, raw_video_slice.shape[0] - 1, num=self.max_frames, dtype=int) + video_slice = raw_video_slice[sample_indx, ...] + else: + video_slice = raw_video_slice + + video_slice = self.rawVideoExtractor.process_frame_order(video_slice, frame_order=self.frame_order) + + slice_len = video_slice.shape[0] + max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_len else slice_len + if slice_len < 1: + pass + else: + video[i][:slice_len, ...] = video_slice + else: + print("video path: {} error. video id: {}".format(video_path, video_id)) + + for i, v_length in enumerate(max_video_length): + video_mask[i][:v_length] = [1] * v_length + + return video, video_mask + + def __getitem__(self, idx): + video_id, caption = self.sentences_dict[idx] + + pairs_text, pairs_mask, pairs_segment, choice_video_ids = self._get_text(video_id, caption) + # video, video_mask = self._get_rawvideo_dec(choice_video_ids) + video, video_mask = self._get_rawvideo(choice_video_ids) + return pairs_text, pairs_mask, pairs_segment, video, video_mask diff --git a/Downstream/Video-Text-Retrieval/dataloaders/dataloader_vatex_retrieval.py b/Downstream/Video-Text-Retrieval/dataloaders/dataloader_vatex_retrieval.py new file mode 100644 index 0000000..6ce3617 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/dataloaders/dataloader_vatex_retrieval.py @@ -0,0 +1,288 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import unicode_literals +from __future__ import print_function + +import os +from torch.utils.data import Dataset +import numpy as np +import pickle +import json +from dataloaders.rawvideo_util import RawVideoExtractor +import io +from decord import VideoReader, cpu +import torch +from PIL import Image +from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize, InterpolationMode + +try: + from petrel_client.client import Client + client = Client() + + # Disable boto logger + import logging + logging.getLogger('boto3').setLevel(logging.WARNING) + logging.getLogger('botocore').setLevel(logging.WARNING) + logging.getLogger('nose').setLevel(logging.WARNING) +except: + client = None + + +class VATEX_DataLoader(Dataset): + """VATEX dataloader""" + def __init__( + self, + subset, + data_path, + features_path, 
+ tokenizer, + max_words=30, + feature_framerate=1.0, + max_frames=100, + image_resolution=224, + frame_order=0, + slice_framepos=0, + ): + self.data_path = data_path + self.features_path = features_path + self.feature_framerate = feature_framerate + self.image_resolution = image_resolution + self.max_words = max_words + self.max_frames = max_frames + self.tokenizer = tokenizer + # 0: ordinary order; 1: reverse order; 2: random order. + self.frame_order = frame_order + assert self.frame_order in [0, 1, 2] + # 0: cut from head frames; 1: cut from tail frames; 2: extract frames uniformly. + self.slice_framepos = slice_framepos + assert self.slice_framepos in [0, 1, 2] + + self.subset = subset + assert self.subset in ["train", "val", "test"] + video_id_path_dict = {} + video_id_path_dict["train"] = os.path.join(self.data_path, "train_list.txt") + video_id_path_dict["val"] = os.path.join(self.data_path, "test_list.txt") + video_id_path_dict["test"] = os.path.join(self.data_path, "test_list.txt") + with open(video_id_path_dict[self.subset], 'r') as fp: + video_ids = [itm.strip() for itm in fp.readlines()] +# ============================================================================================= + + video_json_path_dict = {} + video_json_path_dict["train"] = os.path.join(self.data_path, "vatex_training_v1.0_ceph.json") + video_json_path_dict["val"] = os.path.join(self.data_path, "vatex_validation_v1.0_ceph.json") + video_json_path_dict["test"] = os.path.join(self.data_path, "vatex_validation_v1.0_ceph.json") + captions = {} + video_dict = {} + with open(video_json_path_dict[self.subset], 'r') as f: + json_data = json.load(f) + for itm in json_data: + video_id = itm["videoID"] + if video_id not in video_ids: + continue + if "path" not in itm: + continue + path = itm["path"] + caption = itm["enCap"] + video_dict[video_id] = path + captions[video_id] = caption + +# ============================================================================================================================== + + self.video_dict = video_dict + self.sample_len = 0 + self.sentences_dict = {} + self.cut_off_points = [] + for video_id in video_ids: + if video_id not in captions: + continue + for cap_txt in captions[video_id]: + self.sentences_dict[len(self.sentences_dict)] = (video_id, cap_txt) + self.cut_off_points.append(len(self.sentences_dict)) + + print(f"sentence dict len: {len(self.sentences_dict)}") + print(f"video dict len: {len(video_dict)}") + print(f"video ids len: {len(video_ids)}") + ## below variables are used to multi-sentences retrieval + # self.cut_off_points: used to tag the label when calculate the metric + # self.sentence_num: used to cut the sentence representation + # self.video_num: used to cut the video representation + self.multi_sentence_per_video = True # !!! 
important tag for eval + if self.subset == "val" or self.subset == "test": + self.sentence_num = len(self.sentences_dict) + self.video_num = len(video_dict) + assert len(self.cut_off_points) == self.video_num + + self.sample_len = len(self.sentences_dict) + self.rawVideoExtractor = RawVideoExtractor(framerate=feature_framerate, size=image_resolution) + self.SPECIAL_TOKEN = {"CLS_TOKEN": "<|startoftext|>", "SEP_TOKEN": "<|endoftext|>", + "MASK_TOKEN": "[MASK]", "UNK_TOKEN": "[UNK]", "PAD_TOKEN": "[PAD]"} + self.transform = Compose([ + Resize(image_resolution, interpolation=InterpolationMode.BICUBIC), + CenterCrop(image_resolution), + lambda image: image.convert("RGB"), + ToTensor(), + Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), + # Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ]) + + def __len__(self): + return self.sample_len + + def _get_text(self, video_id, caption): + k = 1 + choice_video_ids = [video_id] + pairs_text = np.zeros((k, self.max_words), dtype=np.long) + pairs_mask = np.zeros((k, self.max_words), dtype=np.long) + pairs_segment = np.zeros((k, self.max_words), dtype=np.long) + + for i, video_id in enumerate(choice_video_ids): + words = self.tokenizer.tokenize(caption) + + words = [self.SPECIAL_TOKEN["CLS_TOKEN"]] + words + total_length_with_CLS = self.max_words - 1 + if len(words) > total_length_with_CLS: + words = words[:total_length_with_CLS] + words = words + [self.SPECIAL_TOKEN["SEP_TOKEN"]] + + input_ids = self.tokenizer.convert_tokens_to_ids(words) + input_mask = [1] * len(input_ids) + segment_ids = [0] * len(input_ids) + while len(input_ids) < self.max_words: + input_ids.append(0) + input_mask.append(0) + segment_ids.append(0) + assert len(input_ids) == self.max_words + assert len(input_mask) == self.max_words + assert len(segment_ids) == self.max_words + + pairs_text[i] = np.array(input_ids) + pairs_mask[i] = np.array(input_mask) + pairs_segment[i] = np.array(segment_ids) + + return pairs_text, pairs_mask, pairs_segment, choice_video_ids + + def _get_rawvideo(self, choice_video_ids): + video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.long) + max_video_length = [0] * len(choice_video_ids) + + # Pair x L x T x 3 x H x W + video = np.zeros((len(choice_video_ids), self.max_frames, 1, 3, + self.rawVideoExtractor.size, self.rawVideoExtractor.size), dtype=np.float) + + for i, video_id in enumerate(choice_video_ids): + video_path = self.video_dict[video_id] + + raw_video_data = self.rawVideoExtractor.get_video_data(video_path) + raw_video_data = raw_video_data['video'] + + if len(raw_video_data.shape) > 3: + raw_video_data_clip = raw_video_data + # L x T x 3 x H x W + raw_video_slice = self.rawVideoExtractor.process_raw_data(raw_video_data_clip) + if self.max_frames < raw_video_slice.shape[0]: + if self.slice_framepos == 0: + video_slice = raw_video_slice[:self.max_frames, ...] + elif self.slice_framepos == 1: + video_slice = raw_video_slice[-self.max_frames:, ...] + else: + sample_indx = np.linspace(0, raw_video_slice.shape[0] - 1, num=self.max_frames, dtype=int) + video_slice = raw_video_slice[sample_indx, ...] + else: + video_slice = raw_video_slice + + video_slice = self.rawVideoExtractor.process_frame_order(video_slice, frame_order=self.frame_order) + + slice_len = video_slice.shape[0] + max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_len else slice_len + if slice_len < 1: + pass + else: + video[i][:slice_len, ...] 
= video_slice + else: + print("video path: {} error. video id: {}".format(video_path, video_id)) + + for i, v_length in enumerate(max_video_length): + video_mask[i][:v_length] = [1] * v_length + + return video, video_mask + + def _get_rawvideo_dec(self, choice_video_ids, s=None, e=None): + # speed up video decode via decord. + # video_mask = np.zeros(self.max_frames, dtype=np.long) + video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.long) + + # max_video_length = 0 + max_video_length = [0] * len(choice_video_ids) + + # T x 3 x H x W + # video = np.zeros((self.max_frames, 3, self.image_resolution, self.image_resolution), dtype=np.float) + video = np.zeros((len(choice_video_ids), self.max_frames, 1, 3, + self.image_resolution, self.image_resolution), dtype=np.float) + + if s is None: + start_time, end_time = None, None + else: + start_time = int(s) + end_time = int(e) + start_time = start_time if start_time >= 0. else 0. + end_time = end_time if end_time >= 0. else 0. + if start_time > end_time: + start_time, end_time = end_time, start_time + elif start_time == end_time: + end_time = start_time + 1 + # video_path = self.video_dict[video_id] + for i, video_id in enumerate(choice_video_ids): + video_path = self.video_dict[video_id] + # video_path = os.path.join(self.features_path, "{}.mp4 ".format(video_id)) + if video_path.startswith("s3://"): + video_bytes = client.get(video_path) + if video_bytes is None: + print("Get video failed from {}".format(video_path)) + continue + video_path = io.BytesIO(video_bytes) + vreader = VideoReader(video_path, ctx=cpu(0)) + + fps = vreader.get_avg_fps() + f_start = 0 if start_time is None else int(start_time * fps) + f_end = int(min(1000000000 if end_time is None else end_time * fps, len(vreader) - 1)) + num_frames = f_end - f_start + 1 + if num_frames > 0: + # T x 3 x H x W + # sample_fps = int(self.video_framerate) + sample_fps = int(self.feature_framerate) + t_stride = int(round(float(fps) / sample_fps)) + + all_pos = list(range(f_start, f_end + 1, t_stride)) + if len(all_pos) > self.max_frames: + sample_pos = [all_pos[_] for _ in np.linspace(0, len(all_pos) - 1, num=self.max_frames, dtype=int)] + else: + sample_pos = all_pos + + patch_images = [Image.fromarray(f) for f in vreader.get_batch(sample_pos).asnumpy()] + patch_images = torch.stack([self.transform(img) for img in patch_images]) + + patch_images = patch_images.unsqueeze(1) + + slice_len = patch_images.shape[0] + # max_video_length = max_video_length if max_video_length > slice_len else slice_len + max_video_length[i] = max_video_length[i] if max_video_length[i] > slice_len else slice_len + if slice_len < 1: + pass + else: + video[i][:slice_len, ...] = patch_images + else: + print("video path: {} error. 
video id: {}".format(video_path, video_id)) + + # video_mask[:max_video_length] = [1] * max_video_length + for i, v_length in enumerate(max_video_length): + video_mask[i][:v_length] = [1] * v_length + + #print(video.shape, video_mask.shape) + return video, video_mask + + def __getitem__(self, idx): + video_id, caption = self.sentences_dict[idx] + + pairs_text, pairs_mask, pairs_segment, choice_video_ids = self._get_text(video_id, caption) + video, video_mask = self._get_rawvideo_dec(choice_video_ids) + return pairs_text, pairs_mask, pairs_segment, video, video_mask \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/dataloaders/rawvideo_util.py b/Downstream/Video-Text-Retrieval/dataloaders/rawvideo_util.py new file mode 100644 index 0000000..24f73b2 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/dataloaders/rawvideo_util.py @@ -0,0 +1,122 @@ +import torch as th +import numpy as np +from PIL import Image +# pytorch=1.7.1 +from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize +# pip install opencv-python +import cv2 +import io +try: + from petrel_client.client import Client + + client = Client() + + # Disable boto logger + import logging + logging.getLogger('boto3').setLevel(logging.WARNING) + logging.getLogger('botocore').setLevel(logging.WARNING) + logging.getLogger('nose').setLevel(logging.WARNING) +except: + client = None + +class RawVideoExtractorCV2(): + def __init__(self, centercrop=False, size=224, framerate=-1, ): + self.centercrop = centercrop + self.size = size + self.framerate = framerate + self.transform = self._transform(self.size) + + def _transform(self, n_px): + return Compose([ + Resize(n_px, interpolation=Image.BICUBIC), + CenterCrop(n_px), + lambda image: image.convert("RGB"), + ToTensor(), + Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), + ]) + + def video_to_tensor(self, video_file, preprocess, sample_fp=0, start_time=None, end_time=None): + if start_time is not None or end_time is not None: + assert isinstance(start_time, int) and isinstance(end_time, int) \ + and start_time > -1 and end_time > start_time + assert sample_fp > -1 + + # Samples a frame sample_fp X frames. 
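+        # In other words: sample_fp frames are taken from every second of video. For each
+        # second in [start_sec, end_sec] the capture is seeked via CAP_PROP_POS_FRAMES and
+        # sample_fp evenly spaced frames are read; s3:// paths are opened through a
+        # presigned URL instead of downloading the full file first.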
+ # cap = cv2.VideoCapture(video_file) + if video_file.startswith("s3://"): + # video_bytes = client.get(video_file) + # assert video_bytes is not None, "Get video failed from {}".format(video_file) + # video_file = video_bytes + # video_file = io.BytesIO(video_bytes) + # print(video_file) + presigned_url = client.generate_presigned_url(video_file, client_method ='get_object', expires_in=36000) + assert presigned_url is not None, "Get video failed from {}".format(presigned_url) + cap = cv2.VideoCapture(presigned_url) + + frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + fps = int(cap.get(cv2.CAP_PROP_FPS)) + + total_duration = (frameCount + fps - 1) // fps + start_sec, end_sec = 0, total_duration + + if start_time is not None: + start_sec, end_sec = start_time, end_time if end_time <= total_duration else total_duration + cap.set(cv2.CAP_PROP_POS_FRAMES, int(start_time * fps)) + + interval = 1 + if sample_fp > 0: + interval = fps // sample_fp + else: + sample_fp = fps + if interval == 0: interval = 1 + + inds = [ind for ind in np.arange(0, fps, interval)] + assert len(inds) >= sample_fp + inds = inds[:sample_fp] + + ret = True + images, included = [], [] + + for sec in np.arange(start_sec, end_sec + 1): + if not ret: break + sec_base = int(sec * fps) + for ind in inds: + cap.set(cv2.CAP_PROP_POS_FRAMES, sec_base + ind) + ret, frame = cap.read() + if not ret: break + frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + images.append(preprocess(Image.fromarray(frame_rgb).convert("RGB"))) + + cap.release() + + if len(images) > 0: + video_data = th.tensor(np.stack(images)) + else: + video_data = th.zeros(1) + return {'video': video_data} + + def get_video_data(self, video_path, start_time=None, end_time=None): + image_input = self.video_to_tensor(video_path, self.transform, sample_fp=self.framerate, start_time=start_time, end_time=end_time) + return image_input + + def process_raw_data(self, raw_video_data): + tensor_size = raw_video_data.size() + tensor = raw_video_data.view(-1, 1, tensor_size[-3], tensor_size[-2], tensor_size[-1]) + return tensor + + def process_frame_order(self, raw_video_data, frame_order=0): + # 0: ordinary order; 1: reverse order; 2: random order. + if frame_order == 0: + pass + elif frame_order == 1: + reverse_order = np.arange(raw_video_data.size(0) - 1, -1, -1) + raw_video_data = raw_video_data[reverse_order, ...] + elif frame_order == 2: + random_order = np.arange(raw_video_data.size(0)) + np.random.shuffle(random_order) + raw_video_data = raw_video_data[random_order, ...] 
+ + return raw_video_data + +# An ordinary video frame extractor based CV2 +RawVideoExtractor = RawVideoExtractorCV2 \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/eval_finetuned_scripts/run_kc4_activitynet_infer.sh b/Downstream/Video-Text-Retrieval/eval_finetuned_scripts/run_kc4_activitynet_infer.sh new file mode 100644 index 0000000..7cdd8ff --- /dev/null +++ b/Downstream/Video-Text-Retrieval/eval_finetuned_scripts/run_kc4_activitynet_infer.sh @@ -0,0 +1,23 @@ +# srun -p video --gres=gpu:1 --cpus-per-task=16 --quotatype=reserved -N1 --job-name=infer_anet \ +python -u -m inference \ + --do_eval \ + --num_thread_reader=8 \ + --n_display=50 \ + --batch_size_val=16 \ + --data_path="./data/ActivityNet/" \ + --features_path="" \ + --datatype="activity" \ + --max_words=77 \ + --max_frames=64 \ + --feature_framerate=1 \ + --pretrained_clip_name="ViT-L/14" \ + --slice_framepos=2 \ + --loose_type \ + --linear_patch=2d \ + --sim_header=meanP \ + --freeze_layer_num=0 \ + --clip_evl \ + --output_dir="./log/msvd_kc4_bs64_vitl14/inference" \ + --finetuned_path="./log/msvd_kc4_bs64_vitl14/pytorch_model.bin" \ + --pretrained_path="" \ + --mergeclip=True \ \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/eval_finetuned_scripts/run_kc4_didemo_infer.sh b/Downstream/Video-Text-Retrieval/eval_finetuned_scripts/run_kc4_didemo_infer.sh new file mode 100644 index 0000000..10790b8 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/eval_finetuned_scripts/run_kc4_didemo_infer.sh @@ -0,0 +1,25 @@ +# srun -p gcc --gres=gpu:1 --cpus-per-task=16 --quotatype=reserved -N1 --job-name=infer_didemo \ +python -u -m inference \ + --do_eval \ + --num_thread_reader=8 \ + --n_display=50 \ + --batch_size_val=8 \ + --data_path="./data/DiDeMo/" \ + --features_path="" \ + --datatype="didemo" \ + --max_words=77 \ + --max_frames=32 \ + --feature_framerate=1 \ + --freeze_layer_num=0 \ + --pretrained_clip_name="ViT-L/14" \ + --slice_framepos=2 \ + --loose_type \ + --linear_patch=2d \ + --interaction=no \ + --sim_header=meanP \ + --output_dir="./log/didemo_kc4_bs64_vitl14/inference" \ + --finetuned_path="./log/didemo_kc4_bs64_vitl14/pytorch_model.bin" \ + --interaction=no \ + --clip_evl \ + --pretrained_path="" \ + --mergeclip=True \ \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/eval_finetuned_scripts/run_kc4_lsmdc_infer.sh b/Downstream/Video-Text-Retrieval/eval_finetuned_scripts/run_kc4_lsmdc_infer.sh new file mode 100644 index 0000000..1da0814 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/eval_finetuned_scripts/run_kc4_lsmdc_infer.sh @@ -0,0 +1,22 @@ +# srun -p video --gres=gpu:1 --cpus-per-task=16 --quotatype=reserved -N1 --job-name=infer_lsmdc \ +python -u -m inference \ + --do_eval \ + --num_thread_reader=8 \ + --n_display=50 \ + --batch_size_val=16 \ + --features_path="" \ + --data_path="./data/LSMDC/" \ + --datatype="lsmdc" \ + --max_words=77 \ + --max_frames=8 \ + --feature_framerate=1 \ + --pretrained_clip_name="ViT-L/14" \ + --slice_framepos=2 \ + --loose_type \ + --linear_patch=2d \ + --sim_header=meanP \ + --output_dir="./log/lsmdc_kc4_bs64_vitl14" \ + --finetuned_path="./log/lsmdc_kc4_bs64_vitl14/pytorch_model.bin" \ + --freeze_layer_num=0 \ + --clip_evl \ + --pretrained_path="" \ \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/eval_finetuned_scripts/run_kc4_msrvtt_infer.sh b/Downstream/Video-Text-Retrieval/eval_finetuned_scripts/run_kc4_msrvtt_infer.sh new file mode 100644 index 0000000..5183e30 --- /dev/null +++ 
b/Downstream/Video-Text-Retrieval/eval_finetuned_scripts/run_kc4_msrvtt_infer.sh @@ -0,0 +1,25 @@ +# srun -p video -N1 -n1 --gres=gpu:1 --cpus-per-task=16 --quotatype=auto --job-name=infer_msrvtt \ +python -u -m inference \ + --do_eval \ + --num_thread_reader=4 \ + --n_display=50 \ + --train_csv="./data/MSR-VTT/anns/MSRVTT_train.9k.csv" \ + --val_csv="./data/MSR-VTT/anns/MSRVTT_JSFUSION_test.csv" \ + --data_path="./data/MSR-VTT/anns/MSRVTT_data.json" \ + --lr=1e-4 \ + --max_words=77 \ + --max_frames=12 \ + --batch_size_val=16 \ + --datatype="msrvtt" \ + --expand_msrvtt_sentences \ + --feature_framerate=1 \ + --slice_framepos=2 \ + --linear_patch=2d \ + --sim_header=meanP \ + --loose_type \ + --pretrained_clip_name="ViT-L/14" \ + --clip_evl \ + --pretrained_path="" \ + --finetuned_path="./log/msrvtt_kc4_bs64_vitl14/pytorch_model.bin" \ + --output_dir="./log/msrvtt_kc4_bs64_vitl14/inference" \ + --mergeclip=True \ \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/eval_finetuned_scripts/run_kc4_msvd_infer.sh b/Downstream/Video-Text-Retrieval/eval_finetuned_scripts/run_kc4_msvd_infer.sh new file mode 100644 index 0000000..813036a --- /dev/null +++ b/Downstream/Video-Text-Retrieval/eval_finetuned_scripts/run_kc4_msvd_infer.sh @@ -0,0 +1,25 @@ +# srun -p gcc --gres=gpu:1 --cpus-per-task=16 --quotatype=reserved -N1 --job-name=infer_msvd \ +python -u -m inference \ + --do_eval \ + --num_thread_reader=8 \ + --n_display=50 \ + --batch_size_val=16 \ + --features_path="" \ + --data_path="./data/MSVD/" \ + --datatype="msvd" \ + --max_words=77 \ + --max_frames=8 \ + --feature_framerate=1 \ + --pretrained_clip_name="ViT-L/14" \ + --slice_framepos=2 \ + --loose_type \ + --linear_patch=2d \ + --interaction=no \ + --sim_header=meanP \ + --output_dir="./log/msvd_kc4_bs64_vitl14/inference" \ + --finetuned_path="./log/msvd_kc4_bs64_vitl14/pytorch_model.bin" \ + --freeze_layer_num=0 \ + --interaction=no \ + --clip_evl \ + --pretrained_path="" \ + --mergeclip=True \ \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/eval_finetuned_scripts/run_kc4_vatex_infer.sh b/Downstream/Video-Text-Retrieval/eval_finetuned_scripts/run_kc4_vatex_infer.sh new file mode 100644 index 0000000..39af04a --- /dev/null +++ b/Downstream/Video-Text-Retrieval/eval_finetuned_scripts/run_kc4_vatex_infer.sh @@ -0,0 +1,26 @@ +# srun -p video --gres=gpu:1 --cpus-per-task=16 --quotatype=reserved -N1 --job-name=infer_vatex \ +python -u -m inference \ + --do_eval \ + --num_thread_reader=8 \ + --n_display=50 \ + --coef_lr=5e-3 \ + --batch_size_val=16 \ + --features_path="" \ + --data_path="./data/VATEX/" \ + --datatype="vatex" \ + --max_words=77 \ + --max_frames=8 \ + --feature_framerate=1 \ + --pretrained_clip_name="ViT-L/14" \ + --slice_framepos=2 \ + --loose_type \ + --linear_patch=2d \ + --interaction=no \ + --sim_header=meanP \ + --output_dir="./log/vatex_kc4_bs64_vitl14/inference" \ + --finetuned_path="./log/vatex_kc4_bs64_vitl14/pytorch_model.bin" \ + --freeze_layer_num=0 \ + --interaction=no \ + --clip_evl \ + --pretrained_path="" \ + --mergeclip=True \ \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/finetune_scripts/ret_activity.sh b/Downstream/Video-Text-Retrieval/finetune_scripts/ret_activity.sh new file mode 100644 index 0000000..e6f1dc8 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/finetune_scripts/ret_activity.sh @@ -0,0 +1,24 @@ +# srun -p video --gres=gpu:4 --cpus-per-task=64 --quotatype=auto -N1 --job-name=ret_activity \ +python -m 
torch.distributed.launch --nproc_per_node=4 main_task_retrieval.py \ + --do_train \ + --num_thread_reader=8 \ + --n_display=50 \ + --epochs=5 \ + --lr=1e-4 \ + --coef_lr=1e-3 \ + --batch_size=32 \ + --batch_size_val=64 \ + --data_path="./data/ActivityNet/" \ + --features_path="" \ + --datatype="activity" \ + --max_words=64 \ + --max_frames=64 \ + --feature_framerate=1 \ + --freeze_layer_num=0 \ + --pretrained_clip_name="ViT-B/16" \ + --slice_framepos=2 \ + --loose_type \ + --linear_patch=2d \ + --interaction=no \ + --sim_header=meanP \ + --output_dir="" diff --git a/Downstream/Video-Text-Retrieval/finetune_scripts/ret_didemo.sh b/Downstream/Video-Text-Retrieval/finetune_scripts/ret_didemo.sh new file mode 100644 index 0000000..10005f2 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/finetune_scripts/ret_didemo.sh @@ -0,0 +1,23 @@ +# srun -p video --gres=gpu:8 --cpus-per-task=128 --quotatype=spot -N1 --job-name=ret_didemo \ +python -m torch.distributed.launch --nproc_per_node=4 main_task_retrieval.py \ + --do_train \ + --num_thread_reader=8 \ + --n_display=50 \ + --epochs=5 \ + --lr=1e-3 \ + --coef_lr=1e-2 \ + --batch_size=64 \ + --batch_size_val=64 \ + --data_path="./data/DiDeMo/" \ + --features_path="" \ + --datatype="didemo" \ + --max_words=64 \ + --max_frames=32 \ + --feature_framerate=1 \ + --freeze_layer_num=0 \ + --pretrained_clip_name="ViT-B/16" \ + --slice_framepos=2 \ + --loose_type \ + --linear_patch=2d \ + --sim_header=meanP \ + --output_dir="" diff --git a/Downstream/Video-Text-Retrieval/finetune_scripts/ret_lsmdc.sh b/Downstream/Video-Text-Retrieval/finetune_scripts/ret_lsmdc.sh new file mode 100644 index 0000000..d3cd2f4 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/finetune_scripts/ret_lsmdc.sh @@ -0,0 +1,24 @@ +# srun -p video --gres=gpu:4 --cpus-per-task=64 --quotatype=auto -N1 --job-name=ret_lsmdc \ +python -m torch.distributed.launch --nproc_per_node=4 main_task_retrieval.py \ + --do_train \ + --num_thread_reader=8 \ + --n_display=50 \ + --epochs=5 \ + --lr=1e-4 \ + --coef_lr=1e-3 \ + --batch_size=128 \ + --batch_size_val=128 \ + --data_path="./data/LSMDC/" \ + --features_path="" \ + --datatype="lsmdc" \ + --max_words=32 \ + --max_frames=12 \ + --feature_framerate=1 \ + --freeze_layer_num=0 \ + --pretrained_clip_name="ViT-B/16" \ + --slice_framepos=2 \ + --loose_type \ + --linear_patch=2d \ + --interaction=no \ + --sim_header="meanP" \ + --output_dir="" \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/finetune_scripts/ret_msrvtt.sh b/Downstream/Video-Text-Retrieval/finetune_scripts/ret_msrvtt.sh new file mode 100644 index 0000000..e84c086 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/finetune_scripts/ret_msrvtt.sh @@ -0,0 +1,27 @@ +# srun -p video --gres=gpu:4 --cpus-per-task=64 --quotatype=auto -N1 --job-name=ret_msrvtt \ +python -m torch.distributed.launch --nproc_per_node=4 main_task_retrieval.py \ + --do_train \ + --num_thread_reader=8 \ + --n_display=50 \ + --epochs=5 \ + --lr=1e-3 \ + --coef_lr=5e-3 \ + --batch_size=128 \ + --batch_size_val=128 \ + --train_csv="./data/MSR-VTT/anns/MSRVTT_train.9k.csv" \ + --val_csv="./data/MSR-VTT/anns/MSRVTT_JSFUSION_test.csv" \ + --features_path="" \ + --data_path="./data/MSR-VTT/anns/MSRVTT_data.json" \ + --datatype="msrvtt" \ + --expand_msrvtt_sentences \ + --max_words=32 \ + --max_frames=12 \ + --freeze_layer_num=0 \ + --feature_framerate=1 \ + --pretrained_clip_name="ViT-B/32" \ + --slice_framepos=2 \ + --loose_type \ + --linear_patch=2d \ + --interaction=no \ + --sim_header="meanP" \ + 
--output_dir="" \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/finetune_scripts/ret_msvd.sh b/Downstream/Video-Text-Retrieval/finetune_scripts/ret_msvd.sh new file mode 100644 index 0000000..cf64af9 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/finetune_scripts/ret_msvd.sh @@ -0,0 +1,24 @@ +# srun -p video --gres=gpu:4 --cpus-per-task=64 --quotatype=auto -N1 --job-name=ret_msvd \ +python -m torch.distributed.launch --nproc_per_node=4 main_task_retrieval.py \ + --do_train \ + --num_thread_reader=8 \ + --n_display=50 \ + --epochs=5 \ + --lr=1e-4 \ + --coef_lr=1e-3 \ + --batch_size=128 \ + --batch_size_val=128 \ + --features_path="" \ + --data_path="./data/MSVD/" \ + --datatype="msvd" \ + --max_words=32 \ + --max_frames=12 \ + --feature_framerate=1 \ + --freeze_layer_num=0 \ + --pretrained_clip_name="ViT-B/16" \ + --slice_framepos=2 \ + --loose_type \ + --linear_patch=2d \ + --interaction=no \ + --sim_header="meanP" \ + --output_dir="" diff --git a/Downstream/Video-Text-Retrieval/finetune_scripts/ret_vatex.sh b/Downstream/Video-Text-Retrieval/finetune_scripts/ret_vatex.sh new file mode 100644 index 0000000..fb416f1 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/finetune_scripts/ret_vatex.sh @@ -0,0 +1,24 @@ +# srun -p video --gres=gpu:4 --cpus-per-task=64 --quotatype=auto -N1 --job-name=ret_vatex \ +python -m torch.distributed.launch --nproc_per_node=4 main_task_retrieval.py \ + --do_train \ + --num_thread_reader=8 \ + --n_display=50 \ + --epochs=5 \ + --lr=1e-4 \ + --coef_lr=1e-3 \ + --batch_size=128 \ + --batch_size_val=128 \ + --features_path="" \ + --data_path="./data/VATEX/" \ + --datatype="vatex" \ + --max_words=32 \ + --max_frames=12 \ + --feature_framerate=1 \ + --freeze_layer_num=0 \ + --pretrained_clip_name="ViT-B/16" \ + --slice_framepos=2 \ + --loose_type \ + --linear_patch=2d \ + --interaction=no \ + --sim_header="meanP" \ + --output_dir="" diff --git a/Downstream/Video-Text-Retrieval/finetune_scripts/train_activitynet.sh b/Downstream/Video-Text-Retrieval/finetune_scripts/train_activitynet.sh new file mode 100644 index 0000000..a4f205e --- /dev/null +++ b/Downstream/Video-Text-Retrieval/finetune_scripts/train_activitynet.sh @@ -0,0 +1,26 @@ +# srun -p video -N1 -n8 --gres=gpu:8 --job-name=train_anet --quotatype=reserved --cpus-per-task=16 \ +python -u -m main_task_retrieval \ + --do_train \ + --num_thread_reader=8 \ + --n_display=50 \ + --epochs=30 \ + --lr=1e-3 \ + --coef_lr=2e-3 \ + --batch_size=64 \ + --batch_size_val=16 \ + --data_path="./data/ActivityNet/" \ + --features_path="" \ + --datatype="activity" \ + --max_words=77 \ + --max_frames=64 \ + --feature_framerate=1 \ + --pretrained_clip_name="ViT-L/14" \ + --slice_framepos=2 \ + --loose_type \ + --linear_patch=2d \ + --sim_header=meanP \ + --freeze_layer_num=0 \ + --expand_msrvtt_sentences \ + --clip_evl \ + --output_dir="" \ + --pretrained_path="" \ diff --git a/Downstream/Video-Text-Retrieval/finetune_scripts/train_didemo.sh b/Downstream/Video-Text-Retrieval/finetune_scripts/train_didemo.sh new file mode 100644 index 0000000..8d88932 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/finetune_scripts/train_didemo.sh @@ -0,0 +1,26 @@ +# srun -p video -N1 -n8 --gres=gpu:8 --job-name=train_didemo --quotatype=reserved --cpus-per-task=16 \ +python -u -m main_task_retrieval \ + --do_train \ + --num_thread_reader=8 \ + --n_display=50 \ + --epochs=5 \ + --lr=1e-3 \ + --coef_lr=2e-3 \ + --batch_size=16 \ + --batch_size_val=16 \ + --data_path="./data/DiDeMo/" \ + --features_path="" \ + 
--datatype="didemo" \ + --max_words=77 \ + --max_frames=32 \ + --feature_framerate=1 \ + --pretrained_clip_name="ViT-L/14" \ + --slice_framepos=2 \ + --loose_type \ + --linear_patch=2d \ + --sim_header=meanP \ + --freeze_layer_num=0 \ + --expand_msrvtt_sentences \ + --clip_evl \ + --output_dir="" \ + --pretrained_path="" \ diff --git a/Downstream/Video-Text-Retrieval/finetune_scripts/train_lsmdc.sh b/Downstream/Video-Text-Retrieval/finetune_scripts/train_lsmdc.sh new file mode 100644 index 0000000..3ca1df8 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/finetune_scripts/train_lsmdc.sh @@ -0,0 +1,27 @@ +# srun -p video -N1 -n8 --gres=gpu:8 --cpus-per-task=12 --quotatype=reserved --job-name=train_lsmdc \ +python -u -m main_task_retrieval \ + --do_train \ + --num_thread_reader=8 \ + --n_display=50 \ + --epochs=5 \ + --lr=1e-3 \ + --coef_lr=2e-3 \ + --batch_size=64 \ + --batch_size_val=16 \ + --features_path="" \ + --data_path="./data/LSMDC/" \ + --datatype="lsmdc" \ + --max_words=77 \ + --max_frames=8 \ + --feature_framerate=1 \ + --pretrained_clip_name="ViT-L/14" \ + --slice_framepos=2 \ + --loose_type \ + --linear_patch=2d \ + --sim_header=meanP \ + --output_dir="" \ + --freeze_layer_num=0 \ + --expand_msrvtt_sentences \ + --mergeclip=True \ + --clip_evl \ + --pretrained_path="" \ \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/finetune_scripts/train_msrvtt.sh b/Downstream/Video-Text-Retrieval/finetune_scripts/train_msrvtt.sh new file mode 100644 index 0000000..98a4744 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/finetune_scripts/train_msrvtt.sh @@ -0,0 +1,27 @@ +# srun -p video -N1 -n4 --gres=gpu:4 --job-name=train_msrvtt --quotatype=reserved --cpus-per-task=16 \ +python -u -m main_task_retrieval \ + --do_train \ + --num_thread_reader=4 \ + --epochs=5 \ + --batch_size=128 \ + --n_display=50 \ + --train_csv="data/MSR-VTT/anns/MSRVTT_train.9k.csv" \ + --val_csv="data/MSR-VTT/anns/MSRVTT_JSFUSION_test.csv" \ + --data_path="data/MSR-VTT/anns/MSRVTT_data.json" \ + --lr=1e-3 \ + --max_words=77 \ + --max_frames=12 \ + --batch_size_val=16 \ + --datatype="msrvtt" \ + --expand_msrvtt_sentences \ + --feature_framerate=1 \ + --coef_lr=4e-3 \ + --freeze_layer_num=0 \ + --slice_framepos=2 \ + --linear_patch=2d \ + --sim_header meanP \ + --loose_type \ + --pretrained_clip_name="ViT-L/14" \ + --output_dir="" \ + --clip_evl \ + --pretrained_path="" \ diff --git a/Downstream/Video-Text-Retrieval/finetune_scripts/train_msvd.sh b/Downstream/Video-Text-Retrieval/finetune_scripts/train_msvd.sh new file mode 100644 index 0000000..c0e5ec8 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/finetune_scripts/train_msvd.sh @@ -0,0 +1,26 @@ +# srun -p video -N1 -n8 --gres=gpu:8 --cpus-per-task=16 --quotatype=reserved --job-name=train_msvd \ +python -u -m main_task_retrieval \ + --do_train \ + --num_thread_reader=8 \ + --n_display=50 \ + --epochs=5 \ + --lr=1e-3 \ + --coef_lr=0.0025 \ + --batch_size=64 \ + --batch_size_val=16 \ + --features_path="" \ + --data_path="./data/MSVD/" \ + --datatype="msvd" \ + --max_words=77 \ + --max_frames=8 \ + --feature_framerate=1 \ + --pretrained_clip_name="ViT-L/14" \ + --slice_framepos=2 \ + --loose_type \ + --linear_patch=2d \ + --sim_header=meanP \ + --output_dir="" \ + --freeze_layer_num=0 \ + --expand_msrvtt_sentences \ + --clip_evl \ + --pretrained_path="" \ diff --git a/Downstream/Video-Text-Retrieval/finetune_scripts/train_vatex.sh b/Downstream/Video-Text-Retrieval/finetune_scripts/train_vatex.sh new file mode 100644 index 0000000..9dac799 
--- /dev/null +++ b/Downstream/Video-Text-Retrieval/finetune_scripts/train_vatex.sh @@ -0,0 +1,26 @@ +# srun -p video -N1 -n8 --gres=gpu:8 --job-name=train_vatex --quotatype=reserved --cpus-per-task=16 \ +python -u -m main_task_retrieval \ + --do_train \ + --num_thread_reader=8 \ + --n_display=50 \ + --epochs=5 \ + --lr=1e-3 \ + --coef_lr=5e-3 \ + --batch_size=128 \ + --batch_size_val=16 \ + --features_path="s3://video_pub/VATEX/" \ + --data_path="./data/VATEX/" \ + --datatype="vatex" \ + --max_words=77 \ + --max_frames=8 \ + --feature_framerate=1 \ + --pretrained_clip_name="ViT-L/14" \ + --slice_framepos=2 \ + --loose_type \ + --linear_patch=2d \ + --sim_header=meanP \ + --freeze_layer_num=0 \ + --expand_msrvtt_sentences \ + --clip_evl \ + --output_dir="" \ + --pretrained_path="" \ diff --git a/Downstream/Video-Text-Retrieval/inference.py b/Downstream/Video-Text-Retrieval/inference.py new file mode 100644 index 0000000..f88d09c --- /dev/null +++ b/Downstream/Video-Text-Retrieval/inference.py @@ -0,0 +1,625 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import unicode_literals +from __future__ import print_function + +import torch +import numpy as np +import random +import os +from metrics import compute_metrics, tensor_text_to_video_metrics, tensor_video_to_text_sim +import time +import argparse +from modules.tokenization_clip import SimpleTokenizer as ClipTokenizer +from modules.file_utils import PYTORCH_PRETRAINED_BERT_CACHE +from modules.modeling import CLIP4Clip +from modules.optimization import BertAdam + +from util import parallel_apply, get_logger +from dataloaders.data_dataloaders import DATALOADER_DICT +import torch.distributed as dist +import subprocess +from ipdb import set_trace +import torch.nn as nn +from scipy.special import softmax + +# torch.distributed.init_process_group(backend="nccl") + +global logger + +def get_args(description='CLIP4Clip on Retrieval Task'): + parser = argparse.ArgumentParser(description=description) + parser.add_argument("--do_pretrain", action='store_true', help="Whether to run training.") + parser.add_argument("--do_train", action='store_true', help="Whether to run training.") + parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") + + parser.add_argument('--train_csv', type=str, default='data/MSR-VTT/anns/MSRVTT_train.9k.csv', help='') + parser.add_argument('--val_csv', type=str, default='data/MSR-VTT/anns/MSRVTT_JSFUSION_test.csv', help='') + parser.add_argument('--data_path', type=str, default='data/caption.pickle', help='data pickle file path') + # parser.add_argument('--features_path', type=str, default='data/videos_feature.pickle', help='feature path') + parser.add_argument('--features_path', type=str, default='s3://video_pub/MSR-VTT/videos', help='feature path') + + parser.add_argument('--num_thread_reader', type=int, default=4, help='') + parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate') + parser.add_argument('--epochs', type=int, default=20, help='upper epoch limit') + parser.add_argument('--batch_size', type=int, default=256, help='batch size') + parser.add_argument('--batch_size_val', type=int, default=3500, help='batch size eval') + parser.add_argument('--lr_decay', type=float, default=0.9, help='Learning rate exp epoch decay') + parser.add_argument('--n_display', type=int, default=100, help='Information display frequence') + parser.add_argument('--video_dim', type=int, default=1024, help='video feature 
dimension') + parser.add_argument('--seed', type=int, default=42, help='random seed') + parser.add_argument('--max_words', type=int, default=20, help='') + parser.add_argument('--max_frames', type=int, default=100, help='') + parser.add_argument('--feature_framerate', type=int, default=1, help='') + parser.add_argument('--margin', type=float, default=0.1, help='margin for loss') + parser.add_argument('--hard_negative_rate', type=float, default=0.5, help='rate of intra negative sample') + parser.add_argument('--negative_weighting', type=int, default=1, help='Weight the loss for intra negative') + parser.add_argument('--n_pair', type=int, default=1, help='Num of pair to output from data loader') + + #parser.add_argument("--output_dir", default=None, type=str, required=True, + # help="The output directory where the model predictions and checkpoints will be written.") + parser.add_argument("--output_dir", default='/mnt/lustre/xujilan.vendor/exps/clip4clip', type=str, required=True, + help="The output directory where the model predictions and checkpoints will be written.") + parser.add_argument("--cross_model", default="cross-base", type=str, required=False, help="Cross module") + parser.add_argument("--init_model", default=None, type=str, required=False, help="Initial model.") + parser.add_argument("--resume_model", default=None, type=str, required=False, help="Resume train model.") + parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") + parser.add_argument("--warmup_proportion", default=0.1, type=float, + help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.") + parser.add_argument('--gradient_accumulation_steps', type=int, default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.") + parser.add_argument('--n_gpu', type=int, default=1, help="Changed in the execute process.") + + parser.add_argument("--cache_dir", default="", type=str, + help="Where do you want to store the pre-trained models downloaded from s3") + + parser.add_argument('--fp16', action='store_true', + help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") + parser.add_argument('--fp16_opt_level', type=str, default='O1', + help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." + "See details at https://nvidia.github.io/apex/amp.html") + + parser.add_argument("--task_type", default="retrieval", type=str, help="Point the task `retrieval` to finetune.") + parser.add_argument("--datatype", default="msrvtt", type=str, help="Point the dataset to finetune.") + + parser.add_argument("--world_size", default=0, type=int, help="distribted training") + parser.add_argument("--local_rank", default=0, type=int, help="distribted training") + parser.add_argument("--rank", default=0, type=int, help="distribted training") + parser.add_argument('--coef_lr', type=float, default=1., help='coefficient for bert branch.') + parser.add_argument('--use_mil', action='store_true', help="Whether use MIL as Miech et. al. (2020).") + parser.add_argument('--sampled_use_mil', action='store_true', help="Whether MIL, has a high priority than use_mil.") + + parser.add_argument('--text_num_hidden_layers', type=int, default=12, help="Layer NO. of text.") + parser.add_argument('--visual_num_hidden_layers', type=int, default=12, help="Layer NO. of visual.") + parser.add_argument('--cross_num_hidden_layers', type=int, default=4, help="Layer NO. 
of cross.") + + parser.add_argument('--loose_type', action='store_true', help="Default using tight type for retrieval.") + parser.add_argument('--expand_msrvtt_sentences', action='store_true', help="") + + parser.add_argument('--train_frame_order', type=int, default=0, choices=[0, 1, 2], + help="Frame order, 0: ordinary order; 1: reverse order; 2: random order.") + parser.add_argument('--eval_frame_order', type=int, default=0, choices=[0, 1, 2], + help="Frame order, 0: ordinary order; 1: reverse order; 2: random order.") + + parser.add_argument('--freeze_layer_num', type=int, default=0, help="Layer NO. of CLIP need to freeze.") + parser.add_argument('--slice_framepos', type=int, default=0, choices=[0, 1, 2], + help="0: cut from head frames; 1: cut from tail frames; 2: extract frames uniformly.") + parser.add_argument('--linear_patch', type=str, default="2d", choices=["2d", "3d"], + help="linear projection of flattened patches.") + parser.add_argument('--sim_header', type=str, default="meanP", + choices=["meanP", "seqLSTM", "seqTransf", "tightTransf"], + help="choice a similarity header.") + + #### CLIP KC/EVL ###### + parser.add_argument("--zeroshot", action='store_true', help="Choose a CLIP version") + parser.add_argument("--pretrained_clip_name", default="ViT-B/32", type=str, help="Choose a CLIP version") + parser.add_argument("--clip_evl", action='store_true', help="Choose a CLIP version") + parser.add_argument("--clip_kc", action='store_true', help="Choose a CLIP version") + parser.add_argument("--use_dsl", action='store_true', help="Choose a CLIP version") + parser.add_argument("--clip_kc2", action='store_true', help="This is for ViT-B/16") + parser.add_argument("--clip_kc4", action='store_true', help="This is for ViT-L/14") + + + ### DRL ### + parser.add_argument("--interaction", type=str, default='no', help="Choose a CLIP version") + parser.add_argument("--wti_arch", type=int, default=0, help="Choose a CLIP version") + parser.add_argument("--cdcr", type=int, default=0, help="Choose a CLIP version") + parser.add_argument("--pretrained_path", type=str, default=None, help="Choose a CLIP version") + parser.add_argument("--mergeclip", type=bool, default=False, help="Choose a CLIP version") + parser.add_argument("--mergeweight", type=float, default=0.5, help="Choose a CLIP version") + parser.add_argument("--use_capdecoder", type=bool, default=False, help="Choose a CLIP version") + + parser.add_argument("--finetuned_path", type=str, default=None, help="Choose a CLIP version") + + args = parser.parse_args() + + if args.sim_header == "tightTransf": + args.loose_type = False + + # Check paramenters + if args.gradient_accumulation_steps < 1: + raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( + args.gradient_accumulation_steps)) + if not args.do_train and not args.do_eval: + raise ValueError("At least one of `do_train` or `do_eval` must be True.") + + args.batch_size = int(args.batch_size / args.gradient_accumulation_steps) + + return args + +def setup_for_distributed(is_master): + """ + This function disables printing when not in master process + """ + import builtins as __builtin__ + builtin_print = __builtin__.print + + def print(*args, **kwargs): + force = kwargs.pop('force', False) + if is_master or force: + builtin_print(*args, **kwargs) + + __builtin__.print = print + +def init_distributed_mode(args): + # launched with torch.distributed.launch + if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: + args.rank = int(os.environ["RANK"]) + 
args.world_size = int(os.environ['WORLD_SIZE']) + args.gpu = int(os.environ['LOCAL_RANK']) + # launched with submitit on a slurm cluster + elif 'SLURM_PROCID' in os.environ: + #args.rank = int(os.environ['SLURM_PROCID']) + #args.gpu = args.rank % torch.cuda.device_count() + proc_id = int(os.environ['SLURM_PROCID']) + ntasks = os.environ['SLURM_NTASKS'] + node_list = os.environ['SLURM_NODELIST'] + num_gpus = torch.cuda.device_count() + addr = subprocess.getoutput( + 'scontrol show hostname {} | head -n1'.format(node_list) + ) + master_port = os.environ.get('MASTER_PORT', '29498') + master_port = "29481" + os.environ['MASTER_PORT'] = master_port + os.environ['MASTER_ADDR'] = addr + os.environ['WORLD_SIZE'] = str(ntasks) + os.environ['RANK'] = str(proc_id) + os.environ['LOCAL_RANK'] = str(proc_id % num_gpus) + os.environ['LOCAL_SIZE'] = str(num_gpus) + args.dist_url = 'env://' + args.world_size = int(ntasks) + args.rank = int(proc_id) + args.gpu = int(proc_id % num_gpus) + print(f'SLURM MODE: proc_id: {proc_id}, ntasks: {ntasks}, node_list: {node_list}, num_gpus:{num_gpus}, addr:{addr}, master port:{master_port}' ) + + # launched naively with `python main_dino.py` + # we manually add MASTER_ADDR and MASTER_PORT to env variables + elif torch.cuda.is_available(): + print('Will run the code on one GPU.') + args.rank, args.gpu, args.world_size = 0, 0, 1 + os.environ['MASTER_ADDR'] = '127.0.0.1' + os.environ['MASTER_PORT'] = '29500' + else: + print('Does not support training without GPU.') + sys.exit(1) + + dist.init_process_group( + backend="nccl", + init_method=args.dist_url, + world_size=args.world_size, + rank=args.rank, + ) + + torch.cuda.set_device(args.gpu) + print('| distributed init (rank {}): {}'.format( + args.rank, args.dist_url), flush=True) + dist.barrier() + setup_for_distributed(args.rank == 0) + +# def init_distributed_mode(args): +# # launched with torch.distributed.launch +# if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: +# args.rank = int(os.environ["RANK"]) +# args.world_size = int(os.environ['WORLD_SIZE']) +# args.gpu = int(os.environ['LOCAL_RANK']) +# # launched with submitit on a slurm cluster +# if 'SLURM_PROCID' in os.environ: +# proc_id = int(os.environ['SLURM_PROCID']) +# ntasks = os.environ['SLURM_NTASKS'] +# node_list = os.environ['SLURM_NODELIST'] +# num_gpus = torch.cuda.device_count() +# addr = subprocess.getoutput( +# 'scontrol show hostname {} | head -n1'.format(node_list) +# ) +# master_port = os.environ.get('MASTER_PORT', '29489') +# os.environ['MASTER_PORT'] = master_port +# os.environ['MASTER_ADDR'] = addr +# os.environ['WORLD_SIZE'] = str(ntasks) +# os.environ['RANK'] = str(proc_id) +# args.dist_url = 'env://' +# args.world_size = int(ntasks) * num_gpus +# print(f'SLURM MODE: proc_id: {proc_id}, ntasks: {ntasks}, node_list: {node_list}, num_gpus:{num_gpus}, addr:{addr}, master port:{master_port}' ) + +# dist.init_process_group( +# backend="nccl", +# init_method=args.dist_url, +# world_size=args.world_size, +# rank=args.rank, +# ) + +# torch.cuda.set_device(args.gpu) +# print('| distributed init (rank {}): {}'.format( +# args.rank, args.dist_url), flush=True) +# dist.barrier() +# setup_for_distributed(args.rank == 0) + +def set_seed_logger(args): + global logger + # predefining random initial seeds + random.seed(args.seed) + os.environ['PYTHONHASHSEED'] = str(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + torch.cuda.manual_seed_all(args.seed) # if you are using multi-GPU. 
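The seeding calls above, together with the cuDNN flags that follow, are the usual full-determinism recipe. Condensed into a standalone helper for reference (the name `seed_everything` is ours, not part of the repo), it looks roughly like:

```python
import os
import random

import numpy as np
import torch


def seed_everything(seed: int = 42):
    """Rough sketch of the reproducibility setup used in set_seed_logger."""
    random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)            # seed every visible GPU
    torch.backends.cudnn.benchmark = False      # disable autotuning for repeatability
    torch.backends.cudnn.deterministic = True   # prefer deterministic cuDNN kernels
```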
+ torch.backends.cudnn.benchmark = False + torch.backends.cudnn.deterministic = True + + # world_size = torch.distributed.get_world_size() + # torch.cuda.set_device(args.local_rank) + # args.world_size = world_size + # rank = torch.distributed.get_rank() + # args.rank = rank + init_distributed_mode(args) + + + if not os.path.exists(args.output_dir): + os.makedirs(args.output_dir, exist_ok=True) + + logger = get_logger(os.path.join(args.output_dir, "log.txt")) + + if args.rank == 0: + logger.info("Effective parameters:") + for key in sorted(args.__dict__): + logger.info(" <<< {}: {}".format(key, args.__dict__[key])) + + return args + +def init_device(args, local_rank): + global logger + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu", args.rank) + + n_gpu = torch.cuda.device_count() + logger.info("device: {} n_gpu: {}".format(device, n_gpu)) + args.n_gpu = n_gpu + + if args.batch_size % args.n_gpu != 0 or args.batch_size_val % args.n_gpu != 0: + raise ValueError("Invalid batch_size/batch_size_val and n_gpu parameter: {}%{} and {}%{}, should be == 0".format( + args.batch_size, args.n_gpu, args.batch_size_val, args.n_gpu)) + + return device, n_gpu + +def init_model(args, device, n_gpu, local_rank): + + if args.init_model: + model_state_dict = torch.load(args.init_model, map_location='cpu') + else: + model_state_dict = None + + # Prepare model + cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed') + model = CLIP4Clip.from_pretrained(args.cross_model, cache_dir=cache_dir, state_dict=model_state_dict, task_config=args) + + + # set_trace() + ### here we load the finetuned model (if needed) ### + if args.finetuned_path is not None: + finetuned_ckpt = torch.load(args.finetuned_path, map_location='cpu') + model.load_state_dict(finetuned_ckpt) + # set_trace() + + model.cuda() + + return model + +def prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, local_rank, coef_lr=1.): + + if hasattr(model, 'module'): + model = model.module + + param_optimizer = list(model.named_parameters()) + no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] + + decay_param_tp = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_decay)] + no_decay_param_tp = [(n, p) for n, p in param_optimizer if any(nd in n for nd in no_decay)] + + decay_clip_param_tp = [(n, p) for n, p in decay_param_tp if "clip." in n] + decay_noclip_param_tp = [(n, p) for n, p in decay_param_tp if "clip." not in n] + + no_decay_clip_param_tp = [(n, p) for n, p in no_decay_param_tp if "clip." in n] + no_decay_noclip_param_tp = [(n, p) for n, p in no_decay_param_tp if "clip." 
not in n] + + weight_decay = 0.2 + optimizer_grouped_parameters = [ + {'params': [p for n, p in decay_clip_param_tp], 'weight_decay': weight_decay, 'lr': args.lr * coef_lr}, + {'params': [p for n, p in decay_noclip_param_tp], 'weight_decay': weight_decay}, + {'params': [p for n, p in no_decay_clip_param_tp], 'weight_decay': 0.0, 'lr': args.lr * coef_lr}, + {'params': [p for n, p in no_decay_noclip_param_tp], 'weight_decay': 0.0} + ] + + scheduler = None + optimizer = BertAdam(optimizer_grouped_parameters, lr=args.lr, warmup=args.warmup_proportion, + schedule='warmup_cosine', b1=0.9, b2=0.98, e=1e-6, + t_total=num_train_optimization_steps, weight_decay=weight_decay, + max_grad_norm=1.0) + + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[torch.cuda.current_device()], output_device=torch.cuda.current_device(), + find_unused_parameters=False) + #model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], + # output_device=local_rank, find_unused_parameters=True) + + return optimizer, scheduler, model + +# def load_model(epoch, args, n_gpu, device, model_file=None): +# #if model_file is None or len(model_file) == 0: +# # model_file = os.path.join(args.output_dir, "pytorch_model.bin.{}".format(epoch)) +# if model_file is None or len(model_file) == 0: +# model_file = os.path.join(args.output_dir, "pytorch_model.bin") +# if os.path.exists(model_file): +# model_state_dict = torch.load(model_file, map_location='cpu') +# if args.rank == 0: +# logger.info("Model loaded from %s", model_file) +# # Prepare model +# # cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed') +# # model = CLIP4Clip.from_pretrained(args.cross_model, cache_dir=cache_dir, state_dict=model_state_dict, task_config=args) +# ### Prepare Model ### +# set_trace() +# ### here we load the finetuned model ### +# if args.finetuned_path is not None: +# model.load_state_dict(model_state_dict) +# set_trace() + +# model.to(device) +# else: +# model = None +# return model + +def _run_on_single_gpu(model, batch_list_t, batch_list_v, batch_sequence_output_list, batch_visual_output_list): + sim_matrix = [] + for idx1, b1 in enumerate(batch_list_t): + input_mask, segment_ids, *_tmp = b1 + sequence_output = batch_sequence_output_list[idx1] + each_row = [] + for idx2, b2 in enumerate(batch_list_v): + video_mask, *_tmp = b2 + visual_output = batch_visual_output_list[idx2] + b1b2_logits, *_tmp = model.get_similarity_logits(sequence_output, visual_output, input_mask, video_mask, + loose_type=model.loose_type) + b1b2_logits = b1b2_logits.cpu().detach().numpy() + each_row.append(b1b2_logits) + each_row = np.concatenate(tuple(each_row), axis=-1) + sim_matrix.append(each_row) + return sim_matrix + + +def eval_epoch(args, model, test_dataloader, device, n_gpu): + + if hasattr(model, 'module'): + model = model.module.to(device) + else: + model = model.to(device) + + # ################################################################# + ## below variables are used to multi-sentences retrieval + # multi_sentence_: important tag for eval + # cut_off_points: used to tag the label when calculate the metric + # sentence_num: used to cut the sentence representation + # video_num: used to cut the video representation + # ################################################################# + multi_sentence_ = False + cut_off_points_, sentence_num_, video_num_ = [], -1, -1 + if hasattr(test_dataloader.dataset, 'multi_sentence_per_video') \ + and 
test_dataloader.dataset.multi_sentence_per_video: + multi_sentence_ = True + cut_off_points_ = test_dataloader.dataset.cut_off_points + sentence_num_ = test_dataloader.dataset.sentence_num + video_num_ = test_dataloader.dataset.video_num + cut_off_points_ = [itm - 1 for itm in cut_off_points_] + + if multi_sentence_: + logger.warning("Eval under the multi-sentence per video clip setting.") + logger.warning("sentence num: {}, video num: {}".format(sentence_num_, video_num_)) + + model.eval() + with torch.no_grad(): + batch_list_t = [] + batch_list_v = [] + batch_sequence_output_list, batch_visual_output_list = [], [] + total_video_num = 0 + + # ---------------------------- + # 1. cache the features + # ---------------------------- + for bid, batch in enumerate(test_dataloader): + batch = tuple(t.to(device) for t in batch) + input_ids, input_mask, segment_ids, video, video_mask = batch + + if multi_sentence_: + # multi-sentences retrieval means: one clip has two or more descriptions. + b, *_t = video.shape + sequence_output = model.get_sequence_output(input_ids, segment_ids, input_mask) + batch_sequence_output_list.append(sequence_output) + batch_list_t.append((input_mask, segment_ids,)) + + s_, e_ = total_video_num, total_video_num + b + filter_inds = [itm - s_ for itm in cut_off_points_ if itm >= s_ and itm < e_] + + if len(filter_inds) > 0: + video, video_mask = video[filter_inds, ...], video_mask[filter_inds, ...] + visual_output = model.get_visual_output(video, video_mask) + batch_visual_output_list.append(visual_output) + batch_list_v.append((video_mask,)) + total_video_num += b + else: + sequence_output, visual_output = model.get_sequence_visual_output(input_ids, segment_ids, input_mask, video, video_mask) + + batch_sequence_output_list.append(sequence_output) + batch_list_t.append((input_mask, segment_ids,)) + + batch_visual_output_list.append(visual_output) + batch_list_v.append((video_mask,)) + + print("{}/{}\r".format(bid, len(test_dataloader)), end="") + + # ---------------------------------- + # 2. 
calculate the similarity + # ---------------------------------- + if n_gpu < 1: + device_ids = list(range(n_gpu)) + batch_list_t_splits = [] + batch_list_v_splits = [] + batch_t_output_splits = [] + batch_v_output_splits = [] + bacth_len = len(batch_list_t) + split_len = (bacth_len + n_gpu - 1) // n_gpu + for dev_id in device_ids: + s_, e_ = dev_id * split_len, (dev_id + 1) * split_len + if dev_id == 0: + batch_list_t_splits.append(batch_list_t[s_:e_]) + batch_list_v_splits.append(batch_list_v) + + batch_t_output_splits.append(batch_sequence_output_list[s_:e_]) + batch_v_output_splits.append(batch_visual_output_list) + else: + devc = torch.device('cuda:{}'.format(str(dev_id))) + devc_batch_list = [tuple(t.to(devc) for t in b) for b in batch_list_t[s_:e_]] + batch_list_t_splits.append(devc_batch_list) + devc_batch_list = [tuple(t.to(devc) for t in b) for b in batch_list_v] + batch_list_v_splits.append(devc_batch_list) + + devc_batch_list = [b.to(devc) for b in batch_sequence_output_list[s_:e_]] + batch_t_output_splits.append(devc_batch_list) + devc_batch_list = [b.to(devc) for b in batch_visual_output_list] + batch_v_output_splits.append(devc_batch_list) + + parameters_tuple_list = [(batch_list_t_splits[dev_id], batch_list_v_splits[dev_id], + batch_t_output_splits[dev_id], batch_v_output_splits[dev_id]) for dev_id in device_ids] + parallel_outputs = parallel_apply(_run_on_single_gpu, model, parameters_tuple_list, device_ids) + sim_matrix = [] + for idx in range(len(parallel_outputs)): + sim_matrix += parallel_outputs[idx] + sim_matrix = np.concatenate(tuple(sim_matrix), axis=0) + else: + sim_matrix = _run_on_single_gpu(model, batch_list_t, batch_list_v, batch_sequence_output_list, batch_visual_output_list) + sim_matrix = np.concatenate(tuple(sim_matrix), axis=0) + + sim_matrix_dsl = sim_matrix * softmax(sim_matrix, axis=0) + sim_matrix_dsl_T = sim_matrix.T * softmax(sim_matrix.T, axis=0) + # np.save('result_lsmdc.npy', sim_matrix) + + if multi_sentence_: + logger.info("before reshape, sim matrix size: {} x {}".format(sim_matrix.shape[0], sim_matrix.shape[1])) + cut_off_points2len_ = [itm + 1 for itm in cut_off_points_] + max_length = max([e_-s_ for s_, e_ in zip([0]+cut_off_points2len_[:-1], cut_off_points2len_)]) + sim_matrix_new = [] + for s_, e_ in zip([0] + cut_off_points2len_[:-1], cut_off_points2len_): + sim_matrix_new.append(np.concatenate((sim_matrix[s_:e_], + np.full((max_length-e_+s_, sim_matrix.shape[1]), -np.inf)), axis=0)) + sim_matrix = np.stack(tuple(sim_matrix_new), axis=0) + logger.info("after reshape, sim matrix size: {} x {} x {}". + format(sim_matrix.shape[0], sim_matrix.shape[1], sim_matrix.shape[2])) + + tv_metrics = tensor_text_to_video_metrics(sim_matrix) + vt_metrics = compute_metrics(tensor_video_to_text_sim(sim_matrix)) + + # dsl + cut_off_points2len_ = [itm + 1 for itm in cut_off_points_] + max_length = max([e_-s_ for s_, e_ in zip([0]+cut_off_points2len_[:-1], cut_off_points2len_)]) + sim_matrix_new = [] + for s_, e_ in zip([0] + cut_off_points2len_[:-1], cut_off_points2len_): + sim_matrix_new.append(np.concatenate((sim_matrix_dsl[s_:e_], + np.full((max_length-e_+s_, sim_matrix_dsl.shape[1]), -np.inf)), axis=0)) + sim_matrix_dsl = np.stack(tuple(sim_matrix_new), axis=0) + logger.info("after reshape, sim matrix size: {} x {} x {}". 
+ format(sim_matrix_dsl.shape[0], sim_matrix_dsl.shape[1], sim_matrix_dsl.shape[2])) + + dsl_tv_metrics = tensor_text_to_video_metrics(sim_matrix_dsl) + dsl_vt_metrics = compute_metrics(tensor_video_to_text_sim(sim_matrix_dsl)) + else: + logger.info("sim matrix size: {}, {}".format(sim_matrix.shape[0], sim_matrix.shape[1])) + tv_metrics = compute_metrics(sim_matrix) + vt_metrics = compute_metrics(sim_matrix.T) + dsl_tv_metrics = compute_metrics(sim_matrix_dsl) + # dsl_vt_metrics = compute_metrics(sim_matrix_dsl.T) + dsl_vt_metrics = compute_metrics(sim_matrix_dsl_T) + logger.info('\t Length-T: {}, Length-V:{}'.format(len(sim_matrix), len(sim_matrix[0]))) + + # dsl output + logger.info("------------------------------------------------------------") + logger.info("DSL Text-to-Video:") + logger.info('\t>>> R@1: {:.1f} - R@5: {:.1f} - R@10: {:.1f} - Median R: {:.1f} - Mean R: {:.1f}'. + format(dsl_tv_metrics['R1'], dsl_tv_metrics['R5'], dsl_tv_metrics['R10'], dsl_tv_metrics['MR'], dsl_tv_metrics['MeanR'])) + logger.info("DSL Video-to-Text:") + logger.info('\t>>> V2T$R@1: {:.1f} - V2T$R@5: {:.1f} - V2T$R@10: {:.1f} - V2T$Median R: {:.1f} - V2T$Mean R: {:.1f}'. + format(dsl_vt_metrics['R1'], dsl_vt_metrics['R5'], dsl_vt_metrics['R10'], dsl_vt_metrics['MR'], dsl_vt_metrics['MeanR'])) + + logger.info("------------------------------------------------------------") + logger.info("Text-to-Video:") + logger.info('\t>>> R@1: {:.1f} - R@5: {:.1f} - R@10: {:.1f} - Median R: {:.1f} - Mean R: {:.1f}'. + format(tv_metrics['R1'], tv_metrics['R5'], tv_metrics['R10'], tv_metrics['MR'], tv_metrics['MeanR'])) + logger.info("Video-to-Text:") + logger.info('\t>>> V2T$R@1: {:.1f} - V2T$R@5: {:.1f} - V2T$R@10: {:.1f} - V2T$Median R: {:.1f} - V2T$Mean R: {:.1f}'. + format(vt_metrics['R1'], vt_metrics['R5'], vt_metrics['R10'], vt_metrics['MR'], vt_metrics['MeanR'])) + + R1 = tv_metrics['R1'] + return R1 + +def main(): + global logger + args = get_args() + args = set_seed_logger(args) + device, n_gpu = init_device(args, args.rank) + + tokenizer = ClipTokenizer() + + assert args.task_type == "retrieval" + model = init_model(args, device, n_gpu, args.rank) + + ## #################################### + # dataloader loading + ## #################################### + assert args.datatype in DATALOADER_DICT + + assert DATALOADER_DICT[args.datatype]["test"] is not None \ + or DATALOADER_DICT[args.datatype]["val"] is not None + + test_dataloader, test_length = None, 0 + if DATALOADER_DICT[args.datatype]["test"] is not None: + test_dataloader, test_length = DATALOADER_DICT[args.datatype]["test"](args, tokenizer) + + if DATALOADER_DICT[args.datatype]["val"] is not None: + val_dataloader, val_length = DATALOADER_DICT[args.datatype]["val"](args, tokenizer, subset="val") + else: + val_dataloader, val_length = test_dataloader, test_length + + ## report validation results if the ["test"] is None + if test_dataloader is None: + test_dataloader, test_length = val_dataloader, val_length + + if args.rank == 0: + logger.info("***** Running test *****") + logger.info(" Num examples = %d", test_length) + logger.info(" Batch size = %d", args.batch_size_val) + logger.info(" Num steps = %d", len(test_dataloader)) + logger.info("***** Running val *****") + logger.info(" Num examples = %d", val_length) + + ## #################################### + ### test #### + ####################################### + if args.rank == 0: + eval_epoch(args, model, test_dataloader, device, n_gpu) + +if __name__ == "__main__": + main() diff --git 
a/Downstream/Video-Text-Retrieval/main_task_retrieval.py b/Downstream/Video-Text-Retrieval/main_task_retrieval.py new file mode 100644 index 0000000..439e59a --- /dev/null +++ b/Downstream/Video-Text-Retrieval/main_task_retrieval.py @@ -0,0 +1,706 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import unicode_literals +from __future__ import print_function + +import torch +import numpy as np +import random +import os +from metrics import compute_metrics, tensor_text_to_video_metrics, tensor_video_to_text_sim +import time +import argparse +from modules.tokenization_clip import SimpleTokenizer as ClipTokenizer +from modules.file_utils import PYTORCH_PRETRAINED_BERT_CACHE +from modules.modeling import CLIP4Clip +from modules.optimization import BertAdam + +from util import parallel_apply, get_logger +from dataloaders.data_dataloaders import DATALOADER_DICT +import torch.distributed as dist +import subprocess +import torch.nn as nn +from scipy.special import softmax + +# torch.distributed.init_process_group(backend="nccl") + +global logger + +def get_args(description='CLIP4Clip on Retrieval Task'): + parser = argparse.ArgumentParser(description=description) + parser.add_argument("--do_pretrain", action='store_true', help="Whether to run training.") + parser.add_argument("--do_train", action='store_true', help="Whether to run training.") + parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") + + parser.add_argument('--train_csv', type=str, default='data/MSR-VTT/anns/MSRVTT_train.9k.csv', help='') + parser.add_argument('--val_csv', type=str, default='data/MSR-VTT/anns/MSRVTT_JSFUSION_test.csv', help='') + parser.add_argument('--data_path', type=str, default='data/caption.pickle', help='data pickle file path') + # parser.add_argument('--features_path', type=str, default='data/videos_feature.pickle', help='feature path') + parser.add_argument('--features_path', type=str, default='s3://video_pub/MSR-VTT/videos', help='feature path') + + parser.add_argument('--num_thread_reader', type=int, default=4, help='') + parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate') + parser.add_argument('--epochs', type=int, default=20, help='upper epoch limit') + parser.add_argument('--batch_size', type=int, default=256, help='batch size') + parser.add_argument('--batch_size_val', type=int, default=3500, help='batch size eval') + parser.add_argument('--lr_decay', type=float, default=0.9, help='Learning rate exp epoch decay') + parser.add_argument('--n_display', type=int, default=100, help='Information display frequence') + parser.add_argument('--video_dim', type=int, default=1024, help='video feature dimension') + parser.add_argument('--seed', type=int, default=42, help='random seed') + parser.add_argument('--max_words', type=int, default=20, help='') + parser.add_argument('--max_frames', type=int, default=100, help='') + parser.add_argument('--feature_framerate', type=int, default=1, help='') + parser.add_argument('--margin', type=float, default=0.1, help='margin for loss') + parser.add_argument('--hard_negative_rate', type=float, default=0.5, help='rate of intra negative sample') + parser.add_argument('--negative_weighting', type=int, default=1, help='Weight the loss for intra negative') + parser.add_argument('--n_pair', type=int, default=1, help='Num of pair to output from data loader') + + parser.add_argument("--output_dir", default='/path/to/save/your/experiments/', type=str, required=True, 
+ help="The output directory where the model predictions and checkpoints will be written.") + parser.add_argument("--cross_model", default="cross-base", type=str, required=False, help="Cross module") + parser.add_argument("--init_model", default=None, type=str, required=False, help="Initial model.") + parser.add_argument("--resume_model", default=None, type=str, required=False, help="Resume train model.") + parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") + parser.add_argument("--warmup_proportion", default=0.1, type=float, + help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.") + parser.add_argument('--gradient_accumulation_steps', type=int, default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.") + parser.add_argument('--n_gpu', type=int, default=1, help="Changed in the execute process.") + + parser.add_argument("--cache_dir", default="", type=str, + help="Where do you want to store the pre-trained models downloaded from s3") + + parser.add_argument('--fp16', action='store_true', + help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") + parser.add_argument('--fp16_opt_level', type=str, default='O1', + help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." + "See details at https://nvidia.github.io/apex/amp.html") + + parser.add_argument("--task_type", default="retrieval", type=str, help="Point the task `retrieval` to finetune.") + parser.add_argument("--datatype", default="msrvtt", type=str, help="Point the dataset to finetune.") + + parser.add_argument("--world_size", default=0, type=int, help="distribted training") + parser.add_argument("--local_rank", default=0, type=int, help="distribted training") + parser.add_argument("--rank", default=0, type=int, help="distribted training") + parser.add_argument('--coef_lr', type=float, default=1., help='coefficient for bert branch.') + parser.add_argument('--use_mil', action='store_true', help="Whether use MIL as Miech et. al. (2020).") + parser.add_argument('--sampled_use_mil', action='store_true', help="Whether MIL, has a high priority than use_mil.") + + parser.add_argument('--text_num_hidden_layers', type=int, default=12, help="Layer NO. of text.") + parser.add_argument('--visual_num_hidden_layers', type=int, default=12, help="Layer NO. of visual.") + parser.add_argument('--cross_num_hidden_layers', type=int, default=4, help="Layer NO. of cross.") + + parser.add_argument('--loose_type', action='store_true', help="Default using tight type for retrieval.") + parser.add_argument('--expand_msrvtt_sentences', action='store_true', help="") + + parser.add_argument('--train_frame_order', type=int, default=0, choices=[0, 1, 2], + help="Frame order, 0: ordinary order; 1: reverse order; 2: random order.") + parser.add_argument('--eval_frame_order', type=int, default=0, choices=[0, 1, 2], + help="Frame order, 0: ordinary order; 1: reverse order; 2: random order.") + + parser.add_argument('--freeze_layer_num', type=int, default=0, help="Layer NO. 
of CLIP need to freeze.") + parser.add_argument('--slice_framepos', type=int, default=0, choices=[0, 1, 2], + help="0: cut from head frames; 1: cut from tail frames; 2: extract frames uniformly.") + parser.add_argument('--linear_patch', type=str, default="2d", choices=["2d", "3d"], + help="linear projection of flattened patches.") + parser.add_argument('--sim_header', type=str, default="meanP", + choices=["meanP", "seqLSTM", "seqTransf", "tightTransf"], + help="choice a similarity header.") + + #### CLIP EVL ###### + parser.add_argument("--pretrained_clip_name", default="ViT-B/32", type=str, help="ViT Base or Large") + parser.add_argument("--pretrained_path", type=str, default=None, help="Give the upstream pretrained checkpoint (not the finetuned one)") + parser.add_argument("--clip_evl", action='store_true', help="whether to activate clip_evl") + parser.add_argument("--mergeclip", type=bool, default=False, help="whether to merge clip weight") + parser.add_argument("--mergeweight", type=float, default=0.5, help="merge weight from 0 to 1") + + + ### DRL ### + parser.add_argument("--interaction", type=str, default='no', help="interaction mode, refer to DRL") + parser.add_argument("--wti_arch", type=int, default=0, help="wti architecture, refer to DRL") + parser.add_argument("--cdcr", type=int, default=0, help="which cdcr type, refer to DRL") + + args = parser.parse_args() + + if args.sim_header == "tightTransf": + args.loose_type = False + + # Check paramenters + if args.gradient_accumulation_steps < 1: + raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( + args.gradient_accumulation_steps)) + if not args.do_train and not args.do_eval: + raise ValueError("At least one of `do_train` or `do_eval` must be True.") + + args.batch_size = int(args.batch_size / args.gradient_accumulation_steps) + + return args + +def setup_for_distributed(is_master): + """ + This function disables printing when not in master process + """ + import builtins as __builtin__ + builtin_print = __builtin__.print + + def print(*args, **kwargs): + force = kwargs.pop('force', False) + if is_master or force: + builtin_print(*args, **kwargs) + + __builtin__.print = print + +def init_distributed_mode(args): + # launched with torch.distributed.launch + if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: + args.rank = int(os.environ["RANK"]) + args.world_size = int(os.environ['WORLD_SIZE']) + args.gpu = int(os.environ['LOCAL_RANK']) + # launched with submitit on a slurm cluster + elif 'SLURM_PROCID' in os.environ: + #args.rank = int(os.environ['SLURM_PROCID']) + #args.gpu = args.rank % torch.cuda.device_count() + proc_id = int(os.environ['SLURM_PROCID']) + ntasks = os.environ['SLURM_NTASKS'] + node_list = os.environ['SLURM_NODELIST'] + num_gpus = torch.cuda.device_count() + addr = subprocess.getoutput( + 'scontrol show hostname {} | head -n1'.format(node_list) + ) + master_port = os.environ.get('MASTER_PORT', '29491') + ## manually set is also ok ## + master_port = "29411" + os.environ['MASTER_PORT'] = master_port + os.environ['MASTER_ADDR'] = addr + os.environ['WORLD_SIZE'] = str(ntasks) + os.environ['RANK'] = str(proc_id) + os.environ['LOCAL_RANK'] = str(proc_id % num_gpus) + os.environ['LOCAL_SIZE'] = str(num_gpus) + args.dist_url = 'env://' + args.world_size = int(ntasks) + args.rank = int(proc_id) + args.gpu = int(proc_id % num_gpus) + print(f'SLURM MODE: proc_id: {proc_id}, ntasks: {ntasks}, node_list: {node_list}, num_gpus:{num_gpus}, addr:{addr}, master port:{master_port}' ) + 
+ # launched naively with `python main_dino.py` + # we manually add MASTER_ADDR and MASTER_PORT to env variables + elif torch.cuda.is_available(): + print('Will run the code on one GPU.') + args.rank, args.gpu, args.world_size = 0, 0, 1 + os.environ['MASTER_ADDR'] = '127.0.0.1' + os.environ['MASTER_PORT'] = '29500' + else: + print('Does not support training without GPU.') + sys.exit(1) + + dist.init_process_group( + backend="nccl", + init_method=args.dist_url, + world_size=args.world_size, + rank=args.rank, + ) + + torch.cuda.set_device(args.gpu) + print('| distributed init (rank {}): {}'.format( + args.rank, args.dist_url), flush=True) + dist.barrier() + setup_for_distributed(args.rank == 0) + + +def set_seed_logger(args): + global logger + # predefining random initial seeds + random.seed(args.seed) + os.environ['PYTHONHASHSEED'] = str(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + torch.cuda.manual_seed_all(args.seed) # if you are using multi-GPU. + torch.backends.cudnn.benchmark = False + torch.backends.cudnn.deterministic = True + + init_distributed_mode(args) + + if not os.path.exists(args.output_dir): + os.makedirs(args.output_dir, exist_ok=True) + + logger = get_logger(os.path.join(args.output_dir, "log.txt")) + + if args.rank == 0: + logger.info("Effective parameters:") + for key in sorted(args.__dict__): + logger.info(" <<< {}: {}".format(key, args.__dict__[key])) + + return args + +def init_device(args, local_rank): + global logger + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu", args.rank) + + n_gpu = torch.cuda.device_count() + logger.info("device: {} n_gpu: {}".format(device, n_gpu)) + args.n_gpu = n_gpu + + if args.batch_size % args.n_gpu != 0 or args.batch_size_val % args.n_gpu != 0: + raise ValueError("Invalid batch_size/batch_size_val and n_gpu parameter: {}%{} and {}%{}, should be == 0".format( + args.batch_size, args.n_gpu, args.batch_size_val, args.n_gpu)) + + return device, n_gpu + +def init_model(args, device, n_gpu, local_rank): + + if args.init_model: + model_state_dict = torch.load(args.init_model, map_location='cpu') + else: + model_state_dict = None + + # Prepare model + cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed') + model = CLIP4Clip.from_pretrained(args.cross_model, cache_dir=cache_dir, state_dict=model_state_dict, task_config=args) + + # model.to(device) + model.cuda() + + return model + +def prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, local_rank, coef_lr=1.): + + if hasattr(model, 'module'): + model = model.module + + param_optimizer = list(model.named_parameters()) + no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] + + decay_param_tp = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_decay)] + no_decay_param_tp = [(n, p) for n, p in param_optimizer if any(nd in n for nd in no_decay)] + + decay_clip_param_tp = [(n, p) for n, p in decay_param_tp if "clip." in n] + decay_noclip_param_tp = [(n, p) for n, p in decay_param_tp if "clip." not in n] + + no_decay_clip_param_tp = [(n, p) for n, p in no_decay_param_tp if "clip." in n] + no_decay_noclip_param_tp = [(n, p) for n, p in no_decay_param_tp if "clip." 
not in n] + + weight_decay = 0.2 + optimizer_grouped_parameters = [ + {'params': [p for n, p in decay_clip_param_tp], 'weight_decay': weight_decay, 'lr': args.lr * coef_lr}, + {'params': [p for n, p in decay_noclip_param_tp], 'weight_decay': weight_decay}, + {'params': [p for n, p in no_decay_clip_param_tp], 'weight_decay': 0.0, 'lr': args.lr * coef_lr}, + {'params': [p for n, p in no_decay_noclip_param_tp], 'weight_decay': 0.0} + ] + + scheduler = None + optimizer = BertAdam(optimizer_grouped_parameters, lr=args.lr, warmup=args.warmup_proportion, + schedule='warmup_cosine', b1=0.9, b2=0.98, e=1e-6, + t_total=num_train_optimization_steps, weight_decay=weight_decay, + max_grad_norm=1.0) + + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[torch.cuda.current_device()], output_device=torch.cuda.current_device(), + find_unused_parameters=False) + + + return optimizer, scheduler, model + +def save_model(epoch, args, model, optimizer, tr_loss, type_name=""): + # Only save the model it-self + model_to_save = model.module if hasattr(model, 'module') else model + output_model_file = os.path.join( + args.output_dir, "pytorch_model.bin") + optimizer_state_file = os.path.join( + args.output_dir, "pytorch_opt.bin") + + torch.save(model_to_save.state_dict(), output_model_file) + + torch.save({ + 'epoch': epoch, + 'optimizer_state_dict': optimizer.state_dict(), + 'loss': tr_loss, + }, optimizer_state_file) + logger.info("Model saved to %s", output_model_file) + logger.info("Optimizer saved to %s", optimizer_state_file) + return output_model_file + +def load_model(epoch, args, n_gpu, device, model_file=None): + if model_file is None or len(model_file) == 0: + model_file = os.path.join(args.output_dir, "pytorch_model.bin") + if os.path.exists(model_file): + model_state_dict = torch.load(model_file, map_location='cpu') + if args.rank == 0: + logger.info("Model loaded from %s", model_file) + # Prepare model + cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed') + model = CLIP4Clip.from_pretrained(args.cross_model, cache_dir=cache_dir, state_dict=model_state_dict, task_config=args) + + model.to(device) + else: + model = None + return model + +def train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer, scheduler, global_step, local_rank=0): + global logger + torch.cuda.empty_cache() + model.train() + log_step = args.n_display + start_time = time.time() + total_loss = 0 + + for step, batch in enumerate(train_dataloader): + if n_gpu == 1: + # multi-gpu does scattering it-self + batch = tuple(t.to(device=device, non_blocking=True) for t in batch) + + input_ids, input_mask, segment_ids, video, video_mask = batch + loss = model(input_ids, segment_ids, input_mask, video, video_mask) + + if n_gpu > 1: + loss = loss.mean() # mean() to average on multi-gpu. 
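For readers skimming prep_optimizer above: parameters are split four ways, weight-decay vs. no-decay crossed with CLIP backbone vs. newly added modules, and the CLIP groups are trained at a smaller learning rate scaled by --coef_lr. A compact sketch of that grouping (the helper name `build_param_groups` is ours, for illustration only):

```python
# Sketch only: mirrors the grouping logic in prep_optimizer, not a drop-in replacement.
NO_DECAY = ("bias", "LayerNorm.bias", "LayerNorm.weight")
WEIGHT_DECAY = 0.2

def build_param_groups(model, base_lr, coef_lr):
    groups = {"decay_clip": [], "decay_other": [], "nodecay_clip": [], "nodecay_other": []}
    for name, param in model.named_parameters():
        decay_key = "decay" if not any(nd in name for nd in NO_DECAY) else "nodecay"
        clip_key = "clip" if "clip." in name else "other"
        groups[f"{decay_key}_{clip_key}"].append(param)
    return [
        {"params": groups["decay_clip"],    "weight_decay": WEIGHT_DECAY, "lr": base_lr * coef_lr},
        {"params": groups["decay_other"],   "weight_decay": WEIGHT_DECAY},
        {"params": groups["nodecay_clip"],  "weight_decay": 0.0, "lr": base_lr * coef_lr},
        {"params": groups["nodecay_other"], "weight_decay": 0.0},
    ]

# Usage, e.g.: BertAdam(build_param_groups(model, args.lr, args.coef_lr), lr=args.lr, ...)
```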
+ if args.gradient_accumulation_steps > 1: + loss = loss / args.gradient_accumulation_steps + + loss.backward() + + total_loss += float(loss) + if (step + 1) % args.gradient_accumulation_steps == 0: + + torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0) + + if scheduler is not None: + scheduler.step() # Update learning rate schedule + + optimizer.step() + optimizer.zero_grad() + + # https://github.com/openai/CLIP/issues/46 + if hasattr(model, 'module'): + torch.clamp_(model.module.clip.logit_scale.data, max=np.log(100)) + else: + torch.clamp_(model.clip.logit_scale.data, max=np.log(100)) + + global_step += 1 + if global_step % log_step == 0 and args.rank == 0: + logger.info("Epoch: %d/%s, Step: %d/%d, Lr: %s, Loss: %f, Time/step: %f", epoch + 1, + args.epochs, step + 1, + len(train_dataloader), "-".join([str('%.9f'%itm) for itm in sorted(list(set(optimizer.get_lr())))]), + float(loss), + (time.time() - start_time) / (log_step * args.gradient_accumulation_steps)) + start_time = time.time() + + total_loss = total_loss / len(train_dataloader) + return total_loss, global_step + +def _run_on_single_gpu(model, batch_list_t, batch_list_v, batch_sequence_output_list, batch_visual_output_list): + sim_matrix = [] + for idx1, b1 in enumerate(batch_list_t): + input_mask, segment_ids, *_tmp = b1 + sequence_output = batch_sequence_output_list[idx1] + each_row = [] + for idx2, b2 in enumerate(batch_list_v): + video_mask, *_tmp = b2 + visual_output = batch_visual_output_list[idx2] + b1b2_logits, *_tmp = model.get_similarity_logits(sequence_output, visual_output, input_mask, video_mask, + loose_type=model.loose_type) + b1b2_logits = b1b2_logits.cpu().detach().numpy() + each_row.append(b1b2_logits) + each_row = np.concatenate(tuple(each_row), axis=-1) + sim_matrix.append(each_row) + return sim_matrix + +def eval_epoch(args, model, test_dataloader, device, n_gpu): + + if hasattr(model, 'module'): + model = model.module.to(device) + else: + model = model.to(device) + + # ################################################################# + ## below variables are used to multi-sentences retrieval + # multi_sentence_: important tag for eval + # cut_off_points: used to tag the label when calculate the metric + # sentence_num: used to cut the sentence representation + # video_num: used to cut the video representation + # ################################################################# + multi_sentence_ = False + cut_off_points_, sentence_num_, video_num_ = [], -1, -1 + if hasattr(test_dataloader.dataset, 'multi_sentence_per_video') \ + and test_dataloader.dataset.multi_sentence_per_video: + multi_sentence_ = True + cut_off_points_ = test_dataloader.dataset.cut_off_points + sentence_num_ = test_dataloader.dataset.sentence_num + video_num_ = test_dataloader.dataset.video_num + cut_off_points_ = [itm - 1 for itm in cut_off_points_] + + if multi_sentence_: + logger.warning("Eval under the multi-sentence per video clip setting.") + logger.warning("sentence num: {}, video num: {}".format(sentence_num_, video_num_)) + + model.eval() + with torch.no_grad(): + batch_list_t = [] + batch_list_v = [] + batch_sequence_output_list, batch_visual_output_list = [], [] + total_video_num = 0 + + # ---------------------------- + # 1. 
cache the features + # ---------------------------- + for bid, batch in enumerate(test_dataloader): + batch = tuple(t.to(device) for t in batch) + input_ids, input_mask, segment_ids, video, video_mask = batch + + if multi_sentence_: + # multi-sentences retrieval means: one clip has two or more descriptions. + b, *_t = video.shape + sequence_output = model.get_sequence_output(input_ids, segment_ids, input_mask) + batch_sequence_output_list.append(sequence_output) + batch_list_t.append((input_mask, segment_ids,)) + + s_, e_ = total_video_num, total_video_num + b + filter_inds = [itm - s_ for itm in cut_off_points_ if itm >= s_ and itm < e_] + + if len(filter_inds) > 0: + video, video_mask = video[filter_inds, ...], video_mask[filter_inds, ...] + visual_output = model.get_visual_output(video, video_mask) + batch_visual_output_list.append(visual_output) + batch_list_v.append((video_mask,)) + total_video_num += b + else: + sequence_output, visual_output = model.get_sequence_visual_output(input_ids, segment_ids, input_mask, video, video_mask) + + batch_sequence_output_list.append(sequence_output) + batch_list_t.append((input_mask, segment_ids,)) + + batch_visual_output_list.append(visual_output) + batch_list_v.append((video_mask,)) + + print("{}/{}\r".format(bid, len(test_dataloader)), end="") + + # ---------------------------------- + # 2. calculate the similarity + # ---------------------------------- + if n_gpu < 1: + device_ids = list(range(n_gpu)) + batch_list_t_splits = [] + batch_list_v_splits = [] + batch_t_output_splits = [] + batch_v_output_splits = [] + bacth_len = len(batch_list_t) + split_len = (bacth_len + n_gpu - 1) // n_gpu + for dev_id in device_ids: + s_, e_ = dev_id * split_len, (dev_id + 1) * split_len + if dev_id == 0: + batch_list_t_splits.append(batch_list_t[s_:e_]) + batch_list_v_splits.append(batch_list_v) + + batch_t_output_splits.append(batch_sequence_output_list[s_:e_]) + batch_v_output_splits.append(batch_visual_output_list) + else: + devc = torch.device('cuda:{}'.format(str(dev_id))) + devc_batch_list = [tuple(t.to(devc) for t in b) for b in batch_list_t[s_:e_]] + batch_list_t_splits.append(devc_batch_list) + devc_batch_list = [tuple(t.to(devc) for t in b) for b in batch_list_v] + batch_list_v_splits.append(devc_batch_list) + + devc_batch_list = [b.to(devc) for b in batch_sequence_output_list[s_:e_]] + batch_t_output_splits.append(devc_batch_list) + devc_batch_list = [b.to(devc) for b in batch_visual_output_list] + batch_v_output_splits.append(devc_batch_list) + + parameters_tuple_list = [(batch_list_t_splits[dev_id], batch_list_v_splits[dev_id], + batch_t_output_splits[dev_id], batch_v_output_splits[dev_id]) for dev_id in device_ids] + parallel_outputs = parallel_apply(_run_on_single_gpu, model, parameters_tuple_list, device_ids) + sim_matrix = [] + for idx in range(len(parallel_outputs)): + sim_matrix += parallel_outputs[idx] + sim_matrix = np.concatenate(tuple(sim_matrix), axis=0) + else: + sim_matrix = _run_on_single_gpu(model, batch_list_t, batch_list_v, batch_sequence_output_list, batch_visual_output_list) + sim_matrix = np.concatenate(tuple(sim_matrix), axis=0) + + sim_matrix_dsl = sim_matrix * softmax(sim_matrix, axis=0) + sim_matrix_dsl_T = sim_matrix.T * softmax(sim_matrix.T, axis=0) + + if multi_sentence_: + logger.info("before reshape, sim matrix size: {} x {}".format(sim_matrix.shape[0], sim_matrix.shape[1])) + cut_off_points2len_ = [itm + 1 for itm in cut_off_points_] + max_length = max([e_-s_ for s_, e_ in zip([0]+cut_off_points2len_[:-1], 
cut_off_points2len_)]) + sim_matrix_new = [] + for s_, e_ in zip([0] + cut_off_points2len_[:-1], cut_off_points2len_): + sim_matrix_new.append(np.concatenate((sim_matrix[s_:e_], + np.full((max_length-e_+s_, sim_matrix.shape[1]), -np.inf)), axis=0)) + sim_matrix = np.stack(tuple(sim_matrix_new), axis=0) + logger.info("after reshape, sim matrix size: {} x {} x {}". + format(sim_matrix.shape[0], sim_matrix.shape[1], sim_matrix.shape[2])) + + tv_metrics = tensor_text_to_video_metrics(sim_matrix) + vt_metrics = compute_metrics(tensor_video_to_text_sim(sim_matrix)) + + # dsl + cut_off_points2len_ = [itm + 1 for itm in cut_off_points_] + max_length = max([e_-s_ for s_, e_ in zip([0]+cut_off_points2len_[:-1], cut_off_points2len_)]) + sim_matrix_new = [] + for s_, e_ in zip([0] + cut_off_points2len_[:-1], cut_off_points2len_): + sim_matrix_new.append(np.concatenate((sim_matrix_dsl[s_:e_], + np.full((max_length-e_+s_, sim_matrix_dsl.shape[1]), -np.inf)), axis=0)) + sim_matrix_dsl = np.stack(tuple(sim_matrix_new), axis=0) + logger.info("after reshape, sim matrix size: {} x {} x {}". + format(sim_matrix_dsl.shape[0], sim_matrix_dsl.shape[1], sim_matrix_dsl.shape[2])) + + dsl_tv_metrics = tensor_text_to_video_metrics(sim_matrix_dsl) + dsl_vt_metrics = compute_metrics(tensor_video_to_text_sim(sim_matrix_dsl)) + else: + logger.info("sim matrix size: {}, {}".format(sim_matrix.shape[0], sim_matrix.shape[1])) + tv_metrics = compute_metrics(sim_matrix) + vt_metrics = compute_metrics(sim_matrix.T) + dsl_tv_metrics = compute_metrics(sim_matrix_dsl) + # dsl_vt_metrics = compute_metrics(sim_matrix_dsl.T) + dsl_vt_metrics = compute_metrics(sim_matrix_dsl_T) + logger.info('\t Length-T: {}, Length-V:{}'.format(len(sim_matrix), len(sim_matrix[0]))) + + # dsl output + logger.info("------------------------------------------------------------") + logger.info("DSL Text-to-Video:") + logger.info('\t>>> R@1: {:.1f} - R@5: {:.1f} - R@10: {:.1f} - Median R: {:.1f} - Mean R: {:.1f}'. + format(dsl_tv_metrics['R1'], dsl_tv_metrics['R5'], dsl_tv_metrics['R10'], dsl_tv_metrics['MR'], dsl_tv_metrics['MeanR'])) + logger.info("DSL Video-to-Text:") + logger.info('\t>>> V2T$R@1: {:.1f} - V2T$R@5: {:.1f} - V2T$R@10: {:.1f} - V2T$Median R: {:.1f} - V2T$Mean R: {:.1f}'. + format(dsl_vt_metrics['R1'], dsl_vt_metrics['R5'], dsl_vt_metrics['R10'], dsl_vt_metrics['MR'], dsl_vt_metrics['MeanR'])) + + logger.info("------------------------------------------------------------") + logger.info("Text-to-Video:") + logger.info('\t>>> R@1: {:.1f} - R@5: {:.1f} - R@10: {:.1f} - Median R: {:.1f} - Mean R: {:.1f}'. + format(tv_metrics['R1'], tv_metrics['R5'], tv_metrics['R10'], tv_metrics['MR'], tv_metrics['MeanR'])) + logger.info("Video-to-Text:") + logger.info('\t>>> V2T$R@1: {:.1f} - V2T$R@5: {:.1f} - V2T$R@10: {:.1f} - V2T$Median R: {:.1f} - V2T$Mean R: {:.1f}'. 
+ format(vt_metrics['R1'], vt_metrics['R5'], vt_metrics['R10'], vt_metrics['MR'], vt_metrics['MeanR'])) + + R1 = tv_metrics['R1'] + return R1 + +def main(): + global logger + args = get_args() + args = set_seed_logger(args) + device, n_gpu = init_device(args, args.rank) + + tokenizer = ClipTokenizer() + + assert args.task_type == "retrieval" + model = init_model(args, device, n_gpu, args.rank) + + ## #################################### + # freeze testing + ## #################################### + assert args.freeze_layer_num <= 12 and args.freeze_layer_num >= -1 + if hasattr(model, "clip") and args.freeze_layer_num > -1: + for name, param in model.clip.named_parameters(): + # top layers always need to train + if name.find("ln_final.") == 0 or name.find("text_projection") == 0 or name.find("logit_scale") == 0 \ + or name.find("visual.ln_post.") == 0 or name.find("visual.proj") == 0: + continue # need to train + elif name.find("visual.transformer.resblocks.") == 0 or name.find("transformer.resblocks.") == 0: + layer_num = int(name.split(".resblocks.")[1].split(".")[0]) + if layer_num >= args.freeze_layer_num: + continue # need to train + + if name.find('.dpe.') >=0 or name.find('.dec.') >= 0: + continue # need to train + + if args.linear_patch == "3d" and name.find("conv2."): + continue # need to train + else: + # paramenters which < freeze_layer_num will be freezed + param.requires_grad = False + print('freezing: ', name, name.find('dpe.'), name.find('dec.'), param.shape) + + # exit(0) + + ## #################################### + # dataloader loading + ## #################################### + assert args.datatype in DATALOADER_DICT + + assert DATALOADER_DICT[args.datatype]["test"] is not None \ + or DATALOADER_DICT[args.datatype]["val"] is not None + + test_dataloader, test_length = None, 0 + if DATALOADER_DICT[args.datatype]["test"] is not None: + test_dataloader, test_length = DATALOADER_DICT[args.datatype]["test"](args, tokenizer) + + if DATALOADER_DICT[args.datatype]["val"] is not None: + val_dataloader, val_length = DATALOADER_DICT[args.datatype]["val"](args, tokenizer, subset="val") + else: + val_dataloader, val_length = test_dataloader, test_length + + ## report validation results if the ["test"] is None + if test_dataloader is None: + test_dataloader, test_length = val_dataloader, val_length + + if args.rank == 0: + logger.info("***** Running test *****") + logger.info(" Num examples = %d", test_length) + logger.info(" Batch size = %d", args.batch_size_val) + logger.info(" Num steps = %d", len(test_dataloader)) + logger.info("***** Running val *****") + logger.info(" Num examples = %d", val_length) + + ## #################################### + # train and eval + ## #################################### + if args.do_train: + train_dataloader, train_length, train_sampler = DATALOADER_DICT[args.datatype]["train"](args, tokenizer) + num_train_optimization_steps = (int(len(train_dataloader) + args.gradient_accumulation_steps - 1) + / args.gradient_accumulation_steps) * args.epochs + + coef_lr = args.coef_lr + optimizer, scheduler, model = prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, args.rank, coef_lr=coef_lr) + + if args.rank == 0: + logger.info("***** Running training *****") + logger.info(" Num examples = %d", train_length) + logger.info(" Batch size = %d", args.batch_size) + logger.info(" Num steps = %d", num_train_optimization_steps * args.gradient_accumulation_steps) + + best_score = 0.00001 + best_output_model_file = "None" + ## 
############################################################## + # resume optimizer state besides loss to continue train + ## ############################################################## + resumed_epoch = 0 + if args.resume_model: + checkpoint = torch.load(args.resume_model, map_location='cpu') + optimizer.load_state_dict(checkpoint['optimizer_state_dict']) + resumed_epoch = checkpoint['epoch']+1 + resumed_loss = checkpoint['loss'] + print('begin training!!!!!!!!') + global_step = 0 + for epoch in range(resumed_epoch, args.epochs): + train_sampler.set_epoch(epoch) + tr_loss, global_step = train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer, + scheduler, global_step, local_rank=args.rank) + if args.rank == 0: + logger.info("Epoch %d/%s Finished, Train Loss: %f", epoch + 1, args.epochs, tr_loss) + + ########## here we save the last ckpt ############# + ########## feel free to modify this for saving the best ckpt ####### + output_model_file = save_model(epoch, args, model, optimizer, tr_loss, type_name="") + + ## Run on val dataset, this process is *TIME-consuming*. + # logger.info("Eval on val dataset") + # R1 = eval_epoch(args, model, val_dataloader, device, n_gpu) + + R1 = eval_epoch(args, model, test_dataloader, device, n_gpu) + if best_score <= R1: + best_score = R1 + best_output_model_file = output_model_file + logger.info("The best model is: {}, the R1 is: {:.4f}".format(best_output_model_file, best_score)) + + ## Uncomment if want to test on the best checkpoint + # if args.local_rank == 0: + # model = load_model(-1, args, n_gpu, device, model_file=best_output_model_file) + # eval_epoch(args, model, test_dataloader, device, n_gpu) + elif args.do_eval: + if args.rank == 0: + eval_epoch(args, model, test_dataloader, device, n_gpu) + +if __name__ == "__main__": + main() diff --git a/Downstream/Video-Text-Retrieval/metrics.py b/Downstream/Video-Text-Retrieval/metrics.py new file mode 100644 index 0000000..ec6d4f6 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/metrics.py @@ -0,0 +1,70 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import unicode_literals +from __future__ import print_function + +import numpy as np +import torch + +def compute_metrics(x): + sx = np.sort(-x, axis=1) + d = np.diag(-x) + d = d[:, np.newaxis] + ind = sx - d + ind = np.where(ind == 0) + ind = ind[1] + metrics = {} + metrics['R1'] = float(np.sum(ind == 0)) * 100 / len(ind) + metrics['R5'] = float(np.sum(ind < 5)) * 100 / len(ind) + metrics['R10'] = float(np.sum(ind < 10)) * 100 / len(ind) + metrics['MR'] = np.median(ind) + 1 + metrics["MedianR"] = metrics['MR'] + metrics["MeanR"] = np.mean(ind) + 1 + metrics["cols"] = [int(i) for i in list(ind)] + return metrics + +def print_computed_metrics(metrics): + r1 = metrics['R1'] + r5 = metrics['R5'] + r10 = metrics['R10'] + mr = metrics['MR'] + print('R@1: {:.4f} - R@5: {:.4f} - R@10: {:.4f} - Median R: {}'.format(r1, r5, r10, mr)) + +# below two functions directly come from: https://github.com/Deferf/Experiments +def tensor_text_to_video_metrics(sim_tensor, top_k = [1,5,10]): + if not torch.is_tensor(sim_tensor): + sim_tensor = torch.tensor(sim_tensor) + + # Permute sim_tensor so it represents a sequence of text-video similarity matrices. 
+ # Then obtain the double argsort to position the rank on the diagonal + stacked_sim_matrices = sim_tensor.permute(1, 0, 2) + first_argsort = torch.argsort(stacked_sim_matrices, dim = -1, descending= True) + second_argsort = torch.argsort(first_argsort, dim = -1, descending= False) + + # Extracts ranks i.e diagonals + ranks = torch.flatten(torch.diagonal(second_argsort, dim1 = 1, dim2 = 2)) + + # Now we need to extract valid ranks, as some belong to inf padding values + permuted_original_data = torch.flatten(torch.diagonal(sim_tensor, dim1 = 0, dim2 = 2)) + mask = ~ torch.logical_or(torch.isinf(permuted_original_data), torch.isnan(permuted_original_data)) + valid_ranks = ranks[mask] + # A quick dimension check validates our results, there may be other correctness tests pending + # Such as dot product localization, but that is for other time. + #assert int(valid_ranks.shape[0]) == sum([len(text_dict[k]) for k in text_dict]) + if not torch.is_tensor(valid_ranks): + valid_ranks = torch.tensor(valid_ranks) + results = {f"R{k}": float(torch.sum(valid_ranks < k) * 100 / len(valid_ranks)) for k in top_k} + results["MedianR"] = float(torch.median(valid_ranks + 1)) + results["MeanR"] = float(np.mean(valid_ranks.numpy() + 1)) + results["Std_Rank"] = float(np.std(valid_ranks.numpy() + 1)) + results['MR'] = results["MedianR"] + return results + +def tensor_video_to_text_sim(sim_tensor): + if not torch.is_tensor(sim_tensor): + sim_tensor = torch.tensor(sim_tensor) + # Code to avoid nans + sim_tensor[sim_tensor != sim_tensor] = float('-inf') + # Forms a similarity matrix for use with rank at k + values, _ = torch.max(sim_tensor, dim=1, keepdim=True) + return torch.squeeze(values).T diff --git a/Downstream/Video-Text-Retrieval/modules/__init__.py b/Downstream/Video-Text-Retrieval/modules/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Downstream/Video-Text-Retrieval/modules/__pycache__/__init__.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..c864fb8 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/__pycache__/__init__.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/__pycache__/__init__.cpython-38.pyc b/Downstream/Video-Text-Retrieval/modules/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000..02ab435 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/__pycache__/__init__.cpython-38.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/__pycache__/file_utils.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/__pycache__/file_utils.cpython-36.pyc new file mode 100644 index 0000000..2958687 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/__pycache__/file_utils.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/__pycache__/file_utils.cpython-38.pyc b/Downstream/Video-Text-Retrieval/modules/__pycache__/file_utils.cpython-38.pyc new file mode 100644 index 0000000..22a8c21 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/__pycache__/file_utils.cpython-38.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/__pycache__/modeling.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/__pycache__/modeling.cpython-36.pyc new file mode 100644 index 0000000..a684437 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/__pycache__/modeling.cpython-36.pyc differ diff --git 
a/Downstream/Video-Text-Retrieval/modules/__pycache__/modeling.cpython-38.pyc b/Downstream/Video-Text-Retrieval/modules/__pycache__/modeling.cpython-38.pyc new file mode 100644 index 0000000..06608c5 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/__pycache__/modeling.cpython-38.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/__pycache__/module_clip.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/__pycache__/module_clip.cpython-36.pyc new file mode 100644 index 0000000..c2f00e0 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/__pycache__/module_clip.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/__pycache__/module_clip.cpython-38.pyc b/Downstream/Video-Text-Retrieval/modules/__pycache__/module_clip.cpython-38.pyc new file mode 100644 index 0000000..04485b1 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/__pycache__/module_clip.cpython-38.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/__pycache__/module_cross.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/__pycache__/module_cross.cpython-36.pyc new file mode 100644 index 0000000..beab3e6 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/__pycache__/module_cross.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/__pycache__/module_cross.cpython-38.pyc b/Downstream/Video-Text-Retrieval/modules/__pycache__/module_cross.cpython-38.pyc new file mode 100644 index 0000000..4fc679d Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/__pycache__/module_cross.cpython-38.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/__pycache__/optimization.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/__pycache__/optimization.cpython-36.pyc new file mode 100644 index 0000000..f823881 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/__pycache__/optimization.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/__pycache__/tokenization_clip.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/__pycache__/tokenization_clip.cpython-36.pyc new file mode 100644 index 0000000..33953df Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/__pycache__/tokenization_clip.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/__pycache__/tokenization_clip.cpython-38.pyc b/Downstream/Video-Text-Retrieval/modules/__pycache__/tokenization_clip.cpython-38.pyc new file mode 100644 index 0000000..d212ec0 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/__pycache__/tokenization_clip.cpython-38.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/__pycache__/until_config.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/__pycache__/until_config.cpython-36.pyc new file mode 100644 index 0000000..00288f0 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/__pycache__/until_config.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/__pycache__/until_config.cpython-38.pyc b/Downstream/Video-Text-Retrieval/modules/__pycache__/until_config.cpython-38.pyc new file mode 100644 index 0000000..41805bd Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/__pycache__/until_config.cpython-38.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/__pycache__/until_module.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/__pycache__/until_module.cpython-36.pyc new file mode 100644 index 
0000000..bd5a084 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/__pycache__/until_module.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/__pycache__/until_module.cpython-38.pyc b/Downstream/Video-Text-Retrieval/modules/__pycache__/until_module.cpython-38.pyc new file mode 100644 index 0000000..0ab85e6 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/__pycache__/until_module.cpython-38.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/bpe_simple_vocab_16e6.txt.gz b/Downstream/Video-Text-Retrieval/modules/bpe_simple_vocab_16e6.txt.gz new file mode 100644 index 0000000..7b5088a Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/bpe_simple_vocab_16e6.txt.gz differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/__init__.py b/Downstream/Video-Text-Retrieval/modules/clip_evl/__init__.py new file mode 100644 index 0000000..ca28329 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_evl/__init__.py @@ -0,0 +1,2 @@ +from .clip import * +from .evl_utils import * \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/__pycache__/__init__.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_evl/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..841a1bb Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_evl/__pycache__/__init__.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/__pycache__/clip.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_evl/__pycache__/clip.cpython-36.pyc new file mode 100644 index 0000000..a1ef545 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_evl/__pycache__/clip.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/__pycache__/model.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_evl/__pycache__/model.cpython-36.pyc new file mode 100644 index 0000000..5f246bf Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_evl/__pycache__/model.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/__pycache__/model_no_freeze_only_global.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_evl/__pycache__/model_no_freeze_only_global.cpython-36.pyc new file mode 100644 index 0000000..ffd631c Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_evl/__pycache__/model_no_freeze_only_global.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/__pycache__/simple_tokenizer.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_evl/__pycache__/simple_tokenizer.cpython-36.pyc new file mode 100644 index 0000000..fbbc8d8 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_evl/__pycache__/simple_tokenizer.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/bpe_simple_vocab_16e6.txt.gz b/Downstream/Video-Text-Retrieval/modules/clip_evl/bpe_simple_vocab_16e6.txt.gz new file mode 100644 index 0000000..4903fdf --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_evl/bpe_simple_vocab_16e6.txt.gz @@ -0,0 +1 @@ +../bpe_simple_vocab_16e6.txt.gz \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/clip.py b/Downstream/Video-Text-Retrieval/modules/clip_evl/clip.py new file mode 100644 index 0000000..5856632 --- /dev/null +++ 
b/Downstream/Video-Text-Retrieval/modules/clip_evl/clip.py @@ -0,0 +1,258 @@ +import hashlib +import os +import urllib +import warnings +from typing import Any, Union, List +from pkg_resources import packaging + +import torch +from PIL import Image +from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize +from tqdm import tqdm + +from .model import build_model +from .simple_tokenizer import SimpleTokenizer as _Tokenizer + +try: + from torchvision.transforms import InterpolationMode + BICUBIC = InterpolationMode.BICUBIC +except ImportError: + BICUBIC = Image.BICUBIC + + +if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"): + warnings.warn("PyTorch version 1.7.1 or higher is recommended") + + +__all__ = ["available_models", "load", "tokenize"] +_tokenizer = _Tokenizer() + +_MODELS = { + "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", + "ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt", + "ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt", +} + + +def _download(url: str, root: str): + os.makedirs(root, exist_ok=True) + filename = os.path.basename(url) + + expected_sha256 = url.split("/")[-2] + download_target = os.path.join(root, filename) + + if os.path.exists(download_target) and not os.path.isfile(download_target): + raise RuntimeError(f"{download_target} exists and is not a regular file") + + if os.path.isfile(download_target): + if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256: + return download_target + else: + warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file") + + with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: + with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop: + while True: + buffer = source.read(8192) + if not buffer: + break + + output.write(buffer) + loop.update(len(buffer)) + + if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256: + raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match") + + return download_target + + +def _convert_image_to_rgb(image): + return image.convert("RGB") + + +def _transform(n_px): + return Compose([ + Resize(n_px, interpolation=BICUBIC), + CenterCrop(n_px), + _convert_image_to_rgb, + ToTensor(), + Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), + ]) + + +def available_models() -> List[str]: + """Returns the names of available CLIP models""" + return list(_MODELS.keys()) + + +def load( + name: str, + device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", + jit: bool = False, download_root: str = None, + # evl + n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.0, 0.0, 0.0, 0.0], cls_dropout=0.5, t_size=8, spatial_size=14, + use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True, dropout=0.0, no_pretrain=False, init_zero=True, mergeclip=False, mergeweight=0.5, use_capdecoder=False, clip_state_dict=None, + ): + """Load a CLIP model + + Parameters + ---------- + name : str + A model name listed by `clip.available_models()`, or the path to a model checkpoint 
containing the state_dict + + device : Union[str, torch.device] + The device to put the loaded model + + jit : bool + Whether to load the optimized JIT model or more hackable non-JIT model (default). + + download_root: str + path to download the model files; by default, it uses "~/.cache/clip" + + Returns + ------- + model : torch.nn.Module + The CLIP model + + preprocess : Callable[[PIL.Image], torch.Tensor] + A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input + """ + if name in _MODELS: + model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip")) + elif os.path.isfile(name): + model_path = name + else: + raise RuntimeError(f"Model {name} not found; available models = {available_models()}") + + ''' + with open(model_path, 'rb') as opened_file: + try: + # loading JIT archive + model = torch.jit.load(opened_file, map_location=device if jit else "cpu").eval() + state_dict = None + except RuntimeError: + # loading saved state dict + if jit: + warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead") + jit = False + state_dict = torch.load(model_path, map_location="cpu") + ''' + init_state_dict = torch.load(model_path, map_location='cpu')['state_dict'] + state_dict = {} + for k, v in init_state_dict.items(): + k = k.replace('clip.','') + state_dict[k] = v + + if not jit: + model = build_model( + state_dict or model.state_dict(), + n_layers=n_layers, n_dim=n_dim, n_head=n_head, mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, spatial_size=spatial_size, + use_t_conv=use_t_conv, use_image_attnmap=use_image_attnmap, use_t_pos_embed=use_t_pos_embed, no_pretrain=no_pretrain, + init_zero=init_zero, mergeclip=mergeclip, mergeweight=mergeweight, use_capdecoder=use_capdecoder, clip_state_dict=clip_state_dict, + ).to(device) + if str(device) == "cpu": + model.float() + return model, _transform(model.visual.input_resolution) + + # patch the device names + device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]) + device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1] + + def patch_device(module): + try: + graphs = [module.graph] if hasattr(module, "graph") else [] + except RuntimeError: + graphs = [] + + if hasattr(module, "forward1"): + graphs.append(module.forward1.graph) + + for graph in graphs: + for node in graph.findAllNodes("prim::Constant"): + if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"): + node.copyAttributes(device_node) + + model.apply(patch_device) + patch_device(model.encode_image) + patch_device(model.encode_text) + + # patch dtype to float32 on CPU + if str(device) == "cpu": + float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[]) + float_input = list(float_holder.graph.findNode("aten::to").inputs())[1] + float_node = float_input.node() + + def patch_float(module): + try: + graphs = [module.graph] if hasattr(module, "graph") else [] + except RuntimeError: + graphs = [] + + if hasattr(module, "forward1"): + graphs.append(module.forward1.graph) + + for graph in graphs: + for node in graph.findAllNodes("aten::to"): + inputs = list(node.inputs()) + for i in [1, 2]: # dtype can be the second or third argument to aten::to() + if inputs[i].node()["value"] == 5: + inputs[i].node().copyAttributes(float_node) + + model.apply(patch_float) + 
patch_float(model.encode_image) + patch_float(model.encode_text) + + model.float() + + return model, _transform(model.input_resolution.item()) + + +def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False, return_special_tokens_mask: bool = False) -> Union[torch.IntTensor, torch.LongTensor, torch.BoolTensor]: + """ + Returns the tokenized representation of given input string(s) + + Parameters + ---------- + texts : Union[str, List[str]] + An input string or a list of input strings to tokenize + + context_length : int + The context length to use; all CLIP models use 77 as the context length + + truncate: bool + Whether to truncate the text in case its encoding is longer than the context length + + Returns + ------- + A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]. + We return LongTensor when torch version is <1.8.0, since older index_select requires indices to be long. + """ + if isinstance(texts, str): + texts = [texts] + + sot_token = _tokenizer.encoder["<|startoftext|>"] + eot_token = _tokenizer.encoder["<|endoftext|>"] + all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts] + if packaging.version.parse(torch.__version__) < packaging.version.parse("1.8.0"): + result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) + else: + result = torch.zeros(len(all_tokens), context_length, dtype=torch.int) + + special_tokens_mask = torch.zeros(len(all_tokens), context_length, dtype=torch.bool) + + for i, tokens in enumerate(all_tokens): + if len(tokens) > context_length: + if truncate: + tokens = tokens[:context_length] + tokens[-1] = eot_token + else: + raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}") + result[i, :len(tokens)] = torch.tensor(tokens) + special_tokens_mask[i, len(tokens):] = 1 + + if return_special_tokens_mask: + return result, special_tokens_mask + + return result diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/dog.png b/Downstream/Video-Text-Retrieval/modules/clip_evl/dog.png new file mode 100644 index 0000000..bb18167 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_evl/dog.png differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__init__.py b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__init__.py new file mode 100644 index 0000000..bede4c0 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__init__.py @@ -0,0 +1,7 @@ +from .evl_module import TransformerDecoder +from .evl_module_uniformer_diff_conv_balance import TransformerDecoder_uniformer_diff_conv_balance +from .clip_vit import vit_b32, vit_b16, vit_l14, vit_l14_336 +from .clip_vit_2plus1d import vit_2plus1d_b32, vit_2plus1d_b16, vit_2plus1d_l14, vit_2plus1d_l14_336 +from .clip_vit_2plus1d_dw_bias import vit_2plus1d_dw_bias_b32, vit_2plus1d_dw_bias_b16, vit_2plus1d_dw_bias_l14, vit_2plus1d_dw_bias_l14_336 +from .clip_vit_fusion import vit_fusion_b32, vit_fusion_b16, vit_fusion_l14, vit_fusion_l14_336 +from .clip_vit_only_global import vit_only_global_b32, vit_only_global_b16, vit_only_global_l14, vit_only_global_l14_336 \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/__init__.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..3b2c858 Binary files /dev/null and 
b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/__init__.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/attention.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/attention.cpython-36.pyc new file mode 100644 index 0000000..48dd9df Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/attention.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/attention_bias.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/attention_bias.cpython-36.pyc new file mode 100644 index 0000000..8813a66 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/attention_bias.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/attention_module.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/attention_module.cpython-36.pyc new file mode 100644 index 0000000..3541819 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/attention_module.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/attention_module_bias.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/attention_module_bias.cpython-36.pyc new file mode 100644 index 0000000..31c5c2a Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/attention_module_bias.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/clip_vit.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/clip_vit.cpython-36.pyc new file mode 100644 index 0000000..ef45a10 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/clip_vit.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/clip_vit_2plus1d.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/clip_vit_2plus1d.cpython-36.pyc new file mode 100644 index 0000000..2dcdeed Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/clip_vit_2plus1d.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/clip_vit_2plus1d_dw_bias.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/clip_vit_2plus1d_dw_bias.cpython-36.pyc new file mode 100644 index 0000000..df9ee01 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/clip_vit_2plus1d_dw_bias.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/clip_vit_fusion.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/clip_vit_fusion.cpython-36.pyc new file mode 100644 index 0000000..5738bf6 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/clip_vit_fusion.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/clip_vit_only_global.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/clip_vit_only_global.cpython-36.pyc new 
file mode 100644 index 0000000..2104e2d Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/clip_vit_only_global.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/evl_module.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/evl_module.cpython-36.pyc new file mode 100644 index 0000000..e8714e2 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/evl_module.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/evl_module_uniformer_diff_conv_balance.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/evl_module_uniformer_diff_conv_balance.cpython-36.pyc new file mode 100644 index 0000000..2a80ba4 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/__pycache__/evl_module_uniformer_diff_conv_balance.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/attention.py b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/attention.py new file mode 100644 index 0000000..28ce56d --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/attention.py @@ -0,0 +1,203 @@ +#!/usr/bin/env python + +import warnings +from typing import Tuple, Optional + +import torch +from torch import Tensor +from torch.nn.modules.linear import Linear +from torch.nn.init import xavier_uniform_ +from torch.nn.init import constant_ +from torch.nn.init import xavier_normal_ +from torch.nn.parameter import Parameter +from torch.nn.modules.module import Module + +from .attention_module import multi_head_attention_forward + + +class _LinearWithBias(Linear): + bias: Tensor + + def __init__(self, in_features: int, out_features: int) -> None: + super().__init__(in_features, out_features, bias=True) + + +class MultiheadAttention(Module): + r"""Allows the model to jointly attend to information + from different representation subspaces. + See reference: Attention Is All You Need + + .. math:: + \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O + \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V) + + Args: + embed_dim: total dimension of the model. + num_heads: parallel attention heads. + dropout: a Dropout layer on attn_output_weights. Default: 0.0. + bias: add bias as module parameter. Default: True. + add_bias_kv: add bias to the key and value sequences at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + kdim: total number of features in key. Default: None. + vdim: total number of features in value. Default: None. + + Note: if kdim and vdim are None, they will be set to embed_dim such that + query, key, and value have the same number of features. 
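Since this class mirrors `torch.nn.MultiheadAttention`, a minimal sketch with the standard module (illustrative only, not code from this diff) shows the (L, N, E) layout the docstring above assumes:

```python
import torch
import torch.nn as nn

embed_dim, num_heads = 8, 2
mha = nn.MultiheadAttention(embed_dim, num_heads)

x = torch.randn(5, 3, embed_dim)          # L=5 tokens, N=3 sequences, E=8 features
attn_output, attn_weights = mha(x, x, x)  # self-attention
print(attn_output.shape)                  # torch.Size([5, 3, 8])
print(attn_weights.shape)                 # torch.Size([3, 5, 5]), weights averaged over heads
```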
+ + Examples:: + + >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads) + >>> attn_output, attn_output_weights = multihead_attn(query, key, value) + """ + bias_k: Optional[torch.Tensor] + bias_v: Optional[torch.Tensor] + + def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None): + super(MultiheadAttention, self).__init__() + self.embed_dim = embed_dim + self.kdim = kdim if kdim is not None else embed_dim + self.vdim = vdim if vdim is not None else embed_dim + self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim + + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" + + if self._qkv_same_embed_dim is False: + self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim)) + self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim)) + self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim)) + self.register_parameter('in_proj_weight', None) + else: + self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim)) + self.register_parameter('q_proj_weight', None) + self.register_parameter('k_proj_weight', None) + self.register_parameter('v_proj_weight', None) + + if bias: + self.in_proj_bias = Parameter(torch.empty(3 * embed_dim)) + else: + self.register_parameter('in_proj_bias', None) + self.out_proj = _LinearWithBias(embed_dim, embed_dim) + + if add_bias_kv: + self.bias_k = Parameter(torch.empty(1, 1, embed_dim)) + self.bias_v = Parameter(torch.empty(1, 1, embed_dim)) + else: + self.bias_k = self.bias_v = None + + self.add_zero_attn = add_zero_attn + + self._reset_parameters() + + def _reset_parameters(self): + if self._qkv_same_embed_dim: + xavier_uniform_(self.in_proj_weight) + else: + xavier_uniform_(self.q_proj_weight) + xavier_uniform_(self.k_proj_weight) + xavier_uniform_(self.v_proj_weight) + + if self.in_proj_bias is not None: + constant_(self.in_proj_bias, 0.) + constant_(self.out_proj.bias, 0.) + if self.bias_k is not None: + xavier_normal_(self.bias_k) + if self.bias_v is not None: + xavier_normal_(self.bias_v) + + def __setstate__(self, state): + # Support loading old MultiheadAttention checkpoints generated by v1.1.0 + if '_qkv_same_embed_dim' not in state: + state['_qkv_same_embed_dim'] = True + + super(MultiheadAttention, self).__setstate__(state) + + def forward(self, query, key, value, key_padding_mask=None, + need_weights=True, attn_mask=None, return_qk=False): + # type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor], bool) -> Tuple[Tensor, Optional[Tensor]] + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. When given a binary mask and a value is True, + the corresponding value on the attention layer will be ignored. When given + a byte mask and a value is non-zero, the corresponding value on the attention + layer will be ignored + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. 
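To make the `key_padding_mask` semantics above concrete, here is a small sketch (again using the standard `torch.nn.MultiheadAttention`, purely for illustration): source positions marked `True` are filled with -inf before the softmax and therefore receive zero attention weight.

```python
import torch
import torch.nn as nn

mha = nn.MultiheadAttention(embed_dim=8, num_heads=2)
query = torch.randn(4, 2, 8)        # (L, N, E)
key = value = torch.randn(6, 2, 8)  # (S, N, E)

key_padding_mask = torch.zeros(2, 6, dtype=torch.bool)
key_padding_mask[0, 4:] = True      # last two keys of sequence 0 are padding

out, weights = mha(query, key, value, key_padding_mask=key_padding_mask)
print(weights[0, :, 4:])            # all zeros: the padded keys are ignored
```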
+ + Shape: + - Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the position + with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensure that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` + is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + - return_qk: whether return Q and K. + + - Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. + """ + if return_qk: + if not self._qkv_same_embed_dim: + q, k, attn_output, attn_output_weights = multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, use_separate_proj_weight=True, + q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight, + v_proj_weight=self.v_proj_weight, return_qk=True) + else: + q, k, attn_output, attn_output_weights = multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, return_qk=True) + return q, k, attn_output, attn_output_weights + else: + if not self._qkv_same_embed_dim: + return multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, use_separate_proj_weight=True, + q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight, + v_proj_weight=self.v_proj_weight) + else: + return multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, 
self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask) diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/attention_bias.py b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/attention_bias.py new file mode 100644 index 0000000..006f56a --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/attention_bias.py @@ -0,0 +1,201 @@ +import warnings +from typing import Tuple, Optional + +import torch +from torch import Tensor +from torch.nn.modules.linear import Linear +from torch.nn.init import xavier_uniform_ +from torch.nn.init import constant_ +from torch.nn.init import xavier_normal_ +from torch.nn.parameter import Parameter +from torch.nn.modules.module import Module + +from .attention_module_bias import multi_head_attention_forward + + +class _LinearWithBias(Linear): + bias: Tensor + + def __init__(self, in_features: int, out_features: int) -> None: + super().__init__(in_features, out_features, bias=True) + + +class MultiheadAttention(Module): + r"""Allows the model to jointly attend to information + from different representation subspaces. + See reference: Attention Is All You Need + + .. math:: + \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O + \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V) + + Args: + embed_dim: total dimension of the model. + num_heads: parallel attention heads. + dropout: a Dropout layer on attn_output_weights. Default: 0.0. + bias: add bias as module parameter. Default: True. + add_bias_kv: add bias to the key and value sequences at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + kdim: total number of features in key. Default: None. + vdim: total number of features in value. Default: None. + + Note: if kdim and vdim are None, they will be set to embed_dim such that + query, key, and value have the same number of features. 
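The note above about `kdim`/`vdim` corresponds to the separate-projection branch in `__init__` below. A short sketch with the standard `torch.nn.MultiheadAttention` (illustrative only) shows keys and values living in their own feature spaces while the output stays in `embed_dim`:

```python
import torch
import torch.nn as nn

mha = nn.MultiheadAttention(embed_dim=8, num_heads=2, kdim=16, vdim=32)

q = torch.randn(4, 1, 8)   # queries in the model dimension
k = torch.randn(6, 1, 16)  # keys in their own feature space
v = torch.randn(6, 1, 32)  # values in yet another feature space

out, _ = mha(q, k, v)
print(out.shape)           # torch.Size([4, 1, 8]); always projected back to embed_dim
```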
+ + Examples:: + + >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads) + >>> attn_output, attn_output_weights = multihead_attn(query, key, value) + """ + bias_k: Optional[torch.Tensor] + bias_v: Optional[torch.Tensor] + + def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None): + super(MultiheadAttention, self).__init__() + self.embed_dim = embed_dim + self.kdim = kdim if kdim is not None else embed_dim + self.vdim = vdim if vdim is not None else embed_dim + self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim + + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" + + if self._qkv_same_embed_dim is False: + self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim)) + self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim)) + self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim)) + self.register_parameter('in_proj_weight', None) + else: + self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim)) + self.register_parameter('q_proj_weight', None) + self.register_parameter('k_proj_weight', None) + self.register_parameter('v_proj_weight', None) + + if bias: + self.in_proj_bias = Parameter(torch.empty(3 * embed_dim)) + else: + self.register_parameter('in_proj_bias', None) + self.out_proj = _LinearWithBias(embed_dim, embed_dim) + + if add_bias_kv: + self.bias_k = Parameter(torch.empty(1, 1, embed_dim)) + self.bias_v = Parameter(torch.empty(1, 1, embed_dim)) + else: + self.bias_k = self.bias_v = None + + self.add_zero_attn = add_zero_attn + + self._reset_parameters() + + def _reset_parameters(self): + if self._qkv_same_embed_dim: + xavier_uniform_(self.in_proj_weight) + else: + xavier_uniform_(self.q_proj_weight) + xavier_uniform_(self.k_proj_weight) + xavier_uniform_(self.v_proj_weight) + + if self.in_proj_bias is not None: + constant_(self.in_proj_bias, 0.) + constant_(self.out_proj.bias, 0.) + if self.bias_k is not None: + xavier_normal_(self.bias_k) + if self.bias_v is not None: + xavier_normal_(self.bias_v) + + def __setstate__(self, state): + # Support loading old MultiheadAttention checkpoints generated by v1.1.0 + if '_qkv_same_embed_dim' not in state: + state['_qkv_same_embed_dim'] = True + + super(MultiheadAttention, self).__setstate__(state) + + def forward(self, query, key, value, key_padding_mask=None, + need_weights=True, attn_mask=None, return_qk=False, rpb=None): + # type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor], bool, Optional[Tensor]) -> Tuple[Tensor, Optional[Tensor]] + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. When given a binary mask and a value is True, + the corresponding value on the attention layer will be ignored. When given + a byte mask and a value is non-zero, the corresponding value on the attention + layer will be ignored + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. 
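The extra `rpb` argument here is a relative position bias. Judging from the docstring (it is listed alongside the float `attn_mask`, which is added to the attention weights), a reasonable reading is that `rpb` is added to the scaled QK^T scores before the softmax; the sketch below illustrates that interpretation with made-up tensors and is not code from this module.

```python
import torch
from torch.nn.functional import softmax

num_heads, L, S = 2, 4, 4
scores = torch.randn(num_heads, L, S)            # q @ k^T * scaling, one matrix per head
rpb = torch.zeros(num_heads, L, S)               # one bias table per head
rpb[:, torch.arange(L), torch.arange(S)] = 1.0   # e.g. favour the zero-offset position

attn = softmax(scores + rpb, dim=-1)             # bias added before the softmax
print(attn.shape)                                # torch.Size([2, 4, 4])
```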
+ + Shape: + - Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the position + with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensure that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` + is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + - return_qk: whether return Q and K. + + - Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. + """ + if return_qk: + if not self._qkv_same_embed_dim: + q, k, attn_output, attn_output_weights = multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, use_separate_proj_weight=True, + q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight, + v_proj_weight=self.v_proj_weight, return_qk=True, rpb=rpb) + else: + q, k, attn_output, attn_output_weights = multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, return_qk=True, rpb=rpb) + return q, k, attn_output, attn_output_weights + else: + if not self._qkv_same_embed_dim: + return multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, use_separate_proj_weight=True, + q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight, + v_proj_weight=self.v_proj_weight, rpb=rpb) + else: + return multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, 
self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, rpb=rpb) diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/attention_module.py b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/attention_module.py new file mode 100644 index 0000000..fd4e73b --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/attention_module.py @@ -0,0 +1,316 @@ +r"""Functional interface""" +import warnings +import math + +import torch +from torch import _VF +from torch._jit_internal import Optional, Tuple +from torch.overrides import has_torch_function, handle_torch_function + +from torch.nn.functional import _pad, linear, softmax, dropout + +Tensor = torch.Tensor +pad = _pad + + +def multi_head_attention_forward(query: Tensor, + key: Tensor, + value: Tensor, + embed_dim_to_check: int, + num_heads: int, + in_proj_weight: Tensor, + in_proj_bias: Tensor, + bias_k: Optional[Tensor], + bias_v: Optional[Tensor], + add_zero_attn: bool, + dropout_p: float, + out_proj_weight: Tensor, + out_proj_bias: Tensor, + training: bool = True, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + use_separate_proj_weight: bool = False, + q_proj_weight: Optional[Tensor] = None, + k_proj_weight: Optional[Tensor] = None, + v_proj_weight: Optional[Tensor] = None, + static_k: Optional[Tensor] = None, + static_v: Optional[Tensor] = None, + return_qk: bool = False + ) -> Tuple[Tensor, Optional[Tensor]]: + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + embed_dim_to_check: total dimension of the model. + num_heads: parallel attention heads. + in_proj_weight, in_proj_bias: input projection weight and bias. + bias_k, bias_v: bias of the key and value sequences to be added at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + dropout_p: probability of an element to be zeroed. + out_proj_weight, out_proj_bias: the output projection weight and bias. + training: apply dropout if is ``True``. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. This is an binary mask. When the value is True, + the corresponding value on the attention layer will be filled with -inf. + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. + use_separate_proj_weight: the function accept the proj. weights for query, key, + and value in different forms. If false, in_proj_weight will be used, which is + a combination of q_proj_weight, k_proj_weight, v_proj_weight. + q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias. + static_k, static_v: static key and value used for attention operators. + + + Shape: + Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. 
+ - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions + will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` + are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + - return_qk: whether return Q and K. + + Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. + """ + if not torch.jit.is_scripting(): + tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, + out_proj_weight, out_proj_bias) + if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops): + return handle_torch_function( + multi_head_attention_forward, tens_ops, query, key, value, + embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias, + bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight, + out_proj_bias, training=training, key_padding_mask=key_padding_mask, + need_weights=need_weights, attn_mask=attn_mask, + use_separate_proj_weight=use_separate_proj_weight, + q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight, + v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v) + tgt_len, bsz, embed_dim = query.size() + assert embed_dim == embed_dim_to_check + # allow MHA to have different sizes for the feature dimension + assert key.size(0) == value.size(0) and key.size(1) == value.size(1) + + head_dim = embed_dim // num_heads + assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads" + scaling = float(head_dim) ** -0.5 + + if not use_separate_proj_weight: + if torch.equal(query, key) and torch.equal(key, value): + # self-attention + q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1) + + elif torch.equal(key, value): + # encoder-decoder attention + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = linear(query, _w, _b) + + if key is None: + assert value is None + k = None + v = None + else: + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = None + _w = in_proj_weight[_start:, :] + if 
_b is not None: + _b = _b[_start:] + k, v = linear(key, _w, _b).chunk(2, dim=-1) + + else: + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = linear(query, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = embed_dim * 2 + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + k = linear(key, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim * 2 + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + v = linear(value, _w, _b) + else: + q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight) + len1, len2 = q_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == query.size(-1) + + k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight) + len1, len2 = k_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == key.size(-1) + + v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight) + len1, len2 = v_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == value.size(-1) + + if in_proj_bias is not None: + q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim]) + k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)]) + v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):]) + else: + q = linear(query, q_proj_weight_non_opt, in_proj_bias) + k = linear(key, k_proj_weight_non_opt, in_proj_bias) + v = linear(value, v_proj_weight_non_opt, in_proj_bias) + q = q * scaling + + if attn_mask is not None: + assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \ + attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \ + 'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype) + if attn_mask.dtype == torch.uint8: + warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.") + attn_mask = attn_mask.to(torch.bool) + + if attn_mask.dim() == 2: + attn_mask = attn_mask.unsqueeze(0) + if list(attn_mask.size()) != [1, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 2D attn_mask is not correct.') + elif attn_mask.dim() == 3: + if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 3D attn_mask is not correct.') + else: + raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim())) + # attn_mask's dim is 3 now. + + # convert ByteTensor key_padding_mask to bool + if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8: + warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.") + key_padding_mask = key_padding_mask.to(torch.bool) + + if bias_k is not None and bias_v is not None: + if static_k is None and static_v is None: + k = torch.cat([k, bias_k.repeat(1, bsz, 1)]) + v = torch.cat([v, bias_v.repeat(1, bsz, 1)]) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + else: + assert static_k is None, "bias cannot be added to static key." + assert static_v is None, "bias cannot be added to static value." 
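As a side note (not part of this file), a minimal sketch of the packed self-attention projection and scaling performed earlier in `multi_head_attention_forward` when query, key and value are the same tensor: a single (3*E, E) weight produces q, k and v in one matmul, and q is scaled by `head_dim ** -0.5`.

```python
import torch
from torch.nn.functional import linear

L, N, E, num_heads = 5, 3, 8, 2
x = torch.randn(L, N, E)                 # self-attention input: query is key is value
in_proj_weight = torch.randn(3 * E, E)   # packed q/k/v projection
in_proj_bias = torch.zeros(3 * E)

q, k, v = linear(x, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
q = q * (E // num_heads) ** -0.5         # same scaling as `q = q * scaling`
print(q.shape, k.shape, v.shape)         # each torch.Size([5, 3, 8])
```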
+ else: + assert bias_k is None + assert bias_v is None + + # L, N, E + if return_qk: + return_q = q.clone() / scaling + return_k = k.clone() + + q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1) + if k is not None: + k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + if v is not None: + v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + + if static_k is not None: + assert static_k.size(0) == bsz * num_heads + assert static_k.size(2) == head_dim + k = static_k + + if static_v is not None: + assert static_v.size(0) == bsz * num_heads + assert static_v.size(2) == head_dim + v = static_v + + src_len = k.size(1) + + if key_padding_mask is not None: + assert key_padding_mask.size(0) == bsz + assert key_padding_mask.size(1) == src_len + + if add_zero_attn: + src_len += 1 + k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1) + v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + + attn_output_weights = torch.bmm(q, k.transpose(1, 2)) + assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len] + + if attn_mask is not None: + if attn_mask.dtype == torch.bool: + attn_output_weights.masked_fill_(attn_mask, float('-inf')) + else: + attn_output_weights += attn_mask + + + if key_padding_mask is not None: + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + attn_output_weights = attn_output_weights.masked_fill( + key_padding_mask.unsqueeze(1).unsqueeze(2), + float('-inf'), + ) + attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len) + + attn_output_weights = softmax( + attn_output_weights, dim=-1) + attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training) + + attn_output = torch.bmm(attn_output_weights, v) + assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim] + attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) + attn_output = linear(attn_output, out_proj_weight, out_proj_bias) + + if return_qk: + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return return_q, return_k, attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return return_q, return_k, attn_output, None + else: + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return attn_output, None diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/attention_module_bias.py b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/attention_module_bias.py new file mode 100644 index 0000000..331c60b --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/attention_module_bias.py @@ -0,0 +1,319 @@ +r"""Functional interface""" +import warnings +import math + +import torch +from torch import _VF +from torch._jit_internal import Optional, Tuple +from torch.overrides import has_torch_function, handle_torch_function + +from torch.nn.functional import _pad, linear, softmax, dropout + +Tensor = torch.Tensor +pad = _pad + + +def multi_head_attention_forward(query: Tensor, + 
key: Tensor, + value: Tensor, + embed_dim_to_check: int, + num_heads: int, + in_proj_weight: Tensor, + in_proj_bias: Tensor, + bias_k: Optional[Tensor], + bias_v: Optional[Tensor], + add_zero_attn: bool, + dropout_p: float, + out_proj_weight: Tensor, + out_proj_bias: Tensor, + training: bool = True, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + use_separate_proj_weight: bool = False, + q_proj_weight: Optional[Tensor] = None, + k_proj_weight: Optional[Tensor] = None, + v_proj_weight: Optional[Tensor] = None, + static_k: Optional[Tensor] = None, + static_v: Optional[Tensor] = None, + return_qk: bool = False, + rpb: Tensor = None, + ) -> Tuple[Tensor, Optional[Tensor]]: + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + embed_dim_to_check: total dimension of the model. + num_heads: parallel attention heads. + in_proj_weight, in_proj_bias: input projection weight and bias. + bias_k, bias_v: bias of the key and value sequences to be added at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + dropout_p: probability of an element to be zeroed. + out_proj_weight, out_proj_bias: the output projection weight and bias. + training: apply dropout if is ``True``. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. This is an binary mask. When the value is True, + the corresponding value on the attention layer will be filled with -inf. + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. + use_separate_proj_weight: the function accept the proj. weights for query, key, + and value in different forms. If false, in_proj_weight will be used, which is + a combination of q_proj_weight, k_proj_weight, v_proj_weight. + q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias. + static_k, static_v: static key and value used for attention operators. + + + Shape: + Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions + will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. 
If a BoolTensor is provided, positions with ``True`` + are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + - return_qk: whether return Q and K. + - rpb: relative postion bias + + Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. + """ + if not torch.jit.is_scripting(): + tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, + out_proj_weight, out_proj_bias) + if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops): + return handle_torch_function( + multi_head_attention_forward, tens_ops, query, key, value, + embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias, + bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight, + out_proj_bias, training=training, key_padding_mask=key_padding_mask, + need_weights=need_weights, attn_mask=attn_mask, + use_separate_proj_weight=use_separate_proj_weight, + q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight, + v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v) + tgt_len, bsz, embed_dim = query.size() + assert embed_dim == embed_dim_to_check + # allow MHA to have different sizes for the feature dimension + assert key.size(0) == value.size(0) and key.size(1) == value.size(1) + + head_dim = embed_dim // num_heads + assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads" + scaling = float(head_dim) ** -0.5 + + if not use_separate_proj_weight: + if torch.equal(query, key) and torch.equal(key, value): + # self-attention + q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1) + + elif torch.equal(key, value): + # encoder-decoder attention + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = linear(query, _w, _b) + + if key is None: + assert value is None + k = None + v = None + else: + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + k, v = linear(key, _w, _b).chunk(2, dim=-1) + + else: + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = linear(query, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = embed_dim * 2 + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + k = linear(key, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim * 2 + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + 
v = linear(value, _w, _b) + else: + q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight) + len1, len2 = q_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == query.size(-1) + + k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight) + len1, len2 = k_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == key.size(-1) + + v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight) + len1, len2 = v_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == value.size(-1) + + if in_proj_bias is not None: + q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim]) + k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)]) + v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):]) + else: + q = linear(query, q_proj_weight_non_opt, in_proj_bias) + k = linear(key, k_proj_weight_non_opt, in_proj_bias) + v = linear(value, v_proj_weight_non_opt, in_proj_bias) + q = q * scaling + + if attn_mask is not None: + assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \ + attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \ + 'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype) + if attn_mask.dtype == torch.uint8: + warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.") + attn_mask = attn_mask.to(torch.bool) + + if attn_mask.dim() == 2: + attn_mask = attn_mask.unsqueeze(0) + if list(attn_mask.size()) != [1, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 2D attn_mask is not correct.') + elif attn_mask.dim() == 3: + if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 3D attn_mask is not correct.') + else: + raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim())) + # attn_mask's dim is 3 now. + + # convert ByteTensor key_padding_mask to bool + if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8: + warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.") + key_padding_mask = key_padding_mask.to(torch.bool) + + if bias_k is not None and bias_v is not None: + if static_k is None and static_v is None: + k = torch.cat([k, bias_k.repeat(1, bsz, 1)]) + v = torch.cat([v, bias_v.repeat(1, bsz, 1)]) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + else: + assert static_k is None, "bias cannot be added to static key." + assert static_v is None, "bias cannot be added to static value." 
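+    # Note on the remainder of this function: q/k/v are reshaped below to
+    # (bsz * num_heads, seq_len, head_dim), the raw scores from torch.bmm(q, k.transpose(1, 2))
+    # have shape (bsz * num_heads, tgt_len, src_len), and the optional relative position bias
+    # `rpb` (broadcastable to that score shape) is added to the scores just before the softmax.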
+ else: + assert bias_k is None + assert bias_v is None + + # L, N, E + if return_qk: + return_q = q.clone() / scaling + return_k = k.clone() + + q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1) + if k is not None: + k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + if v is not None: + v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + + if static_k is not None: + assert static_k.size(0) == bsz * num_heads + assert static_k.size(2) == head_dim + k = static_k + + if static_v is not None: + assert static_v.size(0) == bsz * num_heads + assert static_v.size(2) == head_dim + v = static_v + + src_len = k.size(1) + + if key_padding_mask is not None: + assert key_padding_mask.size(0) == bsz + assert key_padding_mask.size(1) == src_len + + if add_zero_attn: + src_len += 1 + k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1) + v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + + attn_output_weights = torch.bmm(q, k.transpose(1, 2)) + assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len] + + if attn_mask is not None: + if attn_mask.dtype == torch.bool: + attn_output_weights.masked_fill_(attn_mask, float('-inf')) + else: + attn_output_weights += attn_mask + + + if key_padding_mask is not None: + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + attn_output_weights = attn_output_weights.masked_fill( + key_padding_mask.unsqueeze(1).unsqueeze(2), + float('-inf'), + ) + attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len) + + if rpb is not None: + attn_output_weights = attn_output_weights + rpb + attn_output_weights = softmax(attn_output_weights, dim=-1) + attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training) + + attn_output = torch.bmm(attn_output_weights, v) + assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim] + attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) + attn_output = linear(attn_output, out_proj_weight, out_proj_bias) + + if return_qk: + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return return_q, return_k, attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return return_q, return_k, attn_output, None + else: + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return attn_output, None diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/clip_vit.py b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/clip_vit.py new file mode 100644 index 0000000..5b8ea39 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/clip_vit.py @@ -0,0 +1,212 @@ +#!/usr/bin/env python + +import os +from collections import OrderedDict + +from timm.models.layers import DropPath +import torch +from torch import nn +import torch.utils.checkpoint as checkpoint + +from .attention import MultiheadAttention + + +MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model' +_MODELS = { + "ViT-B/32": 
os.path.join(MODEL_PATH, "vit_b32.pth"), + "ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"), + "ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"), + "ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"), +} + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def __init__(self, d_model, n_head, attn_mask=None, drop_path=0.0): + super().__init__() + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + print(f'Drop path rate: {drop_path}') + self.attn = MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + def attention(self, x, return_qk=False): + if return_qk: + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + q, k, attn_output, _ = self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask, return_qk=True) + return q, k, attn_output + else: + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def forward(self, x, return_qk=False): + if return_qk: + q, k, attn_output = self.attention(self.ln_1(x), return_qk=True) + x = x + self.drop_path(attn_output) + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x, q, k + else: + x = x + self.drop_path(self.attention(self.ln_1(x))) + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class Transformer(nn.Module): + def __init__(self, width, layers, heads, attn_mask=None, drop_path_rate=0.): + super().__init__() + self.width = width + self.layers = layers + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, layers)] + self.resblocks = nn.ModuleList([ + ResidualAttentionBlock(width, heads, attn_mask, drop_path=dpr[i]) for i in range(layers) + ]) + + def forward(self, x, return_num=4, T=8): + features = [] + for i, resblock in enumerate(self.resblocks): + x = resblock(x) + if i >= self.layers - return_num: + L, NT, C = x.shape + N = NT // T + features.append(x.view(L, N, T, C)) + return features + + +class VisionTransformer(nn.Module): + def __init__( + self, input_resolution, patch_size, width, layers, heads, output_dim, drop_path_rate=0., + ): + super().__init__() + self.input_resolution = input_resolution + self.output_dim = output_dim + self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False) + + scale = width ** -0.5 + self.class_embedding = nn.Parameter(scale * torch.randn(width)) + self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)) + self.ln_pre = LayerNorm(width) + + self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate) + + def forward(self, x, return_num=4, return_qk=True): + N, C, T, H, W = x.shape + x = x.permute(0, 2, 1, 3, 4).reshape(N * T, C, H, W) + + x = self.conv1(x) # shape = [*, width, grid, grid] + x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2] + x = x.permute(0, 2, 1) # 
shape = [*, grid ** 2, width] + x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] + x = x + self.positional_embedding.to(x.dtype) + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) # NLD -> LND + features = self.transformer( + x, return_num=return_num, T=T, + ) + + return features + + +def vit_b32(pretrained=True, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=224, + patch_size=32, + width=768, + layers=12, + heads=12, + output_dim=512, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu') + model.load_state_dict(state_dict) + return model.eval() + + +def vit_b16(pretrained=True, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=224, + patch_size=16, + width=768, + layers=12, + heads=12, + output_dim=512, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu') + model.load_state_dict(state_dict) + return model.eval() + + +def vit_l14(pretrained=True, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=224, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu') + model.load_state_dict(state_dict) + return model.eval() + + +def vit_l14_336(pretrained=True, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=336, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu') + model.load_state_dict(state_dict) + return model.eval() + + +if __name__ == '__main__': + import time + from fvcore.nn import FlopCountAnalysis + from fvcore.nn import flop_count_table + + model = vit_b32(pretrained=True) + + flops = FlopCountAnalysis(model, torch.rand(1, 3, 8, 224, 224)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/clip_vit_2plus1d.py b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/clip_vit_2plus1d.py new file mode 100644 index 0000000..3ad38bf --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/clip_vit_2plus1d.py @@ -0,0 +1,255 @@ +import os +from collections import OrderedDict + +from timm.models.layers import DropPath +import torch +from torch import nn +from einops import rearrange + +from .attention import MultiheadAttention + + +MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model' +_MODELS = { + "ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"), + "ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"), + "ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"), + "ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"), +} + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def 
__init__(self, d_model, n_head, attn_mask=None, drop_path=0.0): + super().__init__() + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + print(f'Drop path rate: {drop_path}') + # temporal + self.attn_t = MultiheadAttention(d_model, n_head) + self.ln_t = LayerNorm(d_model) + # spatial + self.attn = MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + # init zero + print('Init zero for (2+1)d') + nn.init.constant_(self.attn_t.in_proj_weight, 0) + nn.init.constant_(self.attn_t.in_proj_bias, 0) + nn.init.constant_(self.attn_t.out_proj.weight, 1) + nn.init.constant_(self.attn_t.out_proj.bias, 0) + + def attention(self, x): + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def attention_temporal(self, x): + self.attn_mask = None + return self.attn_t(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def forward(self, x, T=8): + # temporal + # x: 1+HWT, N, C + xt = x[1:, :, :] + _, N, C = xt.shape + xt = rearrange(xt, '(l t) n c -> t (n l) c', n=N, t=T) + res_temporal = self.attention_temporal(self.ln_t(xt)) + res_temporal = rearrange(res_temporal, 't (n l) c -> (l t) n c', n=N, t=T) + xt = x[1:, :, :] + self.drop_path(res_temporal) + + # spatial + init_cls_token = x[:1, :, :] + cls_token = init_cls_token.repeat(1, T, 1).view(1, T*N, C) + xs = rearrange(xt, '(l t) n c -> l (t n) c', n=N, t=T) + xs = torch.cat((cls_token, xs), 0) + res_spatial = self.attention(self.ln_1(xs)) + + # Taking care of CLS token + cls_token = res_spatial[0, :, :] + cls_token = rearrange(cls_token, '(t n) c -> t n c', n=N) + cls_token = torch.mean(cls_token, 0, True) # averaging for every frame + res_spatial = res_spatial[1:, :, :] + res_spatial = rearrange(res_spatial, 'l (t n) c -> (l t) n c', n=N) + x = x + self.drop_path(torch.cat((cls_token, res_spatial), 0)) + + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class Transformer(nn.Module): + def __init__(self, width, layers, heads, attn_mask=None, drop_path_rate=0.): + super().__init__() + self.width = width + self.layers = layers + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, layers)] + self.resblocks = nn.ModuleList([ + ResidualAttentionBlock(width, heads, attn_mask, drop_path=dpr[i]) for i in range(layers) + ]) + + def forward(self, x, return_num=4, T=8): + features = [] + for i, resblock in enumerate(self.resblocks): + x = resblock(x, T=T) + if i >= self.layers - return_num: + # LT + 1, N, C + LT, N, C = x.shape + L = (LT - 1) // T + cls_x, tmp_x = x[:1], x[1:] + cls_x = cls_x.unsqueeze(2).repeat(1, 1, T, 1) + tmp_x = tmp_x.reshape(L, T, N, C).permute(0, 2, 1, 3) # L, N, T, C + tmp_x = torch.cat([cls_x, tmp_x], dim=0 )# L + 1, N, T, C + features.append(tmp_x) + return features + + +class VisionTransformer(nn.Module): + def __init__( + self, input_resolution, patch_size, width, layers, heads, output_dim, num_frames=8, drop_path_rate=0., + ): + super().__init__() + self.input_resolution = input_resolution + self.output_dim = output_dim + self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False) + + scale = width ** -0.5 + self.class_embedding = nn.Parameter(scale * 
torch.randn(width)) + self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)) + self.temporal_positional_embedding = nn.Parameter(torch.zeros(1, num_frames, width)) + self.ln_pre = LayerNorm(width) + + self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate) + + def forward(self, x, return_num=4): + N, C, T, H, W = x.shape + x = x.permute(0, 2, 1, 3, 4).reshape(N * T, C, H, W) + + x = self.conv1(x) # shape = [*, width, grid, grid] + x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2] + x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width] + x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] + x = x + self.positional_embedding.to(x.dtype) + + cls_tokens = x[:N, :1, :] + x = x[:, 1:] + x = rearrange(x, '(b t) n c -> (b n) t c', b=N, t=T) + x = x + self.temporal_positional_embedding + x = rearrange(x, '(b n) t c -> b (n t) c', b=N, t=T) + x = torch.cat((cls_tokens, x), dim=1) + + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) # NLD -> LND + features = self.transformer( + x, return_num=return_num, T=T, + ) + + return features + + +def vit_2plus1d_b32(pretrained=True, num_frames=8, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=224, + patch_size=32, + width=768, + layers=12, + heads=12, + output_dim=512, + num_frames=num_frames, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +def vit_2plus1d_b16(pretrained=True, num_frames=8, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=224, + patch_size=16, + width=768, + layers=12, + heads=12, + output_dim=512, + num_frames=num_frames, + drop_path_rate=drop_path_rate, + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +def vit_2plus1d_l14(pretrained=True, num_frames=8, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=224, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + num_frames=num_frames, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +def vit_2plus1d_l14_336(pretrained=True, num_frames=8, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=336, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + num_frames=num_frames, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +if __name__ == '__main__': + import time + from fvcore.nn import FlopCountAnalysis + from fvcore.nn import flop_count_table + + model = vit_2plus1d_b32(pretrained=True) + + flops = FlopCountAnalysis(model, torch.rand(1, 3, 8, 224, 224)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/clip_vit_2plus1d_dw_bias.py 
b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/clip_vit_2plus1d_dw_bias.py new file mode 100644 index 0000000..c6007c4 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/clip_vit_2plus1d_dw_bias.py @@ -0,0 +1,317 @@ +import os +from collections import OrderedDict + +from timm.models.layers import DropPath +import torch +from torch import nn +from einops import rearrange +import torch.utils.checkpoint as checkpoint + +from .attention_bias import MultiheadAttention + + +MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model' +_MODELS = { + "ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"), + "ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"), + "ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"), + "ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"), +} + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def __init__(self, d_model, n_head, attn_mask=None, drop_path=0.0, t_size=8, spatial_size=7): + super().__init__() + + self.n_head = n_head + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + print(f'Drop path rate: {drop_path}') + print(f'Add RPB: t_size {t_size}, spatial_size {spatial_size}') + self.pos_embed = nn.Conv3d(d_model, d_model, kernel_size=3, stride=1, padding=1, groups=d_model) + # temporal + self.attn_t = MultiheadAttention(d_model, n_head) + self.ln_t = LayerNorm(d_model) + self.rpb_t = nn.Parameter(torch.zeros([t_size * 2 - 1, n_head])) + + idx_tensor_t = torch.zeros([t_size, t_size], dtype=torch.long) + for q in range(t_size): + for k in range(t_size): + offs = q - k + t_size - 1 + idx_tensor_t[q, k] = offs + self.idx_tensor_t = idx_tensor_t + + # spatial + self.attn = MultiheadAttention(d_model, n_head) + self.rpb = nn.Parameter(torch.zeros([(spatial_size * 2 - 1) ** 2, n_head])) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + idx_tensor = torch.zeros([spatial_size ** 2, spatial_size ** 2], dtype=torch.long) + for q in range(spatial_size ** 2): + qi, qj = q // spatial_size, q % spatial_size + for k in range(spatial_size ** 2): + ki, kj = k // spatial_size, k % spatial_size + i_offs = qi - ki + spatial_size - 1 + j_offs = qj - kj + spatial_size - 1 + idx_tensor[q, k] = i_offs * (spatial_size * 2 - 1) + j_offs + self.idx_tensor = idx_tensor + + # init zero + print('Init zero for (2+1)d') + nn.init.constant_(self.pos_embed.weight, 0) + nn.init.constant_(self.pos_embed.bias, 0) + nn.init.constant_(self.attn_t.in_proj_weight, 0) + nn.init.constant_(self.attn_t.in_proj_bias, 0) + nn.init.constant_(self.attn_t.out_proj.weight, 1) + nn.init.constant_(self.attn_t.out_proj.bias, 0) + + def attention(self, x, rpb=None): + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask, rpb=rpb)[0] + + def attention_temporal(self, x, rpb=None): + self.attn_mask = None + return self.attn_t(x, x, x, need_weights=False, attn_mask=self.attn_mask, rpb=rpb)[0] + + def forward(self, x, 
T=8, mode='video'): + # temporal + # x: 1+HWT, N, C + # pos_emb + tmp_x = x[1:, :, :] + LT, N, C = tmp_x.shape + L = LT // T + H = W = int(L ** 0.5) + tmp_x = tmp_x.view(H, W, T, N, C).permute(3, 4, 2, 0, 1) + tmp_x = tmp_x + self.pos_embed(tmp_x) + tmp_x = tmp_x.view(N, C, T, L).permute(3, 2, 0, 1).view(LT, N, C) + x[1:, :, :] = tmp_x + + xt = x[1:, :, :] + _, N, C = xt.shape + xt = rearrange(xt, '(l t) n c -> t (n l) c', n=N, t=T) + # no rpb_t for image + if mode == 'image': + rpb_t = None + else: + # rpb_t: T, T, H => B*H, T, T + self.idx_tensor_t = self.idx_tensor_t.to(xt.device) + rpb_t = self.rpb_t[self.idx_tensor_t].permute(2, 0, 1).repeat(N*L, 1, 1) + res_temporal = self.attention_temporal(self.ln_t(xt), rpb=rpb_t) + res_temporal = rearrange(res_temporal, 't (n l) c -> (l t) n c', n=N, t=T) + xt = x[1:, :, :] + self.drop_path(res_temporal) + + # spatial + init_cls_token = x[:1, :, :] + cls_token = init_cls_token.repeat(1, T, 1).view(1, T*N, C) + xs = rearrange(xt, '(l t) n c -> l (t n) c', n=N, t=T) + xs = torch.cat((cls_token, xs), 0) + # rpb: L, L, H => B*H, L+1, L+1 + rpb = torch.zeros((self.n_head, L+1, L+1), device=xs.device, dtype=xs.dtype) + self.idx_tensor = self.idx_tensor.to(xs.device) + rpb[:, 1:, 1:] = self.rpb[self.idx_tensor].permute(2, 0, 1) + rpb = rpb.repeat(T*N, 1, 1) + res_spatial = self.attention(self.ln_1(xs), rpb=rpb) + + # Taking care of CLS token + cls_token = res_spatial[0, :, :] + cls_token = rearrange(cls_token, '(t n) c -> t n c', n=N) + cls_token = torch.mean(cls_token, 0, True) # averaging for every frame + res_spatial = res_spatial[1:, :, :] + res_spatial = rearrange(res_spatial, 'l (t n) c -> (l t) n c', n=N) + x = x + self.drop_path(torch.cat((cls_token, res_spatial), 0)) + + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class Transformer(nn.Module): + def __init__(self, width, layers, heads, attn_mask=None, drop_path_rate=0., t_size=8, spatial_size=7): + super().__init__() + self.width = width + self.layers = layers + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, layers)] + self.resblocks = nn.ModuleList([ + ResidualAttentionBlock(width, heads, attn_mask, + drop_path=dpr[i], t_size=t_size, spatial_size=spatial_size) for i in range(layers) + ]) + + def forward(self, x, return_num=4, T=8, mode='video'): + features = [] + for i, resblock in enumerate(self.resblocks): + x = resblock(x, T=T, mode=mode) + if i >= self.layers - return_num: + # LT + 1, N, C + LT, N, C = x.shape + L = (LT - 1) // T + cls_x, tmp_x = x[:1], x[1:] + cls_x = cls_x.unsqueeze(2).repeat(1, 1, T, 1) + tmp_x = tmp_x.reshape(L, T, N, C).permute(0, 2, 1, 3) # L, N, T, C + tmp_x = torch.cat([cls_x, tmp_x], dim=0 )# L + 1, N, T, C + features.append(tmp_x) + return features + + +class VisionTransformer(nn.Module): + def __init__( + self, input_resolution, patch_size, width, layers, heads, output_dim, + num_frames=8, drop_path_rate=0., t_size=8, spatial_size=7 + ): + super().__init__() + self.input_resolution = input_resolution + self.output_dim = output_dim + self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False) + + scale = width ** -0.5 + self.class_embedding = nn.Parameter(scale * torch.randn(width)) + self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)) + self.temporal_positional_embedding = nn.Parameter(torch.zeros(1, num_frames, width)) + self.ln_pre = LayerNorm(width) + + self.transformer = Transformer(width, layers, heads, 
drop_path_rate=drop_path_rate, t_size=t_size, spatial_size=spatial_size) + + def forward(self, x, return_num=4, mode='video'): + if len(x.size()) == 5: + N, C, T, H, W = x.shape + x = x.permute(0, 2, 1, 3, 4).reshape(N * T, C, H, W) + + x = self.conv1(x) # shape = [*, width, grid, grid] + x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2] + x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width] + x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] + x = x + self.positional_embedding.to(x.dtype) + + cls_tokens = x[:N, :1, :] + x = x[:, 1:] + # add temporal position embedding for video + if mode == 'video': + x = rearrange(x, '(b t) n c -> (b n) t c', b=N, t=T) + x = x + self.temporal_positional_embedding + x = rearrange(x, '(b n) t c -> b (n t) c', b=N, t=T) + else: + pass + x = torch.cat((cls_tokens, x), dim=1) + + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) # NLD -> LND + features = self.transformer( + x, return_num=return_num, T=T, mode=mode + ) + + return features + + +def vit_2plus1d_dw_bias_b32(pretrained=True, num_frames=8, drop_path_rate=0., t_size=8): + model = VisionTransformer( + input_resolution=224, + patch_size=32, + width=768, + layers=12, + heads=12, + output_dim=512, + num_frames=num_frames, + drop_path_rate=drop_path_rate, + t_size=t_size, + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +def vit_2plus1d_dw_bias_b16(pretrained=True, num_frames=8, drop_path_rate=0., t_size=8): + model = VisionTransformer( + input_resolution=224, + patch_size=16, + width=768, + layers=12, + heads=12, + output_dim=512, + num_frames=num_frames, + drop_path_rate=drop_path_rate, + t_size=t_size, + spatial_size=14, + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +def vit_2plus1d_dw_bias_l14(pretrained=True, num_frames=8, drop_path_rate=0., t_size=8): + model = VisionTransformer( + input_resolution=224, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + num_frames=num_frames, + drop_path_rate=drop_path_rate, + t_size=t_size, + + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +def vit_2plus1d_dw_bias_l14_336(pretrained=True, num_frames=8, drop_path_rate=0., t_size=8): + model = VisionTransformer( + input_resolution=336, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + num_frames=num_frames, + drop_path_rate=drop_path_rate, + t_size=t_size, + + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +if __name__ == '__main__': + import time + from fvcore.nn import FlopCountAnalysis + from fvcore.nn import flop_count_table + + model = vit_2plus1d_dw_bias_b32(pretrained=True) + + flops = FlopCountAnalysis(model, torch.rand(4, 3, 8, 224, 224)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) \ No newline at end of file diff --git 
a/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/clip_vit_fusion.py b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/clip_vit_fusion.py new file mode 100644 index 0000000..aa6932c --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/clip_vit_fusion.py @@ -0,0 +1,521 @@ +#!/usr/bin/env python +import os +from collections import OrderedDict + +from timm.models.layers import DropPath +import torch +from torch import nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint + +from .attention import MultiheadAttention +from ipdb import set_trace + + +MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model' +_MODELS = { + "ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"), + "ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"), + "ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"), + "ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"), +} + + +def conv_1x1x1(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (1, 1, 1), (1, 1, 1), (0, 0, 0), groups=groups) + +def conv_3x3x3(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (3, 3, 3), (1, 1, 1), (1, 1, 1), groups=groups) + +def conv_1x3x3(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (1, 3, 3), (1, 1, 1), (0, 1, 1), groups=groups) + +def bn_3d(dim): + return nn.BatchNorm3d(dim) + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(1.702 * x) + + +class Local_MHRA(nn.Module): + def __init__(self, d_model, dw_reduction=1.5, pos_kernel_size=3): + super().__init__() + + padding = pos_kernel_size // 2 + re_d_model = int(d_model // dw_reduction) + self.pos_embed = nn.Sequential( + nn.BatchNorm3d(d_model), + nn.Conv3d(d_model, re_d_model, kernel_size=1, stride=1, padding=0), + nn.Conv3d(re_d_model, re_d_model, kernel_size=(pos_kernel_size, 1, 1), stride=(1, 1, 1), padding=(padding, 0, 0), groups=re_d_model), + nn.Conv3d(re_d_model, d_model, kernel_size=1, stride=1, padding=0), + ) + + # init zero + print('Init zero for Conv in pos_emb') + nn.init.constant_(self.pos_embed[3].weight, 0) + nn.init.constant_(self.pos_embed[3].bias, 0) + + def forward(self, x): + return self.pos_embed(x) + + +class ResidualAttentionBlock(nn.Module): + def __init__( + self, d_model, n_head, attn_mask=None, drop_path=0.0, + dw_reduction=1.5, + ): + super().__init__() + + self.n_head = n_head + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + print(f'Drop path rate: {drop_path}') + self.lmhra1 = Local_MHRA(d_model, dw_reduction=dw_reduction) + self.lmhra2 = Local_MHRA(d_model, dw_reduction=dw_reduction) + + # spatial + self.attn = MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + def attention(self, x): + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def forward(self, x, T=8): + # x: 1+HW, NT, C + # Local MHRA + tmp_x = x[1:, :, :] + L, NT, C = tmp_x.shape + N = NT // T + H = W = int(L ** 0.5) + tmp_x = tmp_x.view(H, W, N, T, C).permute(2, 4, 3, 0, 1).contiguous() + tmp_x = tmp_x + self.drop_path(self.lmhra1(tmp_x)) + tmp_x = tmp_x.view(N, C, T, L).permute(3, 0, 2, 1).contiguous().view(L, NT, C) + x = torch.cat([x[:1, :, :], tmp_x], dim=0) + # MHSA + x = x + self.drop_path(self.attention(self.ln_1(x))) + # Local MHRA + tmp_x = x[1:, :, :] + tmp_x = tmp_x.view(H, W, N, T, C).permute(2, 4, 3, 0, 1).contiguous() + tmp_x = tmp_x + self.drop_path(self.lmhra2(tmp_x)) + tmp_x = tmp_x.view(N, C, T, L).permute(3, 0, 2, 1).contiguous().view(L, NT, C) + x = torch.cat([x[:1, :, :], tmp_x], dim=0) + # FFN + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class Extractor(nn.Module): + def __init__( + self, d_model, n_head, attn_mask=None, + mlp_factor=4.0, dropout=0.0, drop_path=0.0, + ): + super().__init__() + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + print(f'Drop path rate: {drop_path}') + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = nn.LayerNorm(d_model) + d_mlp = round(mlp_factor * d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_mlp)), + ("gelu", QuickGELU()), + ("dropout", nn.Dropout(dropout)), + ("c_proj", nn.Linear(d_mlp, d_model)) + ])) + self.ln_2 = nn.LayerNorm(d_model) + self.ln_3 = nn.LayerNorm(d_model) + self.attn_mask = attn_mask + + # zero init + nn.init.xavier_uniform_(self.attn.in_proj_weight) + nn.init.constant_(self.attn.out_proj.weight, 0.) + nn.init.constant_(self.attn.out_proj.bias, 0.) + nn.init.xavier_uniform_(self.mlp[0].weight) + nn.init.constant_(self.mlp[-1].weight, 0.) + nn.init.constant_(self.mlp[-1].bias, 0.) 
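+        # With out_proj and the final MLP projection zero-initialised, this block initially
+        # returns its query (the temporal cls token) unchanged, so the added decoder path
+        # starts as a no-op on top of the pretrained backbone and only deviates during training.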
+ + def attention(self, x: torch.Tensor, y: torch.Tensor): + #self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + # return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0] + assert self.attn_mask is None # not implemented + # manual forward to add position information + d_model = self.ln_1.weight.size(0) + q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model] + + k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model] + v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:] + Tx, Ty, N = q.size(0), k.size(0), q.size(1) + q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5)) + + aff = aff.softmax(dim=-1) + out = aff @ v + out = out.permute(2, 0, 1, 3).flatten(2) + out = self.attn.out_proj(out) + return out + + def forward(self, x: torch.Tensor, y: torch.Tensor): + x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y))) + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class Transformer(nn.Module): + def __init__( + self, width, layers, heads, attn_mask=None, backbone_drop_path_rate=0., + t_size=8, dw_reduction=2, + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, + ): + super().__init__() + self.T = t_size + self.return_list = return_list + # Backbone + b_dpr = [x.item() for x in torch.linspace(0, backbone_drop_path_rate, layers)] + self.resblocks = nn.ModuleList([ + ResidualAttentionBlock( + width, heads, attn_mask, + drop_path=b_dpr[i], + dw_reduction=dw_reduction, + ) for i in range(layers) + ]) + + # Extractor + assert n_layers == len(return_list) + self.temporal_cls_token = nn.Parameter(torch.zeros(1, 1, n_dim)) + self.dpe = nn.ModuleList([ + nn.Conv3d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim) + for i in range(n_layers) + ]) + for m in self.dpe: + nn.init.constant_(m.bias, 0.) 
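+        # self.dpe: one depthwise 3D conv (bias zero-initialised) per returned backbone layer;
+        # in forward() its output is added to that layer's patch tokens, effectively a
+        # convolution-based positional encoding, before the Extractor blocks read them.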
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)] + self.dec = nn.ModuleList([ + Extractor( + n_dim, n_head, mlp_factor=mlp_factor, + dropout=mlp_dropout[i], drop_path=dpr[i], + ) for i in range(n_layers) + ]) + # # projection + # self.proj = nn.Sequential( + # nn.LayerNorm(n_dim), + # nn.Dropout(cls_dropout), + # nn.Linear(n_dim, num_classes), + # ) + self.balance = nn.Parameter(torch.zeros((n_dim))) + self.sigmoid = nn.Sigmoid() + + def forward(self, x, mode='video', return_all_feats=False): + if mode == 'video': + T_down = self.T + else: + T_down = 1 + L, NT, C = x.shape + N = NT // T_down + H = W = int((L - 1) ** 0.5) + assert H * W == L - 1 + cls_token = self.temporal_cls_token.repeat(1, N, 1) + + j = -1 + for i, resblock in enumerate(self.resblocks): + x = resblock(x, T_down) + if i in self.return_list: + j += 1 + tmp_x = x.clone() + tmp_x = tmp_x.view(L, N, T_down, C) + # dpe + _, tmp_feats = tmp_x[:1], tmp_x[1:] + tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T_down, H, W) + tmp_feats = self.dpe[j](tmp_feats).view(N, C, T_down, L - 1).permute(3, 0, 2, 1) + tmp_x[1:] = tmp_x[1:] + tmp_feats + # enhancer + tmp_x = tmp_x.permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C + cls_token = self.dec[j](cls_token, tmp_x) + + weight = self.sigmoid(self.balance) + residual = x.view(L, N, T_down, C)[0].mean(1) # L, N, T, C + # return self.proj((1 - weight) * cls_token[0, :, :] + weight * residual) + feats = (1 - weight) * cls_token[0, :, :] + weight * residual + if return_all_feats: + return feats, x.view(L, N, T_down, C) + + return feats + + +class VisionTransformer(nn.Module): + def __init__( + self, + # backbone + input_resolution, patch_size, width, layers, heads, output_dim, backbone_drop_path_rate=0., + t_size=8, kernel_size=3, dw_reduction=1.5, + # extractor + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, + + ): + super().__init__() + self.input_resolution = input_resolution + self.output_dim = output_dim + padding = (kernel_size - 1) // 2 + self.conv1 = nn.Conv3d(3, width, (kernel_size, patch_size, patch_size), (2, patch_size, patch_size), (padding, 0, 0), bias=False) + + scale = width ** -0.5 + self.class_embedding = nn.Parameter(scale * torch.randn(width)) + self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)) + self.ln_pre = LayerNorm(width) + + self.transformer = Transformer( + width, layers, heads, dw_reduction=dw_reduction, + backbone_drop_path_rate=backbone_drop_path_rate, t_size=t_size // 2, + return_list=return_list, n_layers=n_layers, n_dim=n_dim, n_head=n_head, + mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, num_classes=num_classes, + ) + + def forward(self, x, mode='video', return_all_feats=False): + # taken from https://github.com/facebookresearch/omnivore/blob/main/omnivore/models/swin_transformer_3d.py#L703 + if mode == 'image': # for image, stride 2 + # ! 
Use replicate here + x = F.pad(x, (0, 0, 0, 0, 0, 1), mode="replicate") + + x = self.conv1(x) # shape = [*, width, grid, grid] + N, C, T, H, W = x.shape + x = x.permute(0, 2, 3, 4, 1).reshape(N * T, H * W, C) + + x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] + x = x + self.positional_embedding.to(x.dtype) + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) # NLD -> LND + out = self.transformer(x, mode=mode, return_all_feats=return_all_feats) + return out + + +def inflate_weight(weight_2d, time_dim, center=True): + if center: + weight_3d = torch.zeros(*weight_2d.shape) + weight_3d = weight_3d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1) + middle_idx = time_dim // 2 + weight_3d[:, :, middle_idx, :, :] = weight_2d + else: + weight_3d = weight_2d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1) + weight_3d = weight_3d / time_dim + return weight_3d + + +def load_state_dict(model, state_dict): + state_dict_3d = model.state_dict() + for k in state_dict.keys(): + if state_dict[k].shape != state_dict_3d[k].shape: + if len(state_dict_3d[k].shape) <= 2: + print(f'Ignore: {k}') + continue + print(f'Inflate: {k}, {state_dict[k].shape} => {state_dict_3d[k].shape}') + time_dim = state_dict_3d[k].shape[2] + state_dict[k] = inflate_weight(state_dict[k], time_dim) + model.load_state_dict(state_dict, strict=False) + +def clip_load_state_dict(model, state_dict): + state_dict_3d = model.state_dict() + for k in state_dict.keys(): + # print(k, k in state_dict_3d, k in state_dict) + if k in state_dict and k in state_dict_3d and state_dict[k].shape != state_dict_3d[k].shape: + if len(state_dict_3d[k].shape) <= 2: + print(f'Ignore: {k}') + continue + print(f'Inflate: {k}, {state_dict[k].shape} => {state_dict_3d[k].shape}') + time_dim = state_dict_3d[k].shape[2] + state_dict[k] = inflate_weight(state_dict[k], time_dim) + model.load_state_dict(state_dict, strict=False) + +def vit_fusion_b32( + pretrained=True, + t_size=16, dw_reduction=1.5, backbone_drop_path_rate=0., + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, + ): + model = VisionTransformer( + input_resolution=224, + patch_size=32, + width=768, + layers=12, + heads=12, + output_dim=512, + t_size=t_size, + dw_reduction=dw_reduction, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu') + load_state_dict(model, state_dict) + return model.eval() + + +def vit_fusion_b16( + pretrained=True, + t_size=16, dw_reduction=1.5, backbone_drop_path_rate=0., + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, +): + model = VisionTransformer( + input_resolution=224, + patch_size=16, + width=768, + layers=12, + heads=12, + output_dim=512, + t_size=t_size, + dw_reduction=dw_reduction, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + 
n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu') + load_state_dict(model, state_dict) + return model.eval() + + +def vit_fusion_l14( + pretrained=True, + t_size=16, dw_reduction=1.5, backbone_drop_path_rate=0., + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, +): + model = VisionTransformer( + input_resolution=224, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + t_size=t_size, + dw_reduction=dw_reduction, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu') + load_state_dict(model, state_dict) + return model.eval() + + +def vit_fusion_l14_336( + pretrained=True, + t_size=16, dw_reduction=1.5, backbone_drop_path_rate=0., + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, +): + model = VisionTransformer( + input_resolution=336, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + t_size=t_size, + dw_reduction=dw_reduction, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu') + load_state_dict(model, state_dict) + return model.eval() + + +if __name__ == '__main__': + import time + from fvcore.nn import FlopCountAnalysis + from fvcore.nn import flop_count_table + import numpy as np + + seed = 4217 + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + num_frames = 16 + + model = vit_fusion_b16( + pretrained=False, + t_size=num_frames, backbone_drop_path_rate=0.2, drop_path_rate=0.4, + dw_reduction=1.5, + ) + + flops = FlopCountAnalysis(model, torch.rand(1, 3, num_frames, 224, 224)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/clip_vit_only_global.py b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/clip_vit_only_global.py new file mode 100644 index 0000000..6408ef1 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/clip_vit_only_global.py @@ -0,0 +1,483 @@ +#!/usr/bin/env python +import os +from collections import OrderedDict + +from timm.models.layers import DropPath +import torch +from torch import nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint + +from .attention 
import MultiheadAttention + +import logging +logger = logging.getLogger(__name__) + +MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model' +_MODELS = { + "ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"), + "ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"), + "ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"), + "ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"), +} + + +def conv_1x1x1(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (1, 1, 1), (1, 1, 1), (0, 0, 0), groups=groups) + +def conv_3x3x3(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (3, 3, 3), (1, 1, 1), (1, 1, 1), groups=groups) + +def conv_1x3x3(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (1, 3, 3), (1, 1, 1), (0, 1, 1), groups=groups) + +def bn_3d(dim): + return nn.BatchNorm3d(dim) + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def __init__( + self, d_model, n_head, attn_mask=None, drop_path=0.0, + ): + super().__init__() + + self.n_head = n_head + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + logger.info(f'Drop path rate: {drop_path}') + + # spatial + self.attn = MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + def attention(self, x): + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def forward(self, x, T=8, use_checkpoint=False): + # x: 1+HW, NT, C + # MHSA + if use_checkpoint: + attn_out = checkpoint.checkpoint(self.attention, self.ln_1(x)) + x = x + self.drop_path(attn_out) + else: + x = x + self.drop_path(self.attention(self.ln_1(x))) + # FFN + if use_checkpoint: + mlp_out = checkpoint.checkpoint(self.mlp, self.ln_2(x)) + x = x + self.drop_path(mlp_out) + else: + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class Extractor(nn.Module): + def __init__( + self, d_model, n_head, attn_mask=None, + mlp_factor=4.0, dropout=0.0, drop_path=0.0, + ): + super().__init__() + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + logger.info(f'Drop path rate: {drop_path}') + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = nn.LayerNorm(d_model) + d_mlp = round(mlp_factor * d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_mlp)), + ("gelu", QuickGELU()), + ("dropout", nn.Dropout(dropout)), + ("c_proj", nn.Linear(d_mlp, d_model)) + ])) + self.ln_2 = nn.LayerNorm(d_model) + self.ln_3 = nn.LayerNorm(d_model) + self.attn_mask = attn_mask + + # zero init + nn.init.xavier_uniform_(self.attn.in_proj_weight) + nn.init.constant_(self.attn.out_proj.weight, 0.) + nn.init.constant_(self.attn.out_proj.bias, 0.) + nn.init.xavier_uniform_(self.mlp[0].weight) + nn.init.constant_(self.mlp[-1].weight, 0.) + nn.init.constant_(self.mlp[-1].bias, 0.) 
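+        # The attention() method below re-implements the cross-attention by hand, splitting
+        # self.attn.in_proj_weight into its query/key/value thirds (q from the cls-token query x,
+        # k and v from the backbone tokens y), as noted, to allow adding position information;
+        # no dropout is applied to the attention map in this manual path.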
+ + def attention(self, x: torch.Tensor, y: torch.Tensor): + #self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + # return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0] + assert self.attn_mask is None # not implemented + # manual forward to add position information + d_model = self.ln_1.weight.size(0) + q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model] + + k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model] + v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:] + Tx, Ty, N = q.size(0), k.size(0), q.size(1) + q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5)) + + aff = aff.softmax(dim=-1) + out = aff @ v + out = out.permute(2, 0, 1, 3).flatten(2) + out = self.attn.out_proj(out) + return out + + def forward(self, x: torch.Tensor, y: torch.Tensor): + x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y))) + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class Transformer(nn.Module): + def __init__( + self, width, layers, heads, attn_mask=None, backbone_drop_path_rate=0., + use_checkpoint=False, checkpoint_num=[0], t_size=8, + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, + ): + super().__init__() + self.T = t_size + self.return_list = return_list + # Backbone + b_dpr = [x.item() for x in torch.linspace(0, backbone_drop_path_rate, layers)] + self.resblocks = nn.ModuleList([ + ResidualAttentionBlock( + width, heads, attn_mask, + drop_path=b_dpr[i], + ) for i in range(layers) + ]) + # checkpoint + self.use_checkpoint = use_checkpoint + self.checkpoint_num = checkpoint_num + logger.info(f'Use checkpoint: {self.use_checkpoint}') + logger.info(f'Checkpoint number: {self.checkpoint_num}') + + # Extractor + assert n_layers == len(return_list) + self.temporal_cls_token = nn.Parameter(torch.zeros(1, 1, n_dim)) + self.dpe = nn.ModuleList([ + nn.Conv3d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim) + for i in range(n_layers) + ]) + for m in self.dpe: + nn.init.constant_(m.bias, 0.) 
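+        # One Extractor block per layer in return_list; each refines the shared temporal_cls_token
+        # by cross-attending to that layer's (dpe-enhanced) tokens. In forward(), the final feature
+        # mixes this cls token with the frame-averaged backbone [CLS] token via the learnable,
+        # sigmoid-gated `balance` parameter defined below.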
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)] + self.dec = nn.ModuleList([ + Extractor( + n_dim, n_head, mlp_factor=mlp_factor, + dropout=mlp_dropout[i], drop_path=dpr[i], + ) for i in range(n_layers) + ]) + # # projection + # self.proj = nn.Sequential( + # nn.LayerNorm(n_dim), + # nn.Dropout(cls_dropout), + # nn.Linear(n_dim, num_classes), + # ) + self.balance = nn.Parameter(torch.zeros((n_dim))) + self.sigmoid = nn.Sigmoid() + + def forward(self, x, mode='video', return_all_feats=False): + if mode == 'video': + T_down = self.T + else: + T_down = 1 + L, NT, C = x.shape + N = NT // T_down + H = W = int((L - 1) ** 0.5) + cls_token = self.temporal_cls_token.repeat(1, N, 1) + + j = -1 + for i, resblock in enumerate(self.resblocks): + if self.use_checkpoint and i < self.checkpoint_num[0]: + x = resblock(x, self.T, use_checkpoint=True) + else: + x = resblock(x, T_down) + if i in self.return_list: + j += 1 + tmp_x = x.clone() + tmp_x = tmp_x.view(L, N, T_down, C) + # dpe + _, tmp_feats = tmp_x[:1], tmp_x[1:] + tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T_down, H, W) + tmp_feats = self.dpe[j](tmp_feats).view(N, C, T_down, L - 1).permute(3, 0, 2, 1) + tmp_x[1:] = tmp_x[1:] + tmp_feats + # enhancer + tmp_x = tmp_x.permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C + cls_token = self.dec[j](cls_token, tmp_x) + + weight = self.sigmoid(self.balance) + residual = x.view(L, N, T_down, C)[0].mean(1) # L, N, T, C + # return self.proj((1 - weight) * cls_token[0, :, :] + weight * residual) + feats = (1 - weight) * cls_token[0, :, :] + weight * residual + if return_all_feats: + return feats, x.view(L, N, T_down, C) + + return feats + + +class VisionTransformer(nn.Module): + def __init__( + self, + # backbone + input_resolution, patch_size, width, layers, heads, output_dim, backbone_drop_path_rate=0., + use_checkpoint=False, checkpoint_num=[0], t_size=8, + # extractor + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, + + ): + super().__init__() + self.input_resolution = input_resolution + self.output_dim = output_dim + self.conv1 = nn.Conv3d(3, width, (1, patch_size, patch_size), (1, patch_size, patch_size), (0, 0, 0), bias=False) + + scale = width ** -0.5 + self.class_embedding = nn.Parameter(scale * torch.randn(width)) + self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)) + self.ln_pre = LayerNorm(width) + + self.transformer = Transformer( + width, layers, heads, + backbone_drop_path_rate=backbone_drop_path_rate, + use_checkpoint=use_checkpoint, checkpoint_num=checkpoint_num, t_size=t_size, + return_list=return_list, n_layers=n_layers, n_dim=n_dim, n_head=n_head, + mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, num_classes=num_classes, + ) + + def forward(self, x, mode='video', return_all_feats=False): + x = self.conv1(x) # shape = [*, width, grid, grid] + N, C, T, H, W = x.shape + x = x.permute(0, 2, 3, 4, 1).reshape(N * T, H * W, C) + + x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] + x = x + self.positional_embedding.to(x.dtype) + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) # NLD -> LND + out = self.transformer(x, mode=mode, 
return_all_feats=return_all_feats) + return out + + +def inflate_weight(weight_2d, time_dim, center=True): + if center: + weight_3d = torch.zeros(*weight_2d.shape) + weight_3d = weight_3d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1) + middle_idx = time_dim // 2 + weight_3d[:, :, middle_idx, :, :] = weight_2d + else: + weight_3d = weight_2d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1) + weight_3d = weight_3d / time_dim + return weight_3d + + +def load_state_dict(model, state_dict): + state_dict_3d = model.state_dict() + for k in state_dict.keys(): + if state_dict[k].shape != state_dict_3d[k].shape: + if len(state_dict_3d[k].shape) <= 2: + logger.info(f'Ignore: {k}') + continue + logger.info(f'Inflate: {k}, {state_dict[k].shape} => {state_dict_3d[k].shape}') + time_dim = state_dict_3d[k].shape[2] + state_dict[k] = inflate_weight(state_dict[k], time_dim) + model.load_state_dict(state_dict, strict=False) + + +def vit_only_global_b32( + pretrained=True, use_checkpoint=False, checkpoint_num=[0], + t_size=16, backbone_drop_path_rate=0., + return_list=[8, 9, 10, 11], + n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, + ): + model = VisionTransformer( + input_resolution=224, + patch_size=32, + width=768, + layers=12, + heads=12, + output_dim=512, + use_checkpoint=use_checkpoint, + checkpoint_num=checkpoint_num, + t_size=t_size, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + if pretrained: + logger.info('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu') + load_state_dict(model, state_dict) + return model.eval() + + +def vit_only_global_b16( + pretrained=True, use_checkpoint=False, checkpoint_num=[0], + t_size=16, backbone_drop_path_rate=0., + return_list=[8, 9, 10, 11], + n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, +): + model = VisionTransformer( + input_resolution=224, + patch_size=16, + width=768, + layers=12, + heads=12, + output_dim=512, + use_checkpoint=use_checkpoint, + checkpoint_num=checkpoint_num, + t_size=t_size, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + if pretrained: + logger.info('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu') + load_state_dict(model, state_dict) + return model.eval() + + +def vit_only_global_l14( + pretrained=True, use_checkpoint=False, checkpoint_num=[0], + t_size=16, backbone_drop_path_rate=0., + return_list=[20, 21, 22, 23], + n_layers=4, n_dim=1024, n_head=16, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, +): + model = VisionTransformer( + input_resolution=224, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + use_checkpoint=use_checkpoint, + checkpoint_num=checkpoint_num, + t_size=t_size, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + 
drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + if pretrained: + logger.info('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu') + load_state_dict(model, state_dict) + return model.eval() + + +def vit_only_global_l14_336( + pretrained=True, use_checkpoint=False, checkpoint_num=[0], + t_size=16, backbone_drop_path_rate=0., + return_list=[20, 21, 22, 23], + n_layers=4, n_dim=1024, n_head=16, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, +): + model = VisionTransformer( + input_resolution=336, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + use_checkpoint=use_checkpoint, + checkpoint_num=checkpoint_num, + t_size=t_size, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + if pretrained: + logger.info('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu') + load_state_dict(model, state_dict) + return model.eval() + + +if __name__ == '__main__': + import time + from fvcore.nn import FlopCountAnalysis + from fvcore.nn import flop_count_table + import numpy as np + + seed = 4217 + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + num_frames = 8 + + model = vit_only_global_l14( + pretrained=False, + t_size=num_frames, backbone_drop_path_rate=0.2, drop_path_rate=0.4, + use_checkpoint=True, checkpoint_num=[0], + ) + + flops = FlopCountAnalysis(model, torch.rand(1, 3, num_frames, 224, 224)) + s = time.time() + logger.info(flop_count_table(flops, max_depth=1)) + logger.info(time.time()-s) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/evl_module.py b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/evl_module.py new file mode 100644 index 0000000..a1f688e --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/evl_module.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python + +from collections import OrderedDict + +from timm.models.layers import DropPath +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor): + return x * torch.sigmoid(1.702 * x) + + +class ResidualDecoderBlock(nn.Module): + def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None, + mlp_factor: float = 4.0, dropout: float = 0.0, drop_path: float = 0.0): + super().__init__() + + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + print(f'Drop path rate: {drop_path}') + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = nn.LayerNorm(d_model) + d_mlp = round(mlp_factor * d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_mlp)), + ("gelu", QuickGELU()), + ("dropout", nn.Dropout(dropout)), + ("c_proj", nn.Linear(d_mlp, d_model)) + ])) + self.ln_2 = nn.LayerNorm(d_model) + self.ln_3 = nn.LayerNorm(d_model) + self.attn_mask = attn_mask + + nn.init.xavier_uniform_(self.attn.in_proj_weight) + nn.init.xavier_uniform_(self.attn.out_proj.weight) + nn.init.xavier_uniform_(self.mlp[0].weight) + nn.init.xavier_uniform_(self.mlp[-1].weight) + + def attention(self, x: torch.Tensor, y: torch.Tensor): + #self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + # return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0] + assert self.attn_mask is None # not implemented + # manual forward to add position information + d_model = self.ln_1.weight.size(0) + q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model] + + k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model] + v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:] + Tx, Ty, N = q.size(0), k.size(0), q.size(1) + q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5)) + + aff = aff.softmax(dim=-1) + out = aff @ v + out = out.permute(2, 0, 1, 3).flatten(2) + out = self.attn.out_proj(out) + return out + + def forward(self, x: torch.Tensor, y: torch.Tensor): + x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y))) + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class TransformerDecoder(nn.Module): + def __init__(self, n_layers=4, + n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, + use_t_conv=True, use_t_pos_embed=True, num_classes=400, + add_residual=False, + ): + super().__init__() + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)] + self.dec = nn.ModuleList([ + ResidualDecoderBlock(n_dim, n_head, mlp_factor=mlp_factor, dropout=mlp_dropout[i], drop_path=dpr[i]) + for i in range(n_layers) + ]) + self.proj = nn.Sequential( + nn.LayerNorm(n_dim), + nn.Dropout(cls_dropout), + nn.Linear(n_dim, num_classes), + ) + self.temporal_cls_token = nn.Parameter(torch.zeros(n_dim)) + self.add_residual = add_residual + print(f'Add residual {add_residual}') + + if use_t_conv: + self.tconv = nn.ModuleList([ + nn.Conv1d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim) + for i in range(n_layers) + ]) + for m in self.tconv: + nn.init.constant_(m.bias, 0.) + m.weight.data[...] 
= torch.Tensor([0, 1, 0]) + else: + self.tconv = None + + if use_t_pos_embed: + self.pemb_t = nn.Parameter(torch.zeros([n_layers, t_size, n_dim])) + else: + self.pemb_t = None + + self.t_size = t_size + + def forward(self, clip_feats_all): + # clip_feats_all = clip_feats_all[-len(self.dec):] + # only return n_layers features, save memory + clip_feats = [x for x in clip_feats_all] + + L, N, T, C = clip_feats[0].size() + x = self.temporal_cls_token.view(1, 1, -1).repeat(1, N, 1) + + for i in range(len(clip_feats)): + if self.tconv is not None: + L, N, T, C = clip_feats[i].shape + clip_feats[i] = clip_feats[i].permute(0, 1, 3, 2).flatten(0, 1) # L * N, C, T + clip_feats[i] = self.tconv[i](clip_feats[i]).permute(0, 2, 1).contiguous().view(L, N, T, C) + if self.pemb_t is not None: + clip_feats[i] = clip_feats[i] + self.pemb_t[i] + clip_feats[i] = clip_feats[i].permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C + + for i in range(len(self.dec)): + x = self.dec[i](x, clip_feats[i]) + + if self.add_residual: + residual = clip_feats_all[-1][0].mean(1) + return self.proj(x[0, :, :] + residual) + else: + return self.proj(x[0, :, :]) + + +if __name__ == '__main__': + model = TransformerDecoder() + + # construct a fake input to demonstrate input tensor shape + L, N, T, C = 197, 1, 8, 768 # num_image_tokens, video_batch_size, t_size, feature_dim + # we use intermediate feature maps from multiple blocks, so input features should be a list + input_features = [] + for i in range(4): # vit-b has 12 blocks + # every item in input_features contains features maps from a single block + # every item is a tuple containing 3 feature maps: + # (1) block output features (i.e. after mlp) with shape L, N, T, C + # (2) projected query features with shape L, N, T, C + # (3) projected key features with shape L, N, T, C + input_features.append( + torch.zeros([L, N, T, C])) + # some small optimizations: + # (1) We only decode from the last $n$ blocks so it's good as long as the last $n$ items of input_features is valid and all previous items can be filled with None to save memory. By default $n=4$. + # (2) projected query/key features are optional. If you are using an uncompatible image backbone without query/key (e.g. CNN), you can fill the position with None (i.e. the tuple should be (Tensor, None, None) and set use_image_attnmap=False when constructing the model. 
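+    # NB: in this TransformerDecoder each item of input_features is a single tensor of
+    # shape [L, N, T, C], exactly as constructed above; the per-item tuple of
+    # (output, query, key) maps described in the comments is only required by the
+    # attention-map-based decoder variants.
+    # Minimal sanity check with the default constructor arguments (n_layers=4, num_classes=400):
+    with torch.no_grad():
+        assert model(input_features).shape == (N, 400)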
+ + print(model(input_features).shape) # should be N, 400 diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/evl_module_uniformer_diff_conv_balance.py b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/evl_module_uniformer_diff_conv_balance.py new file mode 100644 index 0000000..09e6aa6 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_evl/evl_utils/evl_module_uniformer_diff_conv_balance.py @@ -0,0 +1,519 @@ +#!/usr/bin/env python + +from collections import OrderedDict + +from timm.models.layers import trunc_normal_, DropPath +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor): + return x * torch.sigmoid(1.702 * x) + + +def conv_1x1x1(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (1, 1, 1), (1, 1, 1), (0, 0, 0), groups=groups) + +def conv_3x3x3(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (3, 3, 3), (1, 1, 1), (1, 1, 1), groups=groups) + +def conv_1x3x3(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (1, 3, 3), (1, 1, 1), (0, 1, 1), groups=groups) + +def bn_3d(dim): + return nn.BatchNorm3d(dim) + + +class STM(nn.Module): + def __init__(self, n_dim, reduction=4): + super(STM, self).__init__() + reduced_c = n_dim // reduction + self.reduce = nn.Sequential( + nn.Conv2d(n_dim, reduced_c, kernel_size=1, bias=False), + nn.BatchNorm2d(reduced_c) + ) + self.shift = nn.Conv2d(reduced_c, reduced_c, kernel_size=3, padding=1, groups=reduced_c, bias=False) + self.recover = nn.Sequential( + nn.Conv2d(reduced_c, n_dim, kernel_size=1, bias=False), + nn.BatchNorm2d(n_dim) + ) + self.pad = (0, 0, 0, 0, 0, 0, 0, 1) + + def forward(self, x): + # x: [L, N, T, C] + cls_token, x = x[:1], x[1:] + L, N, T, C = x.shape + H = W = int(L**0.5) + + fea = x.permute(1, 2, 3, 0).reshape(N*T, C, H, W) + bottleneck = self.reduce(fea) # NT, C//r, H, W + + # t feature + reshape_bottleneck = bottleneck.view((-1, T) + bottleneck.size()[1:]) # N, T, C//r, H, W + t_fea, __ = reshape_bottleneck.split([T-1, 1], dim=1) # N, T-1, C//r, H, W + + # apply transformation conv to t+1 feature + conv_bottleneck = self.shift(bottleneck) # NT, C//r, H, W + # reshape fea: N, T, C//r, H, W + reshape_conv_bottleneck = conv_bottleneck.view((-1, T) + conv_bottleneck.size()[1:]) + __, tPlusone_fea = reshape_conv_bottleneck.split([1, T-1], dim=1) # N, T-1, C//r, H, W + + # motion fea = t+1_fea - t_fea + # pad the last timestamp + diff_fea = tPlusone_fea - t_fea # N, T-1, C//r, H, W + # pad = (0,0,0,0,0,0,0,1) + diff_fea_pluszero = F.pad(diff_fea, self.pad, mode="constant", value=0) # N, T, C//r, H, W + diff_fea_pluszero = diff_fea_pluszero.view((-1,) + diff_fea_pluszero.size()[2:]) # NT, C//r, H, W + y = self.recover(diff_fea_pluszero) # NT, C, H, W + + # reshape + y = y.reshape(N, T, C, L).permute(3, 0, 1, 2) + y = torch.cat([cls_token, y], dim=0) + return y + + +class DSTM(nn.Module): + def __init__(self, n_dim, reduction=4): + super(DSTM, self).__init__() + reduced_c = n_dim // reduction + self.reduce = nn.Sequential( + nn.Conv2d(n_dim, reduced_c, kernel_size=1, bias=False), + nn.BatchNorm2d(reduced_c) + ) + # DW(T+1) - T + self.shift_pre = nn.Conv2d(reduced_c, reduced_c, kernel_size=3, padding=1, groups=reduced_c, bias=False) + self.recover_pre = nn.Sequential( + nn.Conv2d(reduced_c, n_dim, kernel_size=1, bias=False), + nn.BatchNorm2d(n_dim) + ) + self.pad_pre = (0, 0, 0, 0, 0, 0, 0, 1) + # DW(T-1) - T + self.shift_back = nn.Conv2d(reduced_c, reduced_c, kernel_size=3, padding=1, groups=reduced_c, 
bias=False) + self.recover_back = nn.Sequential( + nn.Conv2d(reduced_c, n_dim, kernel_size=1, bias=False), + nn.BatchNorm2d(n_dim) + ) + self.pad_back = (0, 0, 0, 0, 0, 0, 0, 1) + + def forward(self, x): + # x: [L, N, T, C] + cls_token, x = x[:1], x[1:] + L, N, T, C = x.shape + H = W = int(L**0.5) + + fea = x.permute(1, 2, 3, 0).reshape(N*T, C, H, W) + bottleneck = self.reduce(fea) # NT, C//r, H, W + + # t feature + reshape_bottleneck = bottleneck.view((-1, T) + bottleneck.size()[1:]) # N, T, C//r, H, W + pre_t_fea, __ = reshape_bottleneck.split([T-1, 1], dim=1) # N, T-1, C//r, H, W + back_t_fea, __ = reshape_bottleneck.split([1, T-1], dim=1) # N, T-1, C//r, H, W + # apply transformation conv to t+1/t-1 feature + pre_conv_bottleneck = self.shift_pre(bottleneck) # NT, C//r, H, W + back_conv_bottleneck = self.shift_back(bottleneck) # NT, C//r, H, W + # reshape fea: N, T, C//r, H, W + pre_reshape_conv_bottleneck = pre_conv_bottleneck.view((-1, T) + pre_conv_bottleneck.size()[1:]) + back_reshape_conv_bottleneck = back_conv_bottleneck.view((-1, T) + back_conv_bottleneck.size()[1:]) + __, tPlusone_fea = pre_reshape_conv_bottleneck.split([1, T-1], dim=1) # N, T-1, C//r, H, W + tMinusone_fea, _ = back_reshape_conv_bottleneck.split([T-1, 1], dim=1) # N, T-1, C//r, H, W + # pre_fea = t+1_fea - t_fea + # back_fea = t-1_fea - t_fea + pre_diff_fea = tPlusone_fea - pre_t_fea # N, T-1, C//r, H, W + back_diff_fea = tMinusone_fea - back_t_fea # N, T-1, C//r, H, W + # pad the last/first timestamp + pre_diff_fea_pluszero = F.pad(pre_diff_fea, self.pad_pre, mode="constant", value=0) # N, T, C//r, H, W + pre_diff_fea_pluszero = pre_diff_fea_pluszero.view((-1,) + pre_diff_fea_pluszero.size()[2:]) # NT, C//r, H, W + back_diff_fea_pluszero = F.pad(back_diff_fea, self.pad_back, mode="constant", value=0) # N, T, C//r, H, W + back_diff_fea_pluszero = back_diff_fea_pluszero.view((-1,) + back_diff_fea_pluszero.size()[2:]) # NT, C//r, H, W + # recover channel + pre_y = self.recover_pre(pre_diff_fea_pluszero) # NT, C, H, W + back_y = self.recover_back(back_diff_fea_pluszero) # NT, C, H, W + # reshape + y = (pre_y + back_y).reshape(N, T, C, L).permute(3, 0, 1, 2) + + # cat cls_token + y = torch.cat([cls_token, y], dim=0) + return y + + +class TDN(nn.Module): + def __init__(self, channel, n_segment=8, index=1, reduction=4): + super(TDN, self).__init__() + self.channel = channel + self.reduction = reduction + self.n_segment = n_segment + self.stride = 2**(index-1) + self.conv1 = nn.Conv2d(in_channels=self.channel, + out_channels=self.channel//self.reduction, + kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(num_features=self.channel//self.reduction) + self.conv2 = nn.Conv2d(in_channels=self.channel//self.reduction, + out_channels=self.channel//self.reduction, + kernel_size=3, padding=1, groups=self.channel//self.reduction, bias=False) + + self.avg_pool_forward2 = nn.AvgPool2d(kernel_size=2, stride=2) + self.avg_pool_forward4 = nn.AvgPool2d(kernel_size=4, stride=4) + + self.sigmoid_forward = nn.Sigmoid() + + self.avg_pool_backward2 = nn.AvgPool2d(kernel_size=2, stride=2)#nn.AdaptiveMaxPool2d(1) + self.avg_pool_backward4 = nn.AvgPool2d(kernel_size=4, stride=4) + + self.sigmoid_backward = nn.Sigmoid() + + self.pad1_forward = (0, 0, 0, 0, 0, 0, 0, 1) + self.pad1_backward = (0, 0, 0, 0, 0, 0, 1, 0) + + self.conv3 = nn.Conv2d(in_channels=self.channel//self.reduction, + out_channels=self.channel, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(num_features=self.channel) + + self.conv3_smallscale2 = 
nn.Conv2d(in_channels=self.channel//self.reduction, + out_channels=self.channel//self.reduction,padding=1, kernel_size=3, bias=False) + self.bn3_smallscale2 = nn.BatchNorm2d(num_features=self.channel//self.reduction) + + self.conv3_smallscale4 = nn.Conv2d(in_channels = self.channel//self.reduction, + out_channels=self.channel//self.reduction,padding=1, kernel_size=3, bias=False) + self.bn3_smallscale4 = nn.BatchNorm2d(num_features=self.channel//self.reduction) + + def spatial_pool(self, x): + nt, channel, height, width = x.size() + input_x = x + # [N, C, H * W] + input_x = input_x.view(nt, channel, height * width) + # [N, 1, C, H * W] + input_x = input_x.unsqueeze(1) + # [N, 1, H, W] + context_mask = self.conv_mask(x) + # [N, 1, H * W] + context_mask = context_mask.view(nt, 1, height * width) + # [N, 1, H * W] + context_mask = self.softmax(context_mask) + context_mask = context_mask.view(nt,1,height,width) + return context_mask + + def forward(self, x): + # x: [L, N, T, C] + cls_token, x = x[:1], x[1:] + L, N, T, C = x.shape + H = W = int(L**0.5) + fea = x.permute(1, 2, 3, 0).reshape(N*T, C, H, W) + + bottleneck = self.conv1(fea) # nt, c//r, h, w + bottleneck = self.bn1(bottleneck) # nt, c//r, h, w + reshape_bottleneck = bottleneck.view((-1, self.n_segment) + bottleneck.size()[1:]) # n, t, c//r, h, w + + t_fea_forward, _ = reshape_bottleneck.split([self.n_segment -1, 1], dim=1) # n, t-1, c//r, h, w + _, t_fea_backward = reshape_bottleneck.split([1, self.n_segment -1], dim=1) # n, t-1, c//r, h, w + + conv_bottleneck = self.conv2(bottleneck) # nt, c//r, h, w + reshape_conv_bottleneck = conv_bottleneck.view((-1, self.n_segment) + conv_bottleneck.size()[1:]) # n, t, c//r, h, w + _, tPlusone_fea_forward = reshape_conv_bottleneck.split([1, self.n_segment-1], dim=1) # n, t-1, c//r, h, w + tPlusone_fea_backward ,_ = reshape_conv_bottleneck.split([self.n_segment-1, 1], dim=1) # n, t-1, c//r, h, w + diff_fea_forward = tPlusone_fea_forward - t_fea_forward # n, t-1, c//r, h, w + diff_fea_backward = tPlusone_fea_backward - t_fea_backward# n, t-1, c//r, h, w + diff_fea_pluszero_forward = F.pad(diff_fea_forward, self.pad1_forward, mode="constant", value=0) # n, t, c//r, h, w + diff_fea_pluszero_forward = diff_fea_pluszero_forward.view((-1,) + diff_fea_pluszero_forward.size()[2:]) #nt, c//r, h, w + diff_fea_pluszero_backward = F.pad(diff_fea_backward, self.pad1_backward, mode="constant", value=0) # n, t, c//r, h, w + diff_fea_pluszero_backward = diff_fea_pluszero_backward.view((-1,) + diff_fea_pluszero_backward.size()[2:]) #nt, c//r, h, w + y_forward_smallscale2 = self.avg_pool_forward2(diff_fea_pluszero_forward) # nt, c//r, 1, 1 + y_backward_smallscale2 = self.avg_pool_backward2(diff_fea_pluszero_backward) # nt, c//r, 1, 1 + + y_forward_smallscale4 = diff_fea_pluszero_forward + y_backward_smallscale4 = diff_fea_pluszero_backward + y_forward_smallscale2 = self.bn3_smallscale2(self.conv3_smallscale2(y_forward_smallscale2)) + y_backward_smallscale2 = self.bn3_smallscale2(self.conv3_smallscale2(y_backward_smallscale2)) + + y_forward_smallscale4 = self.bn3_smallscale4(self.conv3_smallscale4(y_forward_smallscale4)) + y_backward_smallscale4 = self.bn3_smallscale4(self.conv3_smallscale4(y_backward_smallscale4)) + + y_forward_smallscale2 = F.interpolate(y_forward_smallscale2, diff_fea_pluszero_forward.size()[2:]) + y_backward_smallscale2 = F.interpolate(y_backward_smallscale2, diff_fea_pluszero_backward.size()[2:]) + + y_forward = self.bn3(self.conv3(1.0/3.0*diff_fea_pluszero_forward + 
1.0/3.0*y_forward_smallscale2 + 1.0/3.0*y_forward_smallscale4))# nt, c, 1, 1 + y_backward = self.bn3(self.conv3(1.0/3.0*diff_fea_pluszero_backward + 1.0/3.0*y_backward_smallscale2 + 1.0/3.0*y_backward_smallscale4)) # nt, c, 1, 1 + + y_forward = self.sigmoid_forward(y_forward) - 0.5 + y_backward = self.sigmoid_backward(y_backward) - 0.5 + + y = 0.5 * y_forward + 0.5 * y_backward + attn = fea * y + x = x + attn.reshape(N, T, C, L).permute(3, 0, 1, 2) + x = torch.cat([cls_token, x], dim=0) + return x + + +class CMlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = conv_1x1x1(in_features, hidden_features) + self.act = act_layer() + self.fc2 = conv_1x1x1(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class CBlock(nn.Module): + def __init__(self, dim, mlp_ratio=4., dropout=0., drop_path=0., uni_type='3d', add_ffn=True): + super().__init__() + self.norm1 = bn_3d(dim) + self.conv1 = conv_1x1x1(dim, dim, 1) + self.conv2 = conv_1x1x1(dim, dim, 1) + if uni_type == '3d': + print('Use 3d conv for local MHRA') + self.attn = conv_3x3x3(dim, dim, groups=dim) + else: + print('Use 2d conv for local MHRA') + self.attn = conv_1x3x3(dim, dim, groups=dim) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.add_ffn = add_ffn + if add_ffn: + print('Add FFN in local MHRA') + self.norm2 = bn_3d(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = CMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=nn.GELU, drop=dropout) + + print('Init zero') + nn.init.constant_(self.conv2.weight, 0.) + nn.init.constant_(self.conv2.bias, 0.) + if add_ffn: + nn.init.constant_(self.mlp.fc2.weight, 0.) + nn.init.constant_(self.mlp.fc2.bias, 0.) + + def forward(self, x): + x = x + self.drop_path(self.conv2(self.attn(self.conv1(self.norm1(x))))) + if self.add_ffn: + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class ResidualDecoderBlock(nn.Module): + def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None, + mlp_factor: float = 4.0, dropout: float = 0.0, drop_path: float = 0.0): + super().__init__() + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + print(f'Drop path rate: {drop_path}') + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = nn.LayerNorm(d_model) + d_mlp = round(mlp_factor * d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_mlp)), + ("gelu", QuickGELU()), + ("dropout", nn.Dropout(dropout)), + ("c_proj", nn.Linear(d_mlp, d_model)) + ])) + self.ln_2 = nn.LayerNorm(d_model) + self.ln_3 = nn.LayerNorm(d_model) + self.attn_mask = attn_mask + + nn.init.xavier_uniform_(self.attn.in_proj_weight) + # nn.init.xavier_uniform_(self.attn.out_proj.weight) + nn.init.constant_(self.attn.out_proj.weight, 0.) + nn.init.constant_(self.attn.out_proj.bias, 0.) + nn.init.xavier_uniform_(self.mlp[0].weight) + # nn.init.xavier_uniform_(self.mlp[-1].weight) + nn.init.constant_(self.mlp[-1].weight, 0.) + nn.init.constant_(self.mlp[-1].bias, 0.) 
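+        # Unlike the ResidualDecoderBlock in evl_module.py (Xavier init on the output
+        # projections), this variant zero-initialises attn.out_proj and the final MLP
+        # projection, so every decoder block starts out as an identity mapping on its
+        # residual path and only gradually learns its contribution.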
+ + def attention(self, x: torch.Tensor, y: torch.Tensor): + #self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + # return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0] + assert self.attn_mask is None # not implemented + # manual forward to add position information + d_model = self.ln_1.weight.size(0) + q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model] + + k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model] + v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:] + Tx, Ty, N = q.size(0), k.size(0), q.size(1) + q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5)) + + aff = aff.softmax(dim=-1) + out = aff @ v + out = out.permute(2, 0, 1, 3).flatten(2) + out = self.attn.out_proj(out) + return out + + def forward(self, x: torch.Tensor, y: torch.Tensor): + x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y))) + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class TransformerDecoder_uniformer_diff_conv_balance(nn.Module): + def __init__(self, n_layers=4, + uni_layer=4, uni_type='3d', add_ffn=True, t_conv_type='1d', pre_prompt=True, + n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14, + balance=0., + use_t_conv=True, after_me=True, before_me=False, me_type='dstm', me_reduction=4, + use_t_pos_embed=True, num_classes=400): + super().__init__() + + n_layers += uni_layer + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)] + + self.uni_layer = uni_layer + self.uni_dec = nn.ModuleList([ + CBlock(n_dim, mlp_ratio=mlp_factor, dropout=mlp_dropout[i], drop_path=dpr[i], uni_type=uni_type, add_ffn=add_ffn) + for i in range(uni_layer) + ]) + + self.dec = nn.ModuleList([ + ResidualDecoderBlock(n_dim, n_head, mlp_factor=mlp_factor, dropout=mlp_dropout[i], drop_path=dpr[i]) + for i in range(n_layers) + ]) + self.proj = nn.Sequential( + nn.LayerNorm(n_dim), + nn.Dropout(cls_dropout), + nn.Linear(n_dim, num_classes), + ) + + self.pre_prompt = pre_prompt + if pre_prompt: + print('Add pre prompt') + self.pre_temporal_cls_token = nn.Parameter(torch.zeros(n_dim)) + self.temporal_cls_token = nn.Parameter(torch.zeros(n_dim)) + + if use_t_conv: + self.t_conv_type = t_conv_type + if t_conv_type == '1d': + print('Use 1d t_conv for CPE') + self.tconv = nn.ModuleList([ + nn.Conv1d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim) + for i in range(n_layers) + ]) + for m in self.tconv: + nn.init.constant_(m.bias, 0.) + m.weight.data[...] = torch.Tensor([0, 1, 0]) + else: + print('Use 3d t_conv for CPE') + self.tconv = nn.ModuleList([ + nn.Conv3d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim) + for i in range(n_layers) + ]) + for m in self.tconv: + nn.init.constant_(m.bias, 0.) 
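+            # Note: only the 1d CPE branch above gets the temporal identity kernel [0, 1, 0];
+            # the 3d depthwise conv keeps PyTorch's default weight init and only zeroes the bias.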
+ else: + self.tconv = None + + self.before_me = before_me + self.after_me = after_me + if before_me or after_me: + assert before_me != after_me + print(f'Use {me_type} attention, Before {before_me}, After {after_me}') + if me_type == 'stm': + me_op = STM + elif me_type == 'dstm': + me_op = DSTM + elif me_type == 'tdn': + me_op = TDN + self.me = nn.ModuleList([me_op(n_dim, reduction=me_reduction) for i in range(n_layers)]) + + if use_t_pos_embed: + self.pemb_t = nn.Parameter(torch.zeros([n_layers, t_size, n_dim])) + else: + self.pemb_t = None + + print(F'Balnce weight {balance}') + self.balance = nn.Parameter(torch.ones((n_dim)) * balance) + self.sigmoid = nn.Sigmoid() + + def forward(self, clip_feats_all, mode='video'): + # clip_feats_all = clip_feats_all[-len(self.dec):] + # only return n_layers features, save memory + clip_feats = [x for x in clip_feats_all] + if self.after_me: + origin_clip_feats = [x for x in clip_feats_all] + + L, N, T, C = clip_feats[0].size() + x = self.temporal_cls_token.view(1, 1, -1).repeat(1, N, 1) + + for i in range(len(clip_feats)): + if self.before_me: + # contain residual + clip_feats[i] = self.me[i](clip_feats[i]) + if self.tconv is not None: + L, N, T, C = clip_feats[i].shape + if self.t_conv_type == '1d': + clip_feats[i] = clip_feats[i].permute(0, 1, 3, 2).flatten(0, 1) # L * N, C, T + clip_feats[i] = self.tconv[i](clip_feats[i]).permute(0, 2, 1).contiguous().view(L, N, T, C) + else: + H = W = int((L - 1) ** 0.5) + _, tmp_feats = clip_feats[i][:1], clip_feats[i][1:] + tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T, H, W) + tmp_feats = self.tconv[i](tmp_feats).view(N, C, T, L - 1).permute(3, 0, 2, 1) + clip_feats[i][1:] = clip_feats[i][1:] + tmp_feats + if self.pemb_t is not None and mode == 'video': + clip_feats[i] = clip_feats[i] + self.pemb_t[i] + if self.after_me: + clip_feats[i] = clip_feats[i] + self.me[i](origin_clip_feats[i]) + if i < self.uni_layer: + # L, N, T, C + L, N, T, C = clip_feats[i].shape + H = W = int((L - 1) ** 0.5) + _, tmp_feats = clip_feats[i][:1], clip_feats[i][1:] + tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T, H, W) + tmp_feats = self.uni_dec[i](tmp_feats).view(N, C, T, L - 1).permute(3, 0, 2, 1) + clip_feats[i][1:] = clip_feats[i][1:] + tmp_feats + clip_feats[i] = clip_feats[i].permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C + + if self.pre_prompt: + pre_x = self.pre_temporal_cls_token.view(1, 1, -1).repeat(1, N, 1) + for i in range(len(self.dec)): + if i < self.uni_layer: + pre_x = self.dec[i](pre_x, clip_feats[i]) + elif i == self.uni_layer: + clip_feats[i] = torch.cat([pre_x, clip_feats[i]], dim=0) + x = self.dec[i](x, clip_feats[i]) + else: + x = self.dec[i](x, clip_feats[i]) + else: + for i in range(len(self.dec)): + x = self.dec[i](x, clip_feats[i]) + + # real residual + # L, N, T, C + residual = clip_feats_all[-1][0].mean(1) + weight = self.sigmoid(self.balance) + + return self.proj((1 - weight) * x[0, :, :] + weight * residual) + + +if __name__ == '__main__': + model = TransformerDecoder_uniformer_diff_conv_balance() + + # construct a fake input to demonstrate input tensor shape + L, N, T, C = 197, 1, 8, 768 # num_image_tokens, video_batch_size, t_size, feature_dim + # we use intermediate feature maps from multiple blocks, so input features should be a list + input_features = [] + for i in range(8): # vit-b has 12 blocks + # every item in input_features contains features maps from a single block + # every item is a tuple containing 3 feature maps: + # (1) block output features (i.e. 
after mlp) with shape L, N, T, C + # (2) projected query features with shape L, N, T, C + # (3) projected key features with shape L, N, T, C + input_features.append( + tuple(torch.zeros([L, N, T, C]) for _ in range(3))) + # some small optimizations: + # (1) We only decode from the last $n$ blocks so it's good as long as the last $n$ items of input_features is valid and all previous items can be filled with None to save memory. By default $n=4$. + # (2) projected query/key features are optional. If you are using an uncompatible image backbone without query/key (e.g. CNN), you can fill the position with None (i.e. the tuple should be (Tensor, None, None) and set use_image_attnmap=False when constructing the model. + + print(model) + print(model(input_features).shape) # should be N, 400 diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/model.py b/Downstream/Video-Text-Retrieval/modules/clip_evl/model.py new file mode 100644 index 0000000..e186c19 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_evl/model.py @@ -0,0 +1,451 @@ +from collections import OrderedDict +from typing import Tuple, Union + +import numpy as np +import torch +import torch.nn.functional as F +from torch import nn + +from . import evl_utils +from .evl_utils import TransformerDecoder_uniformer_diff_conv_balance +from einops import rearrange +from ipdb import set_trace +from copy import deepcopy +from torch.utils.checkpoint import checkpoint_sequential +# from .clip_decoders import CaptionDecoder + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x: torch.Tensor): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None): + super().__init__() + + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + def attention(self, x: torch.Tensor): + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def forward(self, x: torch.Tensor): + x = x + self.attention(self.ln_1(x)) + x = x + self.mlp(self.ln_2(x)) + return x + + +class Transformer(nn.Module): + def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None, use_checkpoint=False, checkpoint_num=[0, 0]): + super().__init__() + self.width = width + self.layers = layers + self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)]) + self.use_checkpoint = use_checkpoint + self.checkpoint_num = checkpoint_num + + def forward(self, x: torch.Tensor): + if self.use_checkpoint and self.checkpoint_num[1] > 0: + segments = min(len(self.resblocks), self.checkpoint_num[1]) + return checkpoint_sequential(self.resblocks, segments, x) + else: + return self.resblocks(x) + # return self.resblocks(x) + + +class CLIP(nn.Module): + def __init__(self, + embed_dim: int, + # vision + image_resolution: int, + vision_layers: Union[Tuple[int, int, int, int], int], + vision_width: int, + 
vision_patch_size: int, + # text + context_length: int, + vocab_size: int, + transformer_width: int, + transformer_heads: int, + transformer_layers: int, + # evl + n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14, + use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True, + backbone='vit_2plus1d_dw_bias_b16', + uni_layer=0, + uni_type='2d', + add_ffn=False, + t_conv_type='3d', + pre_prompt=False, + balance=0., + after_me=True, + before_me=False, + me_type='stm', + me_reduction=4, + init_zero=True, + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + use_capdecoder=False, + ): + super().__init__() + + # All assertions is for adhoc clip_kc and should be removed + # assert vision_layers == 12, vision_layers + assert image_resolution == 224, image_resolution + # assert vision_patch_size == 32, vision_patch_size + assert vision_width == n_dim, (vision_width, n_dim) + + self.vision_width = n_dim + + self.context_length = context_length + + vision_heads = vision_width // 64 + self.visual = evl_utils.__dict__[backbone]( + pretrained=False, t_size=t_size, mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, n_dim=n_dim, + n_head=n_head, return_list=return_list, drop_path_rate=drop_path_rate, backbone_drop_path_rate=drop_path_rate, + use_checkpoint=True, checkpoint_num=[24,100], + ) + # self.evl = TransformerDecoder_uniformer_diff_conv_balance( + # n_layers=n_layers, n_dim=n_dim, n_head=n_head, + # mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, + # mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, + # use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed, + # uni_layer=uni_layer, uni_type=uni_type, add_ffn=add_ffn, t_conv_type=t_conv_type, + # pre_prompt=pre_prompt, balance=balance, + # after_me=after_me, before_me=before_me, + # me_type=me_type, me_reduction=me_reduction, + # init_zero=init_zero, + # ) + self.visual_ln_post = nn.LayerNorm(n_dim) + scale = n_dim ** -0.5 + self.visual_proj = nn.Parameter(scale * torch.randn(n_dim, embed_dim)) + self.return_qk = use_image_attnmap + self.return_num = n_layers + + self.transformer = Transformer( + width=transformer_width, + layers=transformer_layers, + heads=transformer_heads, + attn_mask=self.build_attention_mask(), + use_checkpoint=False, checkpoint_num=[24,100], + ) + + self.vocab_size = vocab_size + self.token_embedding = nn.Embedding(vocab_size, transformer_width) + self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width)) + self.ln_final = LayerNorm(transformer_width) + + self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim)) + self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) + + self.embed_dim = embed_dim + + # We seperate the mask embedding to load pretrained model + self.text_mask_embedding = nn.Parameter(torch.empty(1, 1, transformer_width)) + + # # To keep the num_embeddings unchanged, we add this to embedded text + # self.eot_token_embedding = nn.Parameter(torch.empty(1, transformer_width)) + + self.initialize_parameters() + + def initialize_parameters(self): + nn.init.normal_(self.token_embedding.weight, std=0.02) + nn.init.normal_(self.positional_embedding, std=0.01) + nn.init.normal_(self.text_mask_embedding, std=0.02) + # nn.init.constant_(self.eot_token_embedding, 0.0) + + proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5) + attn_std = self.transformer.width ** -0.5 + fc_std = (2 * 
self.transformer.width) ** -0.5 + for block in self.transformer.resblocks: + nn.init.normal_(block.attn.in_proj_weight, std=attn_std) + nn.init.normal_(block.attn.out_proj.weight, std=proj_std) + nn.init.normal_(block.mlp.c_fc.weight, std=fc_std) + nn.init.normal_(block.mlp.c_proj.weight, std=proj_std) + + if self.text_projection is not None: + nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5) + + nn.init.constant_(self.visual_ln_post.weight, 1.0) + nn.init.constant_(self.visual_ln_post.bias, 0.0) + + def build_attention_mask(self): + # lazily create causal attention mask, with full attention between the vision tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.empty(self.context_length, self.context_length) + mask.fill_(float("-inf")) + mask.triu_(1) # zero out the lower diagonal + return mask + + @property + def dtype(self): + return self.visual.conv1.weight.dtype + + def encode_video(self, video, return_all_feats=False, masked_indices=None, mode="video"): + # video: [N, C, T, H, W] + feats = self.visual(video, return_all_feats=return_all_feats, mode=mode) + if return_all_feats: + x, feats = feats + x = self.visual_ln_post(x) + if self.visual_proj is not None: + x = x @ self.visual_proj + + if return_all_feats: + return x, feats # [N, C], [L, N, T, C] + + return x + + def encode_text(self, text, masked_indices=None, return_all_feats=False): + # assert (text.max(dim=-1)[0] + 1 == self.token_embedding.num_embeddings).all(), \ + # "The last token of each sentence should be eot_token, check the input" + + x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model] + # x[torch.arange(x.shape[0]), text.argmax(dim=-1)] += self.eot_token_embedding + + if masked_indices is not None: + x[masked_indices] = self.text_mask_embedding + + x = x + self.positional_embedding.type(self.dtype) + x = x.permute(1, 0, 2) # NLD -> LND + x = self.transformer(x) + x = x.permute(1, 0, 2) # LND -> NLD + x = self.ln_final(x).type(self.dtype) + + # x.shape = [batch_size, n_ctx, transformer.width] + # take features from the eot embedding (eot_token is the highest number in each sequence) + feats = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] + if self.text_projection is not None: + feats = feats @ self.text_projection + + if return_all_feats: + return feats, x + + return feats + + def forward(self, video, text): + video_features = self.encode_video(video) + text_features = self.encode_text(text) + + # normalized features + video_features = video_features / video_features.norm(dim=1, keepdim=True) + text_features = text_features / text_features.norm(dim=1, keepdim=True) + + # cosine similarity as logits + logit_scale = self.logit_scale.exp() + logits_per_video= logit_scale * video_features @ text_features.t() + logits_per_text = logits_per_video.t() + + # shape = [global_batch_size, global_batch_size] + return logits_per_video, logits_per_text + + +def convert_weights(model: nn.Module): + """Convert applicable model parameters to fp16""" + + def _convert_weights_to_fp16(l): + if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): + l.weight.data = l.weight.data.half() + if l.bias is not None: + l.bias.data = l.bias.data.half() + + if isinstance(l, nn.MultiheadAttention): + for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]: + tensor = getattr(l, attr) + if tensor is not None: + tensor.data = tensor.data.half() + + for name in ["text_projection", "proj"]: + if hasattr(l, name) and not 
isinstance(l, TransformerDecoder_uniformer_diff_conv_balance): + attr = getattr(l, name) + if attr is not None: + attr.data = attr.data.half() + + model.apply(_convert_weights_to_fp16) + +def interpolate_temporal_pos_embed(pos_embed, T): + # [1, t, d] -> [1, d, t] + pos_embed = pos_embed.transpose(-2, -1) + # [1, d, t] -> [1, d, T] + pos_embed = F.interpolate(pos_embed, size=(T), mode='linear') + # [1, d, T] -> [1, T, d] + return pos_embed.transpose(-2, -1) + +def interploate_rpb(rpb, T): + t1 = T * 2 - 1 + rpb = rpb.transpose(0, 1).unsqueeze(0) + rpb = F.interpolate(rpb, size=(t1), mode='linear') + return rpb.squeeze(0).transpose(0, 1) + +def build_model( + state_dict: dict, + # evl + n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14, + use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True, no_pretrain=False, + init_zero=True, mergeclip=False, mergeweight=0.5, use_capdecoder=False, clip_state_dict=None, + ): + vit = "visual.proj" in state_dict or "visual.positional_embedding" in state_dict + + if "visual.proj" in state_dict: + state_dict["visual_proj"] = state_dict["visual.proj"] + state_dict["visual_ln_post.weight"] = state_dict["visual.ln_post.weight"] + state_dict["visual_ln_post.bias"] = state_dict["visual.ln_post.bias"] + del state_dict["visual.proj"], state_dict["visual.ln_post.weight"], state_dict["visual.ln_post.bias"] + # new_state_dict = OrderedDict() + # for k, v in state_dict.items(): + # if k.startswith("backbone."): + # k = k.replace("backbone.", "visual.") + + # new_state_dict[k] = v + + # state_dict = new_state_dict + if vit: + vision_width = state_dict["visual.conv1.weight"].shape[0] + vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")]) + vision_patch_size = state_dict["visual.conv1.weight"].shape[-1] + grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5) + image_resolution = vision_patch_size * grid_size + else: + counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]] + vision_layers = tuple(counts) + vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0] + output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5) + vision_patch_size = None + assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0] + image_resolution = output_width * 32 + + # embed_dim = 512 + # context_length = 77 + # vocab_size = 49408 + # transformer_width = 512 + # transformer_layers = 12 + embed_dim = state_dict["text_projection"].shape[1] + context_length = state_dict["positional_embedding"].shape[0] + vocab_size = state_dict["token_embedding.weight"].shape[0] + transformer_width = state_dict["ln_final.weight"].shape[0] + transformer_heads = transformer_width // 64 + transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks"))) + + ########### add this ############ + # for k, v in state_dict.items(): + # print(k, v.shape) + ################################################ + + vision_width = state_dict["visual_proj"].shape[0] + n_dim = vision_width + if vision_width == 768: + backbone = "vit_only_global_b16" + n_head = 12 + return_list = [8, 9, 10, 11] + # comment this as we always use vit-L/14 + # elif vision_width == 1024 and state_dict["input_resolution"] == 224: + elif vision_width == 1024: + backbone = 
"vit_only_global_l14" + n_head = 16 + return_list = [20, 21, 22, 23] + elif vision_width == 1024 and state_dict["input_resolution"] == 336: + backbone = "vit_only_global_l14_336" + n_head = 16 + return_list = [20, 21, 22, 23] + else: + raise NotImplementedError + + model = CLIP( + embed_dim, + image_resolution, vision_layers, vision_width, vision_patch_size, + context_length, vocab_size, transformer_width, transformer_heads, transformer_layers, + n_layers=n_layers, n_dim=n_dim, n_head=n_head, mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, spatial_size=spatial_size, + use_t_conv=use_t_conv, use_image_attnmap=use_image_attnmap, use_t_pos_embed=use_t_pos_embed, backbone=backbone, + init_zero=init_zero, return_list=return_list, + ) + + for key in ["input_resolution", "context_length", "vocab_size"]: + if key in state_dict: + del state_dict[key] + + # convert_weights(model) + temporal_key = 'visual.temporal_positional_embedding' + temporal_key2 = 'evl.pemb_t' + + if temporal_key in state_dict and t_size != state_dict[temporal_key].size(1): + state_dict[temporal_key] = interpolate_temporal_pos_embed(state_dict[temporal_key], t_size) + state_dict[temporal_key2] = interpolate_temporal_pos_embed(state_dict[temporal_key2], t_size) + for kk, vv in state_dict.items(): + if 'rpb_t' in kk: + size_old = state_dict[kk].shape + state_dict[kk] = interploate_rpb(vv, t_size) + size_new = state_dict[kk].shape + print('Interpolating' ,kk, size_old, '-->', size_new) + # set_trace() + + # print('$' * 100) + # for k, v in model.state_dict().items(): + # print(k, v.shape) + + + if mergeclip: + assert 0.0 <= mergeweight <= 1.0 + assert clip_state_dict is not None + clip_sd = {k: v.cpu() for k, v in clip_state_dict.items()} + for key in ["input_resolution", "context_length", "vocab_size"]: + if key in clip_sd: + del clip_sd[key] + + ## trick: use this func to transfer 2d clip weights to 3d, and mark back + evl_utils.clip_vit_fusion.clip_load_state_dict(model, clip_sd) + loaded_dict = model.state_dict() + clip_sd_new = {k: v.cpu() for k, v in loaded_dict.items() if k in clip_sd} + + # set_trace() + + new_sd = deepcopy(state_dict) + + for k in new_sd: + if k not in clip_sd_new: + continue + if any(x in k for x in clip_sd_new.keys()): + print('merging: ', k, '\t', clip_sd_new[k].shape, state_dict[k].shape) + new_sd[k] = clip_sd_new[k] * mergeweight + state_dict[k] * (1.0 - mergeweight) + + ############## only merge the clip text features, this is for ActivityNet ########### + # if 'visual' in k: + # new_sd[k] = clip_sd[k] + # else: + # new_sd[k] = clip_sd[k] * mergeweight + state_dict[k] * (1.0 - mergeweight) + ################################################################################ + + ############## only merge the clip visual features, this is for MSVD ########### + # if 'visual' in k: + # new_sd[k] = clip_sd[k] * mergeweight + state_dict[k] * (1.0 - mergeweight) + # else: + # new_sd[k] = clip_sd[k] + ################################################################################ + state_dict = new_sd + + # print('$' * 100) + # for k, v in state_dict.items(): + # print(k, v.shape) + + if not no_pretrain: + # msg = evl_utils.clip_vit_fusion.clip_load_state_dict(model, state_dict) + msg = model.load_state_dict(state_dict, strict=False) + print(msg) + return model.eval() diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/model_freeze.py b/Downstream/Video-Text-Retrieval/modules/clip_evl/model_freeze.py new file mode 100644 index 
0000000..b89aabb --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_evl/model_freeze.py @@ -0,0 +1,142 @@ +import os +import time + +import torch +import torch.nn as nn +from fvcore.nn import FlopCountAnalysis +from fvcore.nn import flop_count_table + +import evl_utils +from evl_utils import TransformerDecoder + +PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model' + + +class EVL(nn.Module): + def __init__(self, + backbone='vit_l14_336', + n_layers=4, + n_dim=1024, + n_head=16, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=32, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400 + ): + super().__init__() + + # pre-trained from CLIP + self.backbone = evl_utils.__dict__[backbone](pretrained=False) + + self.evl = TransformerDecoder( + n_layers=n_layers, n_dim=n_dim, n_head=n_head, + mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, + use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed, + num_classes=num_classes, + ) + self.return_num = n_layers + + def forward(self, x): + features = self.backbone(x, return_num=self.return_num) + output = self.evl(features) + + return output + + +def cal_flops(model, frame=8, size=224): + flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) + + +def vit_l_sparse16(pretrained=True): + # 16x224x224 + # k400 1x1: 86.5 + model = EVL( + backbone='vit_l14', + n_layers=4, + n_dim=1024, + n_head=16, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=32, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400 + ) + if pretrained: + pretrained_path = os.path.join(PATH_PREFIX, 'vit_l_sparse16.pyth') + print(f'lodel model from: {pretrained_path}') + state_dict = torch.load(pretrained_path, map_location='cpu') + model.load_state_dict(state_dict) + return model + + +def vit_l_sparse32(pretrained=True): + # 32x224x224 + # k400 1x1: 87.0 + model = EVL( + backbone='vit_l14', + n_layers=4, + n_dim=1024, + n_head=16, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=32, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400 + ) + if pretrained: + pretrained_path = os.path.join(PATH_PREFIX, 'vit_l_sparse32.pyth') + print(f'lodel model from: {pretrained_path}') + state_dict = torch.load(pretrained_path, map_location='cpu') + model.load_state_dict(state_dict) + return model + + +def vit_l336_sparse32(pretrained=True): + # 32x336x336 + # k400 1x1: 87.4 + model = EVL( + backbone='vit_l14_336', + n_layers=4, + n_dim=1024, + n_head=16, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=32, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400 + ) + if pretrained: + pretrained_path = os.path.join(PATH_PREFIX, 'vit_l336_sparse32.pyth') + print(f'lodel model from: {pretrained_path}') + state_dict = torch.load(pretrained_path, map_location='cpu') + model.load_state_dict(state_dict) + return model + + +if __name__ == '__main__': + + model = vit_l_sparse16() + cal_flops(model, frame=16, size=224) + + # model = vit_l_sparse32() + # cal_flops(model, frame=32, size=224) + + # model = vit_l336_sparse32() + # cal_flops(model, frame=32, size=336) \ No newline at end of file diff --git 
a/Downstream/Video-Text-Retrieval/modules/clip_evl/model_no_freeze.py b/Downstream/Video-Text-Retrieval/modules/clip_evl/model_no_freeze.py new file mode 100644 index 0000000..657c677 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_evl/model_no_freeze.py @@ -0,0 +1,116 @@ +import os +import time + +import torch +import torch.nn as nn +from fvcore.nn import FlopCountAnalysis +from fvcore.nn import flop_count_table + +import evl_utils +from evl_utils import TransformerDecoder + +PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model' + + +class EVL(nn.Module): + def __init__(self, + backbone='vit_l14_336', + n_layers=4, + n_dim=1024, + n_head=16, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=32, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400, + add_residual=False, + ): + super().__init__() + + # pre-trained from CLIP + self.backbone = evl_utils.__dict__[backbone](pretrained=False) + + self.evl = TransformerDecoder( + n_layers=n_layers, n_dim=n_dim, n_head=n_head, + mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, + use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed, + num_classes=num_classes, add_residual=add_residual + ) + self.return_num = n_layers + + def forward(self, x): + features = self.backbone(x, return_num=self.return_num) + output = self.evl(features) + + return output + + +def cal_flops(model, frame=8, size=224): + flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) + + +def vit_b_sparse8(pretrained=True): + # 8x224x224 + # k400 1x1: 82.4 + model = EVL( + backbone='vit_b16', + n_layers=4, + n_dim=768, + n_head=12, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=8, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400, + add_residual=True, + ) + if pretrained: + pretrained_path = os.path.join(PATH_PREFIX, 'vit_b_sparse8.pyth') + print(f'load model from: {pretrained_path}') + state_dict = torch.load(pretrained_path, map_location='cpu') + model.load_state_dict(state_dict) + return model + +def vit_2plus1d_b_sparse8(pretrained=True): + # 8x224x224 + # k400 1x1: 82.5 + model = EVL( + backbone='vit_2plus1d_b16', + n_layers=4, + n_dim=768, + n_head=12, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=8, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400, + add_residual=True, + ) + if pretrained: + pretrained_path = os.path.join(PATH_PREFIX, 'vit_2plus1d_b_sparse8.pyth') + print(f'load model from: {pretrained_path}') + state_dict = torch.load(pretrained_path, map_location='cpu') + model.load_state_dict(state_dict) + return model + + +if __name__ == '__main__': + + # model = vit_b_sparse8() + # cal_flops(model, frame=8, size=224) + + model = vit_2plus1d_b_sparse8() + cal_flops(model, frame=8, size=224) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/model_no_freeze_diff.py b/Downstream/Video-Text-Retrieval/modules/clip_evl/model_no_freeze_diff.py new file mode 100644 index 0000000..cce17f0 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_evl/model_no_freeze_diff.py @@ -0,0 +1,112 @@ +import os +import time + +import torch +import torch.nn as nn +from fvcore.nn import FlopCountAnalysis +from fvcore.nn import
flop_count_table + +import evl_utils +from evl_utils import TransformerDecoder_uniformer_diff_conv_balance + +PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model' + + +class EVL(nn.Module): + def __init__(self, + backbone='vit_b16', + n_layers=12, + n_dim=1024, + n_head=16, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + num_frames=8, + t_size=8, + use_t_conv=True, + use_t_pos_embed=True, + uni_layer=4, + uni_type='3d', + add_ffn=True, + t_conv_type='1d', + pre_prompt=True, + balance=0., + after_me=True, + before_me=False, + me_type='dstm', + me_reduction=4, + num_classes=400, + ): + super().__init__() + + # pre-trained from CLIP + self.backbone = evl_utils.__dict__[backbone](pretrained=False, num_frames=num_frames, t_size=t_size) + + self.evl = TransformerDecoder_uniformer_diff_conv_balance( + n_layers=n_layers, n_dim=n_dim, n_head=n_head, + mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, + use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed, + uni_layer=uni_layer, uni_type=uni_type, add_ffn=add_ffn, t_conv_type=t_conv_type, + pre_prompt=pre_prompt, balance=balance, + after_me=after_me, before_me=before_me, + me_type=me_type, me_reduction=me_reduction, + num_classes=num_classes + ) + self.return_num = n_layers + + def forward(self, x, mode='image'): + features = self.backbone(x, return_num=self.return_num, mode=mode) + output = self.evl(features, mode=mode) + + return output + + +def cal_flops(model, frame=8, size=224): + flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) + + +def vit_2plus1d_diff_b_sparse8(pretrained=True): + # 8x224x224 + # k400 1x1: 82.5 + model = EVL( + backbone='vit_2plus1d_dw_bias_b16', + n_layers=12, + n_dim=768, + n_head=12, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + num_frames=8, + t_size=8, + use_t_conv=True, + use_t_pos_embed=True, + uni_layer=0, + uni_type='2d', + add_ffn=False, + t_conv_type='3d', + pre_prompt=False, + balance=0., + after_me=True, + before_me=False, + me_type='stm', + me_reduction=4, + num_classes=400, + ) + # if pretrained: + # pretrained_path = os.path.join(PATH_PREFIX, 'vit_2plus1d_diff_b_sparse8.pyth') + # print(f'lodel model from: {pretrained_path}') + # state_dict = torch.load(pretrained_path, map_location='cpu') + # model.load_state_dict(state_dict) + return model + + +if __name__ == '__main__': + + model = vit_2plus1d_diff_b_sparse8() + cal_flops(model, frame=1, size=224) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/model_no_freeze_only_global.py b/Downstream/Video-Text-Retrieval/modules/clip_evl/model_no_freeze_only_global.py new file mode 100644 index 0000000..80d8744 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_evl/model_no_freeze_only_global.py @@ -0,0 +1,109 @@ +import os +import time + +import torch +import torch.nn as nn +# from fvcore.nn import FlopCountAnalysis +# from fvcore.nn import flop_count_table + +from modules.clip_evl import evl_utils + +PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model' + + +class EVL(nn.Module): + def __init__(self, + backbone='vit_b16', + t_size=16, + backbone_drop_path_rate=0., + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 
10, 11], + n_layers=12, + n_dim=768, + n_head=12, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + num_classes=174, + ): + super().__init__() + + # pre-trained from CLIP + self.backbone = evl_utils.__dict__[backbone]( + pretrained=False, + t_size=t_size, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + def forward(self, x, mode='video'): + output = self.backbone(x, mode=mode) + return output + + +def cal_flops(model, frame=8, size=224): + flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) + + +def vit_only_global_b_sparse8_k400(pretrained=True): + model = EVL( + backbone='vit_only_global_b16', + t_size=8, + backbone_drop_path_rate=0., + return_list=[8, 9, 10, 11], + n_layers=4, + n_dim=768, + n_head=12, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + num_classes=400, + ) + # if pretrained: + # pretrained_path = os.path.join(PATH_PREFIX, 'fuck.pyth') + # print(f'lodel model from: {pretrained_path}') + # state_dict = torch.load(pretrained_path, map_location='cpu') + # model.load_state_dict(state_dict) + return model + + +def vit_only_global_l_sparse8_k400(pretrained=True): + model = EVL( + backbone='vit_only_global_l14', + t_size=8, + backbone_drop_path_rate=0., + return_list=[20, 21, 22, 23], + n_layers=4, + n_dim=1024, + n_head=16, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + num_classes=400, + ) + # if pretrained: + # pretrained_path = os.path.join(PATH_PREFIX, 'fuck.pyth') + # print(f'lodel model from: {pretrained_path}') + # state_dict = torch.load(pretrained_path, map_location='cpu') + # model.load_state_dict(state_dict) + return model + + +if __name__ == '__main__': + + model = vit_only_global_l_sparse8_k400() + # cal_flops(model, frame=1, size=224) + cal_flops(model, frame=8, size=224) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/model_no_freeze_uniformer.py b/Downstream/Video-Text-Retrieval/modules/clip_evl/model_no_freeze_uniformer.py new file mode 100644 index 0000000..8b13db4 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_evl/model_no_freeze_uniformer.py @@ -0,0 +1,113 @@ +import os +import time + +import torch +import torch.nn as nn +from fvcore.nn import FlopCountAnalysis +from fvcore.nn import flop_count_table + +from . 
import evl_utils + +PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model' + + +class EVL(nn.Module): + def __init__(self, + backbone='vit_b16', + t_size=16, + dw_reduction=1.5, + backbone_drop_path_rate=0., + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + n_layers=12, + n_dim=768, + n_head=12, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + num_classes=174, + ): + super().__init__() + + # pre-trained from CLIP + self.backbone = evl_utils.__dict__[backbone]( + pretrained=False, + t_size=t_size, + dw_reduction=dw_reduction, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + def forward(self, x, mode='video'): + output = self.backbone(x, mode=mode) + return output + + +def cal_flops(model, frame=8, size=224): + flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) + + +def vit_fusion_b_sparse16_k400(pretrained=True): + model = EVL( + backbone='vit_fusion_b16', + t_size=16, + dw_reduction=1.5, + backbone_drop_path_rate=0., + return_list=[8, 9, 10, 11], + n_layers=4, + n_dim=768, + n_head=12, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + num_classes=400, + ) + # if pretrained: + # pretrained_path = os.path.join(PATH_PREFIX, 'fuck.pyth') + # print(f'lodel model from: {pretrained_path}') + # state_dict = torch.load(pretrained_path, map_location='cpu') + # model.load_state_dict(state_dict) + return model + + +def vit_fusion_b_sparse16_sthsth(pretrained=True): + model = EVL( + backbone='vit_fusion_b16', + t_size=16, + dw_reduction=1.5, + backbone_drop_path_rate=0., + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + n_layers=12, + n_dim=768, + n_head=12, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + num_classes=174, + ) + # if pretrained: + # pretrained_path = os.path.join(PATH_PREFIX, 'fuck.pyth') + # print(f'lodel model from: {pretrained_path}') + # state_dict = torch.load(pretrained_path, map_location='cpu') + # model.load_state_dict(state_dict) + return model + + +if __name__ == '__main__': + + model = vit_fusion_b_sparse16_k400() + # cal_flops(model, frame=1, size=224) + cal_flops(model, frame=16, size=224) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_evl/simple_tokenizer.py b/Downstream/Video-Text-Retrieval/modules/clip_evl/simple_tokenizer.py new file mode 100644 index 0000000..0a66286 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_evl/simple_tokenizer.py @@ -0,0 +1,132 @@ +import gzip +import html +import os +from functools import lru_cache + +import ftfy +import regex as re + + +@lru_cache() +def default_bpe(): + return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz") + + +@lru_cache() +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a corresponding list of unicode strings. + The reversible bpe codes work on unicode strings. + This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. 
+ When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. + This is a significant percentage of your normal, say, 32K bpe vocab. + To avoid that, we want lookup tables between utf-8 bytes and unicode strings. + And avoids mapping to whitespace/control characters the bpe code barfs on. + """ + bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8+n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def get_pairs(word): + """Return set of symbol pairs in a word. + Word is represented as tuple of symbols (symbols being variable-length strings). + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +def basic_clean(text): + text = ftfy.fix_text(text) + text = html.unescape(html.unescape(text)) + return text.strip() + + +def whitespace_clean(text): + text = re.sub(r'\s+', ' ', text) + text = text.strip() + return text + + +class SimpleTokenizer(object): + def __init__(self, bpe_path: str = default_bpe()): + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + merges = gzip.open(bpe_path).read().decode("utf-8").split('\n') + merges = merges[1:49152-256-2+1] + merges = [tuple(merge.split()) for merge in merges] + vocab = list(bytes_to_unicode().values()) + vocab = vocab + [v+'</w>' for v in vocab] + for merge in merges: + vocab.append(''.join(merge)) + vocab.extend(['<|startoftext|>', '<|endoftext|>']) + self.encoder = dict(zip(vocab, range(len(vocab)))) + self.decoder = {v: k for k, v in self.encoder.items()} + self.bpe_ranks = dict(zip(merges, range(len(merges)))) + self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'} + self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE) + + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token[:-1]) + ( token[-1] + '</w>',) + pairs = get_pairs(word) + + if not pairs: + return token+'</w>' + + while True: + bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf'))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + new_word.extend(word[i:j]) + i = j + except: + new_word.extend(word[i:]) + break + + if word[i] == first and i < len(word)-1 and word[i+1] == second: + new_word.append(first+second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = ' '.join(word) + self.cache[token] = word + return word + + def encode(self, text): + bpe_tokens = [] + text = whitespace_clean(basic_clean(text)).lower() + for token in re.findall(self.pat, text): + token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8')) + bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')) + return bpe_tokens + + def decode(self, tokens): + text = ''.join([self.decoder[token] for token in tokens]) + text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ') + return text diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/__init__.py 
b/Downstream/Video-Text-Retrieval/modules/clip_kc/__init__.py new file mode 100644 index 0000000..dcc5619 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc/__init__.py @@ -0,0 +1 @@ +from .clip import * diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/__pycache__/__init__.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..208e199 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc/__pycache__/__init__.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/__pycache__/clip.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc/__pycache__/clip.cpython-36.pyc new file mode 100644 index 0000000..f33021d Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc/__pycache__/clip.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/__pycache__/model.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc/__pycache__/model.cpython-36.pyc new file mode 100644 index 0000000..475650e Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc/__pycache__/model.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/__pycache__/simple_tokenizer.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc/__pycache__/simple_tokenizer.cpython-36.pyc new file mode 100644 index 0000000..a78ed46 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc/__pycache__/simple_tokenizer.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/bpe_simple_vocab_16e6.txt.gz b/Downstream/Video-Text-Retrieval/modules/clip_kc/bpe_simple_vocab_16e6.txt.gz new file mode 100644 index 0000000..4903fdf --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc/bpe_simple_vocab_16e6.txt.gz @@ -0,0 +1 @@ +../bpe_simple_vocab_16e6.txt.gz \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/clip.py b/Downstream/Video-Text-Retrieval/modules/clip_kc/clip.py new file mode 100644 index 0000000..e48ca6b --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc/clip.py @@ -0,0 +1,257 @@ +import hashlib +import os +import urllib +import warnings +from typing import Any, Union, List +from pkg_resources import packaging + +import torch +from PIL import Image +from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize +from tqdm import tqdm +from ipdb import set_trace +from .model import build_model +from .simple_tokenizer import SimpleTokenizer as _Tokenizer + +try: + from torchvision.transforms import InterpolationMode + BICUBIC = InterpolationMode.BICUBIC +except ImportError: + BICUBIC = Image.BICUBIC + + +if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"): + warnings.warn("PyTorch version 1.7.1 or higher is recommended") + + +__all__ = ["available_models", "load", "tokenize"] +_tokenizer = _Tokenizer() + +_MODELS = { + "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", + "ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt", + "ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt", +} + + +def _download(url: str, root: str): + os.makedirs(root, 
exist_ok=True) + filename = os.path.basename(url) + + expected_sha256 = url.split("/")[-2] + download_target = os.path.join(root, filename) + + if os.path.exists(download_target) and not os.path.isfile(download_target): + raise RuntimeError(f"{download_target} exists and is not a regular file") + + if os.path.isfile(download_target): + if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256: + return download_target + else: + warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file") + + with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: + with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop: + while True: + buffer = source.read(8192) + if not buffer: + break + + output.write(buffer) + loop.update(len(buffer)) + + if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256: + raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not match") + + return download_target + + +def _convert_image_to_rgb(image): + return image.convert("RGB") + + +def _transform(n_px): + return Compose([ + Resize(n_px, interpolation=BICUBIC), + CenterCrop(n_px), + _convert_image_to_rgb, + ToTensor(), + Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), + ]) + + +def available_models() -> List[str]: + """Returns the names of available CLIP models""" + return list(_MODELS.keys()) + + +def load( + name: str, + device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", + jit: bool = False, download_root: str = None, + # evl + n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14, + use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True, dropout=0.0, no_pretrain=False, + ): + """Load a CLIP model + + Parameters + ---------- + name : str + A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict + + device : Union[str, torch.device] + The device to put the loaded model + + jit : bool + Whether to load the optimized JIT model or more hackable non-JIT model (default). + + download_root: str + path to download the model files; by default, it uses "~/.cache/clip" + + Returns + ------- + model : torch.nn.Module + The CLIP model + + preprocess : Callable[[PIL.Image], torch.Tensor] + A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input + """ + if name in _MODELS: + model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip")) + elif os.path.isfile(name): + model_path = name + else: + raise RuntimeError(f"Model {name} not found; available models = {available_models()}") + + ''' + with open(model_path, 'rb') as opened_file: + try: + # loading JIT archive + model = torch.jit.load(opened_file, map_location=device if jit else "cpu").eval() + state_dict = None + except RuntimeError: + # loading saved state dict + if jit: + warnings.warn(f"File {model_path} is not a JIT archive. 
Loading as a state dict instead") + jit = False + state_dict = torch.load(model_path, map_location="cpu") + ''' + init_state_dict = torch.load(model_path, map_location='cpu')['state_dict'] + state_dict = {} + for k, v in init_state_dict.items(): + k = k.replace('clip.','') + state_dict[k] = v + #set_trace() + if not jit: + model = build_model( + state_dict or model.state_dict(), + n_layers=n_layers, n_dim=n_dim, n_head=n_head, mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, spatial_size=spatial_size, + use_t_conv=use_t_conv, use_image_attnmap=use_image_attnmap, use_t_pos_embed=use_t_pos_embed, no_pretrain=no_pretrain + ).to(device) + if str(device) == "cpu": + model.float() + return model, _transform(model.visual.input_resolution) + + # patch the device names + device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]) + device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1] + + def patch_device(module): + try: + graphs = [module.graph] if hasattr(module, "graph") else [] + except RuntimeError: + graphs = [] + + if hasattr(module, "forward1"): + graphs.append(module.forward1.graph) + + for graph in graphs: + for node in graph.findAllNodes("prim::Constant"): + if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"): + node.copyAttributes(device_node) + + model.apply(patch_device) + patch_device(model.encode_image) + patch_device(model.encode_text) + + # patch dtype to float32 on CPU + if str(device) == "cpu": + float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[]) + float_input = list(float_holder.graph.findNode("aten::to").inputs())[1] + float_node = float_input.node() + + def patch_float(module): + try: + graphs = [module.graph] if hasattr(module, "graph") else [] + except RuntimeError: + graphs = [] + + if hasattr(module, "forward1"): + graphs.append(module.forward1.graph) + + for graph in graphs: + for node in graph.findAllNodes("aten::to"): + inputs = list(node.inputs()) + for i in [1, 2]: # dtype can be the second or third argument to aten::to() + if inputs[i].node()["value"] == 5: + inputs[i].node().copyAttributes(float_node) + + model.apply(patch_float) + patch_float(model.encode_image) + patch_float(model.encode_text) + + model.float() + + return model, _transform(model.input_resolution.item()) + + +def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False, return_special_tokens_mask: bool = False) -> Union[torch.IntTensor, torch.LongTensor, torch.BoolTensor]: + """ + Returns the tokenized representation of given input string(s) + + Parameters + ---------- + texts : Union[str, List[str]] + An input string or a list of input strings to tokenize + + context_length : int + The context length to use; all CLIP models use 77 as the context length + + truncate: bool + Whether to truncate the text in case its encoding is longer than the context length + + Returns + ------- + A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]. + We return LongTensor when torch version is <1.8.0, since older index_select requires indices to be long. 
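+ + Example (an illustrative sketch added for clarity, not part of the original docstring; the sample strings are arbitrary and the shapes assume the default context_length of 77):: + + >>> tokens = tokenize(["a person riding a bike", "a dog playing in the snow"]) + >>> tokens.shape # one row per input string + torch.Size([2, 77]) + >>> result, mask = tokenize(["a person riding a bike"], return_special_tokens_mask=True) + >>> mask.shape # True marks the padded positions after the real tokens + torch.Size([1, 77])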
+ """ + if isinstance(texts, str): + texts = [texts] + + sot_token = _tokenizer.encoder["<|startoftext|>"] + eot_token = _tokenizer.encoder["<|endoftext|>"] + all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts] + if packaging.version.parse(torch.__version__) < packaging.version.parse("1.8.0"): + result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) + else: + result = torch.zeros(len(all_tokens), context_length, dtype=torch.int) + + special_tokens_mask = torch.zeros(len(all_tokens), context_length, dtype=torch.bool) + + for i, tokens in enumerate(all_tokens): + if len(tokens) > context_length: + if truncate: + tokens = tokens[:context_length] + tokens[-1] = eot_token + else: + raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}") + result[i, :len(tokens)] = torch.tensor(tokens) + special_tokens_mask[i, len(tokens):] = 1 + + if return_special_tokens_mask: + return result, special_tokens_mask + + return result diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__init__.py b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__init__.py new file mode 100644 index 0000000..6e0e14d --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__init__.py @@ -0,0 +1,5 @@ +from .evl_module import TransformerDecoder +from .evl_module_uniformer_diff_conv_balance import TransformerDecoder_uniformer_diff_conv_balance +from .clip_vit import vit_b32, vit_b16, vit_l14, vit_l14_336 +from .clip_vit_2plus1d import vit_2plus1d_b32, vit_2plus1d_b16, vit_2plus1d_l14, vit_2plus1d_l14_336 +from .clip_vit_2plus1d_dw_bias import vit_2plus1d_dw_bias_b32, vit_2plus1d_dw_bias_b16, vit_2plus1d_dw_bias_l14, vit_2plus1d_dw_bias_l14_336 \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/__init__.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..c192a8d Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/__init__.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/__init__.cpython-37.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..afd1eb8 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/__init__.cpython-37.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/attention.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/attention.cpython-36.pyc new file mode 100644 index 0000000..bd81d23 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/attention.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/attention.cpython-37.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/attention.cpython-37.pyc new file mode 100644 index 0000000..3cf3c2f Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/attention.cpython-37.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/attention_bias.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/attention_bias.cpython-36.pyc new file mode 100644 index 
0000000..fdfa752 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/attention_bias.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/attention_bias.cpython-37.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/attention_bias.cpython-37.pyc new file mode 100644 index 0000000..5030943 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/attention_bias.cpython-37.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/attention_module.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/attention_module.cpython-36.pyc new file mode 100644 index 0000000..6860018 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/attention_module.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/attention_module.cpython-37.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/attention_module.cpython-37.pyc new file mode 100644 index 0000000..d1a0852 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/attention_module.cpython-37.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/attention_module_bias.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/attention_module_bias.cpython-36.pyc new file mode 100644 index 0000000..575bdea Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/attention_module_bias.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/attention_module_bias.cpython-37.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/attention_module_bias.cpython-37.pyc new file mode 100644 index 0000000..7443ee2 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/attention_module_bias.cpython-37.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/clip_vit.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/clip_vit.cpython-36.pyc new file mode 100644 index 0000000..1115af0 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/clip_vit.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/clip_vit.cpython-37.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/clip_vit.cpython-37.pyc new file mode 100644 index 0000000..8eff313 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/clip_vit.cpython-37.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/clip_vit_2plus1d.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/clip_vit_2plus1d.cpython-36.pyc new file mode 100644 index 0000000..31d9dec Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/clip_vit_2plus1d.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/clip_vit_2plus1d.cpython-37.pyc 
b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/clip_vit_2plus1d.cpython-37.pyc new file mode 100644 index 0000000..93f7c5f Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/clip_vit_2plus1d.cpython-37.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/clip_vit_2plus1d_dw_bias.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/clip_vit_2plus1d_dw_bias.cpython-36.pyc new file mode 100644 index 0000000..82c52a1 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/clip_vit_2plus1d_dw_bias.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/clip_vit_2plus1d_dw_bias.cpython-37.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/clip_vit_2plus1d_dw_bias.cpython-37.pyc new file mode 100644 index 0000000..155afd5 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/clip_vit_2plus1d_dw_bias.cpython-37.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/evl_module.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/evl_module.cpython-36.pyc new file mode 100644 index 0000000..6c50c51 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/evl_module.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/evl_module.cpython-37.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/evl_module.cpython-37.pyc new file mode 100644 index 0000000..7b4e0f1 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/evl_module.cpython-37.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/evl_module_uniformer_diff_conv_balance.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/evl_module_uniformer_diff_conv_balance.cpython-36.pyc new file mode 100644 index 0000000..0b7c81a Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/evl_module_uniformer_diff_conv_balance.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/evl_module_uniformer_diff_conv_balance.cpython-37.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/evl_module_uniformer_diff_conv_balance.cpython-37.pyc new file mode 100644 index 0000000..192fe06 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/__pycache__/evl_module_uniformer_diff_conv_balance.cpython-37.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/attention.py b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/attention.py new file mode 100644 index 0000000..28ce56d --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/attention.py @@ -0,0 +1,203 @@ +#!/usr/bin/env python + +import warnings +from typing import Tuple, Optional + +import torch +from torch import Tensor +from torch.nn.modules.linear import Linear +from torch.nn.init import xavier_uniform_ +from torch.nn.init import constant_ +from torch.nn.init import xavier_normal_ +from torch.nn.parameter import Parameter +from torch.nn.modules.module import Module + +from .attention_module import 
multi_head_attention_forward + + +class _LinearWithBias(Linear): + bias: Tensor + + def __init__(self, in_features: int, out_features: int) -> None: + super().__init__(in_features, out_features, bias=True) + + +class MultiheadAttention(Module): + r"""Allows the model to jointly attend to information + from different representation subspaces. + See reference: Attention Is All You Need + + .. math:: + \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O + \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V) + + Args: + embed_dim: total dimension of the model. + num_heads: parallel attention heads. + dropout: a Dropout layer on attn_output_weights. Default: 0.0. + bias: add bias as module parameter. Default: True. + add_bias_kv: add bias to the key and value sequences at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + kdim: total number of features in key. Default: None. + vdim: total number of features in value. Default: None. + + Note: if kdim and vdim are None, they will be set to embed_dim such that + query, key, and value have the same number of features. + + Examples:: + + >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads) + >>> attn_output, attn_output_weights = multihead_attn(query, key, value) + """ + bias_k: Optional[torch.Tensor] + bias_v: Optional[torch.Tensor] + + def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None): + super(MultiheadAttention, self).__init__() + self.embed_dim = embed_dim + self.kdim = kdim if kdim is not None else embed_dim + self.vdim = vdim if vdim is not None else embed_dim + self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim + + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" + + if self._qkv_same_embed_dim is False: + self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim)) + self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim)) + self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim)) + self.register_parameter('in_proj_weight', None) + else: + self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim)) + self.register_parameter('q_proj_weight', None) + self.register_parameter('k_proj_weight', None) + self.register_parameter('v_proj_weight', None) + + if bias: + self.in_proj_bias = Parameter(torch.empty(3 * embed_dim)) + else: + self.register_parameter('in_proj_bias', None) + self.out_proj = _LinearWithBias(embed_dim, embed_dim) + + if add_bias_kv: + self.bias_k = Parameter(torch.empty(1, 1, embed_dim)) + self.bias_v = Parameter(torch.empty(1, 1, embed_dim)) + else: + self.bias_k = self.bias_v = None + + self.add_zero_attn = add_zero_attn + + self._reset_parameters() + + def _reset_parameters(self): + if self._qkv_same_embed_dim: + xavier_uniform_(self.in_proj_weight) + else: + xavier_uniform_(self.q_proj_weight) + xavier_uniform_(self.k_proj_weight) + xavier_uniform_(self.v_proj_weight) + + if self.in_proj_bias is not None: + constant_(self.in_proj_bias, 0.) + constant_(self.out_proj.bias, 0.) 
+ if self.bias_k is not None: + xavier_normal_(self.bias_k) + if self.bias_v is not None: + xavier_normal_(self.bias_v) + + def __setstate__(self, state): + # Support loading old MultiheadAttention checkpoints generated by v1.1.0 + if '_qkv_same_embed_dim' not in state: + state['_qkv_same_embed_dim'] = True + + super(MultiheadAttention, self).__setstate__(state) + + def forward(self, query, key, value, key_padding_mask=None, + need_weights=True, attn_mask=None, return_qk=False): + # type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor], bool) -> Tuple[Tensor, Optional[Tensor]] + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. When given a binary mask and a value is True, + the corresponding value on the attention layer will be ignored. When given + a byte mask and a value is non-zero, the corresponding value on the attention + layer will be ignored + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. + + Shape: + - Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the position + with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensure that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` + is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + - return_qk: whether return Q and K. + + - Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. 
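+ + Example (an illustrative sketch, not part of the original docstring; tensor sizes follow the (L, N, E) convention above and are arbitrary):: + + >>> attn = MultiheadAttention(embed_dim=768, num_heads=12) + >>> x = torch.randn(197, 2, 768) # (L, N, E) + >>> attn_output, attn_weights = attn(x, x, x) # (197, 2, 768), (2, 197, 197) + >>> q, k, attn_output, attn_weights = attn(x, x, x, return_qk=True) # also returns the projected Q and K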
+ """ + if return_qk: + if not self._qkv_same_embed_dim: + q, k, attn_output, attn_output_weights = multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, use_separate_proj_weight=True, + q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight, + v_proj_weight=self.v_proj_weight, return_qk=True) + else: + q, k, attn_output, attn_output_weights = multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, return_qk=True) + return q, k, attn_output, attn_output_weights + else: + if not self._qkv_same_embed_dim: + return multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, use_separate_proj_weight=True, + q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight, + v_proj_weight=self.v_proj_weight) + else: + return multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask) diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/attention_bias.py b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/attention_bias.py new file mode 100644 index 0000000..006f56a --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/attention_bias.py @@ -0,0 +1,201 @@ +import warnings +from typing import Tuple, Optional + +import torch +from torch import Tensor +from torch.nn.modules.linear import Linear +from torch.nn.init import xavier_uniform_ +from torch.nn.init import constant_ +from torch.nn.init import xavier_normal_ +from torch.nn.parameter import Parameter +from torch.nn.modules.module import Module + +from .attention_module_bias import multi_head_attention_forward + + +class _LinearWithBias(Linear): + bias: Tensor + + def __init__(self, in_features: int, out_features: int) -> None: + super().__init__(in_features, out_features, bias=True) + + +class MultiheadAttention(Module): + r"""Allows the model to jointly attend to information + from different representation subspaces. + See reference: Attention Is All You Need + + .. math:: + \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O + \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V) + + Args: + embed_dim: total dimension of the model. + num_heads: parallel attention heads. + dropout: a Dropout layer on attn_output_weights. Default: 0.0. + bias: add bias as module parameter. Default: True. + add_bias_kv: add bias to the key and value sequences at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. 
+ kdim: total number of features in key. Default: None. + vdim: total number of features in value. Default: None. + + Note: if kdim and vdim are None, they will be set to embed_dim such that + query, key, and value have the same number of features. + + Examples:: + + >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads) + >>> attn_output, attn_output_weights = multihead_attn(query, key, value) + """ + bias_k: Optional[torch.Tensor] + bias_v: Optional[torch.Tensor] + + def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None): + super(MultiheadAttention, self).__init__() + self.embed_dim = embed_dim + self.kdim = kdim if kdim is not None else embed_dim + self.vdim = vdim if vdim is not None else embed_dim + self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim + + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" + + if self._qkv_same_embed_dim is False: + self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim)) + self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim)) + self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim)) + self.register_parameter('in_proj_weight', None) + else: + self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim)) + self.register_parameter('q_proj_weight', None) + self.register_parameter('k_proj_weight', None) + self.register_parameter('v_proj_weight', None) + + if bias: + self.in_proj_bias = Parameter(torch.empty(3 * embed_dim)) + else: + self.register_parameter('in_proj_bias', None) + self.out_proj = _LinearWithBias(embed_dim, embed_dim) + + if add_bias_kv: + self.bias_k = Parameter(torch.empty(1, 1, embed_dim)) + self.bias_v = Parameter(torch.empty(1, 1, embed_dim)) + else: + self.bias_k = self.bias_v = None + + self.add_zero_attn = add_zero_attn + + self._reset_parameters() + + def _reset_parameters(self): + if self._qkv_same_embed_dim: + xavier_uniform_(self.in_proj_weight) + else: + xavier_uniform_(self.q_proj_weight) + xavier_uniform_(self.k_proj_weight) + xavier_uniform_(self.v_proj_weight) + + if self.in_proj_bias is not None: + constant_(self.in_proj_bias, 0.) + constant_(self.out_proj.bias, 0.) + if self.bias_k is not None: + xavier_normal_(self.bias_k) + if self.bias_v is not None: + xavier_normal_(self.bias_v) + + def __setstate__(self, state): + # Support loading old MultiheadAttention checkpoints generated by v1.1.0 + if '_qkv_same_embed_dim' not in state: + state['_qkv_same_embed_dim'] = True + + super(MultiheadAttention, self).__setstate__(state) + + def forward(self, query, key, value, key_padding_mask=None, + need_weights=True, attn_mask=None, return_qk=False, rpb=None): + # type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor], bool, Optional[Tensor]) -> Tuple[Tensor, Optional[Tensor]] + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. When given a binary mask and a value is True, + the corresponding value on the attention layer will be ignored. When given + a byte mask and a value is non-zero, the corresponding value on the attention + layer will be ignored + need_weights: output attn_output_weights. 
+ attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. + + Shape: + - Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the position + with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensure that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` + is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + - return_qk: whether return Q and K. + + - Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. 
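+ + Example (an illustrative sketch, not part of the original docstring; rpb is the relative position bias this variant forwards to the underlying attention, and the (num_heads, L, S) shape used here is only an assumption for illustration, since the exact shape expected is defined by attention_module_bias.multi_head_attention_forward):: + + >>> attn = MultiheadAttention(embed_dim=768, num_heads=12) + >>> x = torch.randn(8, 2, 768) # (L, N, E) + >>> rpb = torch.zeros(12, 8, 8) # assumed per-head bias over (L, S); zeros leave the attention unchanged + >>> attn_output, attn_weights = attn(x, x, x, rpb=rpb)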
+ """ + if return_qk: + if not self._qkv_same_embed_dim: + q, k, attn_output, attn_output_weights = multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, use_separate_proj_weight=True, + q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight, + v_proj_weight=self.v_proj_weight, return_qk=True, rpb=rpb) + else: + q, k, attn_output, attn_output_weights = multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, return_qk=True, rpb=rpb) + return q, k, attn_output, attn_output_weights + else: + if not self._qkv_same_embed_dim: + return multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, use_separate_proj_weight=True, + q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight, + v_proj_weight=self.v_proj_weight, rpb=rpb) + else: + return multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, rpb=rpb) diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/attention_module.py b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/attention_module.py new file mode 100644 index 0000000..fd4e73b --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/attention_module.py @@ -0,0 +1,316 @@ +r"""Functional interface""" +import warnings +import math + +import torch +from torch import _VF +from torch._jit_internal import Optional, Tuple +from torch.overrides import has_torch_function, handle_torch_function + +from torch.nn.functional import _pad, linear, softmax, dropout + +Tensor = torch.Tensor +pad = _pad + + +def multi_head_attention_forward(query: Tensor, + key: Tensor, + value: Tensor, + embed_dim_to_check: int, + num_heads: int, + in_proj_weight: Tensor, + in_proj_bias: Tensor, + bias_k: Optional[Tensor], + bias_v: Optional[Tensor], + add_zero_attn: bool, + dropout_p: float, + out_proj_weight: Tensor, + out_proj_bias: Tensor, + training: bool = True, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + use_separate_proj_weight: bool = False, + q_proj_weight: Optional[Tensor] = None, + k_proj_weight: Optional[Tensor] = None, + v_proj_weight: Optional[Tensor] = None, + static_k: Optional[Tensor] = None, + static_v: Optional[Tensor] = None, + return_qk: bool = False + ) -> Tuple[Tensor, Optional[Tensor]]: + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. 
+ See "Attention Is All You Need" for more details. + embed_dim_to_check: total dimension of the model. + num_heads: parallel attention heads. + in_proj_weight, in_proj_bias: input projection weight and bias. + bias_k, bias_v: bias of the key and value sequences to be added at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + dropout_p: probability of an element to be zeroed. + out_proj_weight, out_proj_bias: the output projection weight and bias. + training: apply dropout if is ``True``. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. This is an binary mask. When the value is True, + the corresponding value on the attention layer will be filled with -inf. + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. + use_separate_proj_weight: the function accept the proj. weights for query, key, + and value in different forms. If false, in_proj_weight will be used, which is + a combination of q_proj_weight, k_proj_weight, v_proj_weight. + q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias. + static_k, static_v: static key and value used for attention operators. + + + Shape: + Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions + will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` + are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + - return_qk: whether return Q and K. + + Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. 
+ """ + if not torch.jit.is_scripting(): + tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, + out_proj_weight, out_proj_bias) + if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops): + return handle_torch_function( + multi_head_attention_forward, tens_ops, query, key, value, + embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias, + bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight, + out_proj_bias, training=training, key_padding_mask=key_padding_mask, + need_weights=need_weights, attn_mask=attn_mask, + use_separate_proj_weight=use_separate_proj_weight, + q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight, + v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v) + tgt_len, bsz, embed_dim = query.size() + assert embed_dim == embed_dim_to_check + # allow MHA to have different sizes for the feature dimension + assert key.size(0) == value.size(0) and key.size(1) == value.size(1) + + head_dim = embed_dim // num_heads + assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads" + scaling = float(head_dim) ** -0.5 + + if not use_separate_proj_weight: + if torch.equal(query, key) and torch.equal(key, value): + # self-attention + q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1) + + elif torch.equal(key, value): + # encoder-decoder attention + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = linear(query, _w, _b) + + if key is None: + assert value is None + k = None + v = None + else: + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + k, v = linear(key, _w, _b).chunk(2, dim=-1) + + else: + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = linear(query, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = embed_dim * 2 + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + k = linear(key, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim * 2 + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + v = linear(value, _w, _b) + else: + q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight) + len1, len2 = q_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == query.size(-1) + + k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight) + len1, len2 = k_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == key.size(-1) + + v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight) + len1, len2 = v_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == value.size(-1) + + if in_proj_bias is not None: + q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim]) + k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)]) + v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):]) + else: + q = linear(query, q_proj_weight_non_opt, in_proj_bias) + k = linear(key, k_proj_weight_non_opt, 
in_proj_bias) + v = linear(value, v_proj_weight_non_opt, in_proj_bias) + q = q * scaling + + if attn_mask is not None: + assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \ + attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \ + 'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype) + if attn_mask.dtype == torch.uint8: + warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.") + attn_mask = attn_mask.to(torch.bool) + + if attn_mask.dim() == 2: + attn_mask = attn_mask.unsqueeze(0) + if list(attn_mask.size()) != [1, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 2D attn_mask is not correct.') + elif attn_mask.dim() == 3: + if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 3D attn_mask is not correct.') + else: + raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim())) + # attn_mask's dim is 3 now. + + # convert ByteTensor key_padding_mask to bool + if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8: + warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.") + key_padding_mask = key_padding_mask.to(torch.bool) + + if bias_k is not None and bias_v is not None: + if static_k is None and static_v is None: + k = torch.cat([k, bias_k.repeat(1, bsz, 1)]) + v = torch.cat([v, bias_v.repeat(1, bsz, 1)]) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + else: + assert static_k is None, "bias cannot be added to static key." + assert static_v is None, "bias cannot be added to static value." 
+ else: + assert bias_k is None + assert bias_v is None + + # L, N, E + if return_qk: + return_q = q.clone() / scaling + return_k = k.clone() + + q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1) + if k is not None: + k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + if v is not None: + v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + + if static_k is not None: + assert static_k.size(0) == bsz * num_heads + assert static_k.size(2) == head_dim + k = static_k + + if static_v is not None: + assert static_v.size(0) == bsz * num_heads + assert static_v.size(2) == head_dim + v = static_v + + src_len = k.size(1) + + if key_padding_mask is not None: + assert key_padding_mask.size(0) == bsz + assert key_padding_mask.size(1) == src_len + + if add_zero_attn: + src_len += 1 + k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1) + v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + + attn_output_weights = torch.bmm(q, k.transpose(1, 2)) + assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len] + + if attn_mask is not None: + if attn_mask.dtype == torch.bool: + attn_output_weights.masked_fill_(attn_mask, float('-inf')) + else: + attn_output_weights += attn_mask + + + if key_padding_mask is not None: + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + attn_output_weights = attn_output_weights.masked_fill( + key_padding_mask.unsqueeze(1).unsqueeze(2), + float('-inf'), + ) + attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len) + + attn_output_weights = softmax( + attn_output_weights, dim=-1) + attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training) + + attn_output = torch.bmm(attn_output_weights, v) + assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim] + attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) + attn_output = linear(attn_output, out_proj_weight, out_proj_bias) + + if return_qk: + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return return_q, return_k, attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return return_q, return_k, attn_output, None + else: + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return attn_output, None diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/attention_module_bias.py b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/attention_module_bias.py new file mode 100644 index 0000000..331c60b --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/attention_module_bias.py @@ -0,0 +1,319 @@ +r"""Functional interface""" +import warnings +import math + +import torch +from torch import _VF +from torch._jit_internal import Optional, Tuple +from torch.overrides import has_torch_function, handle_torch_function + +from torch.nn.functional import _pad, linear, softmax, dropout + +Tensor = torch.Tensor +pad = _pad + + +def multi_head_attention_forward(query: Tensor, + key: 
Tensor, + value: Tensor, + embed_dim_to_check: int, + num_heads: int, + in_proj_weight: Tensor, + in_proj_bias: Tensor, + bias_k: Optional[Tensor], + bias_v: Optional[Tensor], + add_zero_attn: bool, + dropout_p: float, + out_proj_weight: Tensor, + out_proj_bias: Tensor, + training: bool = True, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + use_separate_proj_weight: bool = False, + q_proj_weight: Optional[Tensor] = None, + k_proj_weight: Optional[Tensor] = None, + v_proj_weight: Optional[Tensor] = None, + static_k: Optional[Tensor] = None, + static_v: Optional[Tensor] = None, + return_qk: bool = False, + rpb: Tensor = None, + ) -> Tuple[Tensor, Optional[Tensor]]: + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + embed_dim_to_check: total dimension of the model. + num_heads: parallel attention heads. + in_proj_weight, in_proj_bias: input projection weight and bias. + bias_k, bias_v: bias of the key and value sequences to be added at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + dropout_p: probability of an element to be zeroed. + out_proj_weight, out_proj_bias: the output projection weight and bias. + training: apply dropout if is ``True``. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. This is an binary mask. When the value is True, + the corresponding value on the attention layer will be filled with -inf. + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. + use_separate_proj_weight: the function accept the proj. weights for query, key, + and value in different forms. If false, in_proj_weight will be used, which is + a combination of q_proj_weight, k_proj_weight, v_proj_weight. + q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias. + static_k, static_v: static key and value used for attention operators. + + + Shape: + Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions + will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. 
If a BoolTensor is provided, positions with ``True`` + are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + - return_qk: whether return Q and K. + - rpb: relative postion bias + + Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. + """ + if not torch.jit.is_scripting(): + tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, + out_proj_weight, out_proj_bias) + if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops): + return handle_torch_function( + multi_head_attention_forward, tens_ops, query, key, value, + embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias, + bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight, + out_proj_bias, training=training, key_padding_mask=key_padding_mask, + need_weights=need_weights, attn_mask=attn_mask, + use_separate_proj_weight=use_separate_proj_weight, + q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight, + v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v) + tgt_len, bsz, embed_dim = query.size() + assert embed_dim == embed_dim_to_check + # allow MHA to have different sizes for the feature dimension + assert key.size(0) == value.size(0) and key.size(1) == value.size(1) + + head_dim = embed_dim // num_heads + assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads" + scaling = float(head_dim) ** -0.5 + + if not use_separate_proj_weight: + if torch.equal(query, key) and torch.equal(key, value): + # self-attention + q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1) + + elif torch.equal(key, value): + # encoder-decoder attention + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = linear(query, _w, _b) + + if key is None: + assert value is None + k = None + v = None + else: + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + k, v = linear(key, _w, _b).chunk(2, dim=-1) + + else: + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = linear(query, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = embed_dim * 2 + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + k = linear(key, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim * 2 + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + 
v = linear(value, _w, _b) + else: + q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight) + len1, len2 = q_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == query.size(-1) + + k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight) + len1, len2 = k_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == key.size(-1) + + v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight) + len1, len2 = v_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == value.size(-1) + + if in_proj_bias is not None: + q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim]) + k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)]) + v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):]) + else: + q = linear(query, q_proj_weight_non_opt, in_proj_bias) + k = linear(key, k_proj_weight_non_opt, in_proj_bias) + v = linear(value, v_proj_weight_non_opt, in_proj_bias) + q = q * scaling + + if attn_mask is not None: + assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \ + attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \ + 'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype) + if attn_mask.dtype == torch.uint8: + warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.") + attn_mask = attn_mask.to(torch.bool) + + if attn_mask.dim() == 2: + attn_mask = attn_mask.unsqueeze(0) + if list(attn_mask.size()) != [1, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 2D attn_mask is not correct.') + elif attn_mask.dim() == 3: + if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 3D attn_mask is not correct.') + else: + raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim())) + # attn_mask's dim is 3 now. + + # convert ByteTensor key_padding_mask to bool + if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8: + warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.") + key_padding_mask = key_padding_mask.to(torch.bool) + + if bias_k is not None and bias_v is not None: + if static_k is None and static_v is None: + k = torch.cat([k, bias_k.repeat(1, bsz, 1)]) + v = torch.cat([v, bias_v.repeat(1, bsz, 1)]) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + else: + assert static_k is None, "bias cannot be added to static key." + assert static_v is None, "bias cannot be added to static value." 
+ else: + assert bias_k is None + assert bias_v is None + + # L, N, E + if return_qk: + return_q = q.clone() / scaling + return_k = k.clone() + + q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1) + if k is not None: + k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + if v is not None: + v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + + if static_k is not None: + assert static_k.size(0) == bsz * num_heads + assert static_k.size(2) == head_dim + k = static_k + + if static_v is not None: + assert static_v.size(0) == bsz * num_heads + assert static_v.size(2) == head_dim + v = static_v + + src_len = k.size(1) + + if key_padding_mask is not None: + assert key_padding_mask.size(0) == bsz + assert key_padding_mask.size(1) == src_len + + if add_zero_attn: + src_len += 1 + k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1) + v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + + attn_output_weights = torch.bmm(q, k.transpose(1, 2)) + assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len] + + if attn_mask is not None: + if attn_mask.dtype == torch.bool: + attn_output_weights.masked_fill_(attn_mask, float('-inf')) + else: + attn_output_weights += attn_mask + + + if key_padding_mask is not None: + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + attn_output_weights = attn_output_weights.masked_fill( + key_padding_mask.unsqueeze(1).unsqueeze(2), + float('-inf'), + ) + attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len) + + if rpb is not None: + attn_output_weights = attn_output_weights + rpb + attn_output_weights = softmax(attn_output_weights, dim=-1) + attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training) + + attn_output = torch.bmm(attn_output_weights, v) + assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim] + attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) + attn_output = linear(attn_output, out_proj_weight, out_proj_bias) + + if return_qk: + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return return_q, return_k, attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return return_q, return_k, attn_output, None + else: + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return attn_output, None diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/clip_vit.py b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/clip_vit.py new file mode 100644 index 0000000..5b8ea39 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/clip_vit.py @@ -0,0 +1,212 @@ +#!/usr/bin/env python + +import os +from collections import OrderedDict + +from timm.models.layers import DropPath +import torch +from torch import nn +import torch.utils.checkpoint as checkpoint + +from .attention import MultiheadAttention + + +MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model' +_MODELS = { + "ViT-B/32": 
os.path.join(MODEL_PATH, "vit_b32.pth"), + "ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"), + "ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"), + "ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"), +} + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def __init__(self, d_model, n_head, attn_mask=None, drop_path=0.0): + super().__init__() + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + print(f'Drop path rate: {drop_path}') + self.attn = MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + def attention(self, x, return_qk=False): + if return_qk: + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + q, k, attn_output, _ = self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask, return_qk=True) + return q, k, attn_output + else: + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def forward(self, x, return_qk=False): + if return_qk: + q, k, attn_output = self.attention(self.ln_1(x), return_qk=True) + x = x + self.drop_path(attn_output) + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x, q, k + else: + x = x + self.drop_path(self.attention(self.ln_1(x))) + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class Transformer(nn.Module): + def __init__(self, width, layers, heads, attn_mask=None, drop_path_rate=0.): + super().__init__() + self.width = width + self.layers = layers + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, layers)] + self.resblocks = nn.ModuleList([ + ResidualAttentionBlock(width, heads, attn_mask, drop_path=dpr[i]) for i in range(layers) + ]) + + def forward(self, x, return_num=4, T=8): + features = [] + for i, resblock in enumerate(self.resblocks): + x = resblock(x) + if i >= self.layers - return_num: + L, NT, C = x.shape + N = NT // T + features.append(x.view(L, N, T, C)) + return features + + +class VisionTransformer(nn.Module): + def __init__( + self, input_resolution, patch_size, width, layers, heads, output_dim, drop_path_rate=0., + ): + super().__init__() + self.input_resolution = input_resolution + self.output_dim = output_dim + self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False) + + scale = width ** -0.5 + self.class_embedding = nn.Parameter(scale * torch.randn(width)) + self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)) + self.ln_pre = LayerNorm(width) + + self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate) + + def forward(self, x, return_num=4, return_qk=True): + N, C, T, H, W = x.shape + x = x.permute(0, 2, 1, 3, 4).reshape(N * T, C, H, W) + + x = self.conv1(x) # shape = [*, width, grid, grid] + x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2] + x = x.permute(0, 2, 1) # 
shape = [*, grid ** 2, width] + x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] + x = x + self.positional_embedding.to(x.dtype) + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) # NLD -> LND + features = self.transformer( + x, return_num=return_num, T=T, + ) + + return features + + +def vit_b32(pretrained=True, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=224, + patch_size=32, + width=768, + layers=12, + heads=12, + output_dim=512, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu') + model.load_state_dict(state_dict) + return model.eval() + + +def vit_b16(pretrained=True, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=224, + patch_size=16, + width=768, + layers=12, + heads=12, + output_dim=512, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu') + model.load_state_dict(state_dict) + return model.eval() + + +def vit_l14(pretrained=True, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=224, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu') + model.load_state_dict(state_dict) + return model.eval() + + +def vit_l14_336(pretrained=True, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=336, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu') + model.load_state_dict(state_dict) + return model.eval() + + +if __name__ == '__main__': + import time + from fvcore.nn import FlopCountAnalysis + from fvcore.nn import flop_count_table + + model = vit_b32(pretrained=True) + + flops = FlopCountAnalysis(model, torch.rand(1, 3, 8, 224, 224)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/clip_vit_2plus1d.py b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/clip_vit_2plus1d.py new file mode 100644 index 0000000..3ad38bf --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/clip_vit_2plus1d.py @@ -0,0 +1,255 @@ +import os +from collections import OrderedDict + +from timm.models.layers import DropPath +import torch +from torch import nn +from einops import rearrange + +from .attention import MultiheadAttention + + +MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model' +_MODELS = { + "ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"), + "ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"), + "ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"), + "ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"), +} + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def __init__(self, 
d_model, n_head, attn_mask=None, drop_path=0.0): + super().__init__() + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + print(f'Drop path rate: {drop_path}') + # temporal + self.attn_t = MultiheadAttention(d_model, n_head) + self.ln_t = LayerNorm(d_model) + # spatial + self.attn = MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + # init zero + print('Init zero for (2+1)d') + nn.init.constant_(self.attn_t.in_proj_weight, 0) + nn.init.constant_(self.attn_t.in_proj_bias, 0) + nn.init.constant_(self.attn_t.out_proj.weight, 1) + nn.init.constant_(self.attn_t.out_proj.bias, 0) + + def attention(self, x): + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def attention_temporal(self, x): + self.attn_mask = None + return self.attn_t(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def forward(self, x, T=8): + # temporal + # x: 1+HWT, N, C + xt = x[1:, :, :] + _, N, C = xt.shape + xt = rearrange(xt, '(l t) n c -> t (n l) c', n=N, t=T) + res_temporal = self.attention_temporal(self.ln_t(xt)) + res_temporal = rearrange(res_temporal, 't (n l) c -> (l t) n c', n=N, t=T) + xt = x[1:, :, :] + self.drop_path(res_temporal) + + # spatial + init_cls_token = x[:1, :, :] + cls_token = init_cls_token.repeat(1, T, 1).view(1, T*N, C) + xs = rearrange(xt, '(l t) n c -> l (t n) c', n=N, t=T) + xs = torch.cat((cls_token, xs), 0) + res_spatial = self.attention(self.ln_1(xs)) + + # Taking care of CLS token + cls_token = res_spatial[0, :, :] + cls_token = rearrange(cls_token, '(t n) c -> t n c', n=N) + cls_token = torch.mean(cls_token, 0, True) # averaging for every frame + res_spatial = res_spatial[1:, :, :] + res_spatial = rearrange(res_spatial, 'l (t n) c -> (l t) n c', n=N) + x = x + self.drop_path(torch.cat((cls_token, res_spatial), 0)) + + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class Transformer(nn.Module): + def __init__(self, width, layers, heads, attn_mask=None, drop_path_rate=0.): + super().__init__() + self.width = width + self.layers = layers + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, layers)] + self.resblocks = nn.ModuleList([ + ResidualAttentionBlock(width, heads, attn_mask, drop_path=dpr[i]) for i in range(layers) + ]) + + def forward(self, x, return_num=4, T=8): + features = [] + for i, resblock in enumerate(self.resblocks): + x = resblock(x, T=T) + if i >= self.layers - return_num: + # LT + 1, N, C + LT, N, C = x.shape + L = (LT - 1) // T + cls_x, tmp_x = x[:1], x[1:] + cls_x = cls_x.unsqueeze(2).repeat(1, 1, T, 1) + tmp_x = tmp_x.reshape(L, T, N, C).permute(0, 2, 1, 3) # L, N, T, C + tmp_x = torch.cat([cls_x, tmp_x], dim=0 )# L + 1, N, T, C + features.append(tmp_x) + return features + + +class VisionTransformer(nn.Module): + def __init__( + self, input_resolution, patch_size, width, layers, heads, output_dim, num_frames=8, drop_path_rate=0., + ): + super().__init__() + self.input_resolution = input_resolution + self.output_dim = output_dim + self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False) + + scale = width ** -0.5 + self.class_embedding = nn.Parameter(scale * 
torch.randn(width)) + self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)) + self.temporal_positional_embedding = nn.Parameter(torch.zeros(1, num_frames, width)) + self.ln_pre = LayerNorm(width) + + self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate) + + def forward(self, x, return_num=4): + N, C, T, H, W = x.shape + x = x.permute(0, 2, 1, 3, 4).reshape(N * T, C, H, W) + + x = self.conv1(x) # shape = [*, width, grid, grid] + x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2] + x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width] + x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] + x = x + self.positional_embedding.to(x.dtype) + + cls_tokens = x[:N, :1, :] + x = x[:, 1:] + x = rearrange(x, '(b t) n c -> (b n) t c', b=N, t=T) + x = x + self.temporal_positional_embedding + x = rearrange(x, '(b n) t c -> b (n t) c', b=N, t=T) + x = torch.cat((cls_tokens, x), dim=1) + + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) # NLD -> LND + features = self.transformer( + x, return_num=return_num, T=T, + ) + + return features + + +def vit_2plus1d_b32(pretrained=True, num_frames=8, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=224, + patch_size=32, + width=768, + layers=12, + heads=12, + output_dim=512, + num_frames=num_frames, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +def vit_2plus1d_b16(pretrained=True, num_frames=8, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=224, + patch_size=16, + width=768, + layers=12, + heads=12, + output_dim=512, + num_frames=num_frames, + drop_path_rate=drop_path_rate, + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +def vit_2plus1d_l14(pretrained=True, num_frames=8, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=224, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + num_frames=num_frames, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +def vit_2plus1d_l14_336(pretrained=True, num_frames=8, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=336, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + num_frames=num_frames, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +if __name__ == '__main__': + import time + from fvcore.nn import FlopCountAnalysis + from fvcore.nn import flop_count_table + + model = vit_2plus1d_b32(pretrained=True) + + flops = FlopCountAnalysis(model, torch.rand(1, 3, 8, 224, 224)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/clip_vit_2plus1d_dw_bias.py 
b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/clip_vit_2plus1d_dw_bias.py new file mode 100644 index 0000000..6172712 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/clip_vit_2plus1d_dw_bias.py @@ -0,0 +1,306 @@ +import os +from collections import OrderedDict + +from timm.models.layers import DropPath +import torch +from torch import nn +from einops import rearrange +import torch.utils.checkpoint as checkpoint + +from .attention_bias import MultiheadAttention + + +MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model' +_MODELS = { + "ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"), + "ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"), + "ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"), + "ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"), +} + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def __init__(self, d_model, n_head, attn_mask=None, drop_path=0.0, t_size=8, spatial_size=7): + super().__init__() + + self.n_head = n_head + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + print(f'Drop path rate: {drop_path}') + print(f'Add RPB: t_size {t_size}, spatial_size {spatial_size}') + self.pos_embed = nn.Conv3d(d_model, d_model, kernel_size=3, stride=1, padding=1, groups=d_model) + # temporal + self.attn_t = MultiheadAttention(d_model, n_head) + self.ln_t = LayerNorm(d_model) + self.rpb_t = nn.Parameter(torch.zeros([t_size * 2 - 1, n_head])) + + idx_tensor_t = torch.zeros([t_size, t_size], dtype=torch.long) + for q in range(t_size): + for k in range(t_size): + offs = q - k + t_size - 1 + idx_tensor_t[q, k] = offs + self.idx_tensor_t = idx_tensor_t + + # spatial + self.attn = MultiheadAttention(d_model, n_head) + self.rpb = nn.Parameter(torch.zeros([(spatial_size * 2 - 1) ** 2, n_head])) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + idx_tensor = torch.zeros([spatial_size ** 2, spatial_size ** 2], dtype=torch.long) + for q in range(spatial_size ** 2): + qi, qj = q // spatial_size, q % spatial_size + for k in range(spatial_size ** 2): + ki, kj = k // spatial_size, k % spatial_size + i_offs = qi - ki + spatial_size - 1 + j_offs = qj - kj + spatial_size - 1 + idx_tensor[q, k] = i_offs * (spatial_size * 2 - 1) + j_offs + self.idx_tensor = idx_tensor + + # init zero + print('Init zero for (2+1)d') + nn.init.constant_(self.pos_embed.weight, 0) + nn.init.constant_(self.pos_embed.bias, 0) + nn.init.constant_(self.attn_t.in_proj_weight, 0) + nn.init.constant_(self.attn_t.in_proj_bias, 0) + nn.init.constant_(self.attn_t.out_proj.weight, 1) + nn.init.constant_(self.attn_t.out_proj.bias, 0) + + def attention(self, x, rpb=None): + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask, rpb=rpb)[0] + + def attention_temporal(self, x, rpb=None): + self.attn_mask = None + return self.attn_t(x, x, x, need_weights=False, attn_mask=self.attn_mask, rpb=rpb)[0] + + def forward(self, x, 
T=8): + # temporal + # x: 1+HWT, N, C + # pos_emb + tmp_x = x[1:, :, :] + LT, N, C = tmp_x.shape + L = LT // T + H = W = int(L ** 0.5) + tmp_x = tmp_x.view(H, W, T, N, C).permute(3, 4, 2, 0, 1) + tmp_x = tmp_x + self.pos_embed(tmp_x) + tmp_x = tmp_x.view(N, C, T, L).permute(3, 2, 0, 1).view(LT, N, C) + x[1:, :, :] = tmp_x + + xt = x[1:, :, :] + _, N, C = xt.shape + xt = rearrange(xt, '(l t) n c -> t (n l) c', n=N, t=T) + # rpb_t: T, T, H => B*H, T, T + self.idx_tensor_t = self.idx_tensor_t.to(xt.device) + rpb_t = self.rpb_t[self.idx_tensor_t].permute(2, 0, 1).repeat(N*L, 1, 1) + res_temporal = self.attention_temporal(self.ln_t(xt), rpb=rpb_t) + res_temporal = rearrange(res_temporal, 't (n l) c -> (l t) n c', n=N, t=T) + xt = x[1:, :, :] + self.drop_path(res_temporal) + + # spatial + init_cls_token = x[:1, :, :] + cls_token = init_cls_token.repeat(1, T, 1).view(1, T*N, C) + xs = rearrange(xt, '(l t) n c -> l (t n) c', n=N, t=T) + xs = torch.cat((cls_token, xs), 0) + # rpb: L, L, H => B*H, L+1, L+1 + rpb = torch.zeros((self.n_head, L+1, L+1), device=xs.device, dtype=xs.dtype) + self.idx_tensor = self.idx_tensor.to(xs.device) + rpb[:, 1:, 1:] = self.rpb[self.idx_tensor].permute(2, 0, 1) + rpb = rpb.repeat(T*N, 1, 1) + res_spatial = self.attention(self.ln_1(xs), rpb=rpb) + + # Taking care of CLS token + cls_token = res_spatial[0, :, :] + cls_token = rearrange(cls_token, '(t n) c -> t n c', n=N) + cls_token = torch.mean(cls_token, 0, True) # averaging for every frame + res_spatial = res_spatial[1:, :, :] + res_spatial = rearrange(res_spatial, 'l (t n) c -> (l t) n c', n=N) + x = x + self.drop_path(torch.cat((cls_token, res_spatial), 0)) + + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class Transformer(nn.Module): + def __init__(self, width, layers, heads, attn_mask=None, drop_path_rate=0., t_size=8, spatial_size=7): + super().__init__() + self.width = width + self.layers = layers + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, layers)] + self.resblocks = nn.ModuleList([ + ResidualAttentionBlock(width, heads, attn_mask, + drop_path=dpr[i], t_size=t_size, spatial_size=spatial_size) for i in range(layers) + ]) + + def forward(self, x, return_num=4, T=8): + features = [] + for i, resblock in enumerate(self.resblocks): + x = resblock(x, T=T) + if i >= self.layers - return_num: + # LT + 1, N, C + LT, N, C = x.shape + L = (LT - 1) // T + cls_x, tmp_x = x[:1], x[1:] + cls_x = cls_x.unsqueeze(2).repeat(1, 1, T, 1) + tmp_x = tmp_x.reshape(L, T, N, C).permute(0, 2, 1, 3) # L, N, T, C + tmp_x = torch.cat([cls_x, tmp_x], dim=0 )# L + 1, N, T, C + features.append(tmp_x) + return features + + +class VisionTransformer(nn.Module): + def __init__( + self, input_resolution, patch_size, width, layers, heads, output_dim, num_frames=8, drop_path_rate=0., + t_size=8, spatial_size=7 + ): + super().__init__() + self.input_resolution = input_resolution + self.output_dim = output_dim + self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False) + + scale = width ** -0.5 + self.class_embedding = nn.Parameter(scale * torch.randn(width)) + self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)) + self.temporal_positional_embedding = nn.Parameter(torch.zeros(1, num_frames, width)) + self.ln_pre = LayerNorm(width) + + self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate, t_size=t_size, spatial_size=spatial_size) + + def forward(self, x, return_num=4): + N, 
C, T, H, W = x.shape + x = x.permute(0, 2, 1, 3, 4).reshape(N * T, C, H, W) + + x = self.conv1(x) # shape = [*, width, grid, grid] + x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2] + x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width] + x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] + x = x + self.positional_embedding.to(x.dtype) + + cls_tokens = x[:N, :1, :] + x = x[:, 1:] + x = rearrange(x, '(b t) n c -> (b n) t c', b=N, t=T) + x = x + self.temporal_positional_embedding + x = rearrange(x, '(b n) t c -> b (n t) c', b=N, t=T) + x = torch.cat((cls_tokens, x), dim=1) + + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) # NLD -> LND + features = self.transformer( + x, return_num=return_num, T=T, + ) + + return features + + +def vit_2plus1d_dw_bias_b32(pretrained=True, num_frames=8, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=224, + patch_size=32, + width=768, + layers=12, + heads=12, + output_dim=512, + num_frames=num_frames, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +def vit_2plus1d_dw_bias_b16( + pretrained=True, num_frames=8, drop_path_rate=0., + t_size=8 +): + model = VisionTransformer( + input_resolution=224, + patch_size=16, + width=768, + layers=12, + heads=12, + output_dim=512, + num_frames=num_frames, + drop_path_rate=drop_path_rate, + t_size=t_size, + spatial_size=14 + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +def vit_2plus1d_dw_bias_l14(pretrained=True, num_frames=8, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=224, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + num_frames=num_frames, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +def vit_2plus1d_dw_bias_l14_336(pretrained=True, num_frames=8, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=336, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + num_frames=num_frames, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +if __name__ == '__main__': + import time + from fvcore.nn import FlopCountAnalysis + from fvcore.nn import flop_count_table + + model = vit_2plus1d_dw_bias_b32(pretrained=True) + + flops = FlopCountAnalysis(model, torch.rand(4, 3, 8, 224, 224)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/evl_module.py b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/evl_module.py new file mode 100644 index 0000000..a1f688e --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/evl_module.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python + +from collections import OrderedDict + +from 
timm.models.layers import DropPath +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor): + return x * torch.sigmoid(1.702 * x) + + +class ResidualDecoderBlock(nn.Module): + def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None, + mlp_factor: float = 4.0, dropout: float = 0.0, drop_path: float = 0.0): + super().__init__() + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + print(f'Drop path rate: {drop_path}') + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = nn.LayerNorm(d_model) + d_mlp = round(mlp_factor * d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_mlp)), + ("gelu", QuickGELU()), + ("dropout", nn.Dropout(dropout)), + ("c_proj", nn.Linear(d_mlp, d_model)) + ])) + self.ln_2 = nn.LayerNorm(d_model) + self.ln_3 = nn.LayerNorm(d_model) + self.attn_mask = attn_mask + + nn.init.xavier_uniform_(self.attn.in_proj_weight) + nn.init.xavier_uniform_(self.attn.out_proj.weight) + nn.init.xavier_uniform_(self.mlp[0].weight) + nn.init.xavier_uniform_(self.mlp[-1].weight) + + def attention(self, x: torch.Tensor, y: torch.Tensor): + #self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + # return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0] + assert self.attn_mask is None # not implemented + # manual forward to add position information + d_model = self.ln_1.weight.size(0) + q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model] + + k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model] + v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:] + Tx, Ty, N = q.size(0), k.size(0), q.size(1) + q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5)) + + aff = aff.softmax(dim=-1) + out = aff @ v + out = out.permute(2, 0, 1, 3).flatten(2) + out = self.attn.out_proj(out) + return out + + def forward(self, x: torch.Tensor, y: torch.Tensor): + x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y))) + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class TransformerDecoder(nn.Module): + def __init__(self, n_layers=4, + n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, + use_t_conv=True, use_t_pos_embed=True, num_classes=400, + add_residual=False, + ): + super().__init__() + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)] + self.dec = nn.ModuleList([ + ResidualDecoderBlock(n_dim, n_head, mlp_factor=mlp_factor, dropout=mlp_dropout[i], drop_path=dpr[i]) + for i in range(n_layers) + ]) + self.proj = nn.Sequential( + nn.LayerNorm(n_dim), + nn.Dropout(cls_dropout), + nn.Linear(n_dim, num_classes), + ) + self.temporal_cls_token = nn.Parameter(torch.zeros(n_dim)) + self.add_residual = add_residual + print(f'Add residual {add_residual}') + + if use_t_conv: + self.tconv = nn.ModuleList([ + nn.Conv1d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim) + for i in range(n_layers) + ]) + for m in self.tconv: + nn.init.constant_(m.bias, 0.) + m.weight.data[...] 
= torch.Tensor([0, 1, 0]) + else: + self.tconv = None + + if use_t_pos_embed: + self.pemb_t = nn.Parameter(torch.zeros([n_layers, t_size, n_dim])) + else: + self.pemb_t = None + + self.t_size = t_size + + def forward(self, clip_feats_all): + # clip_feats_all = clip_feats_all[-len(self.dec):] + # only return n_layers features, save memory + clip_feats = [x for x in clip_feats_all] + + L, N, T, C = clip_feats[0].size() + x = self.temporal_cls_token.view(1, 1, -1).repeat(1, N, 1) + + for i in range(len(clip_feats)): + if self.tconv is not None: + L, N, T, C = clip_feats[i].shape + clip_feats[i] = clip_feats[i].permute(0, 1, 3, 2).flatten(0, 1) # L * N, C, T + clip_feats[i] = self.tconv[i](clip_feats[i]).permute(0, 2, 1).contiguous().view(L, N, T, C) + if self.pemb_t is not None: + clip_feats[i] = clip_feats[i] + self.pemb_t[i] + clip_feats[i] = clip_feats[i].permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C + + for i in range(len(self.dec)): + x = self.dec[i](x, clip_feats[i]) + + if self.add_residual: + residual = clip_feats_all[-1][0].mean(1) + return self.proj(x[0, :, :] + residual) + else: + return self.proj(x[0, :, :]) + + +if __name__ == '__main__': + model = TransformerDecoder() + + # construct a fake input to demonstrate input tensor shape + L, N, T, C = 197, 1, 8, 768 # num_image_tokens, video_batch_size, t_size, feature_dim + # we use intermediate feature maps from multiple blocks, so input features should be a list + input_features = [] + for i in range(4): # vit-b has 12 blocks + # every item in input_features contains features maps from a single block + # every item is a tuple containing 3 feature maps: + # (1) block output features (i.e. after mlp) with shape L, N, T, C + # (2) projected query features with shape L, N, T, C + # (3) projected key features with shape L, N, T, C + input_features.append( + torch.zeros([L, N, T, C])) + # some small optimizations: + # (1) We only decode from the last $n$ blocks so it's good as long as the last $n$ items of input_features is valid and all previous items can be filled with None to save memory. By default $n=4$. + # (2) projected query/key features are optional. If you are using an uncompatible image backbone without query/key (e.g. CNN), you can fill the position with None (i.e. the tuple should be (Tensor, None, None) and set use_image_attnmap=False when constructing the model. 
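+    # Editorial note: in this demo every item appended above is a plain tensor of shape
+    # [L, N, T, C], which is the format this TransformerDecoder's forward() actually indexes;
+    # the tuple layout and the use_image_attnmap flag mentioned in the comments above are not
+    # parameters of this class and presumably refer to other EVL decoder variants.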
+ + print(model(input_features).shape) # should be N, 400 diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/evl_module_uniformer_diff_conv_balance.py b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/evl_module_uniformer_diff_conv_balance.py new file mode 100644 index 0000000..22ea729 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc/evl_utils/evl_module_uniformer_diff_conv_balance.py @@ -0,0 +1,515 @@ +#!/usr/bin/env python + +from collections import OrderedDict + +from timm.models.layers import trunc_normal_, DropPath +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor): + return x * torch.sigmoid(1.702 * x) + + +def conv_1x1x1(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (1, 1, 1), (1, 1, 1), (0, 0, 0), groups=groups) + +def conv_3x3x3(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (3, 3, 3), (1, 1, 1), (1, 1, 1), groups=groups) + +def conv_1x3x3(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (1, 3, 3), (1, 1, 1), (0, 1, 1), groups=groups) + +def bn_3d(dim): + return nn.BatchNorm3d(dim) + + +class STM(nn.Module): + def __init__(self, n_dim, reduction=4): + super(STM, self).__init__() + reduced_c = n_dim // reduction + self.reduce = nn.Sequential( + nn.Conv2d(n_dim, reduced_c, kernel_size=1, bias=False), + nn.BatchNorm2d(reduced_c) + ) + self.shift = nn.Conv2d(reduced_c, reduced_c, kernel_size=3, padding=1, groups=reduced_c, bias=False) + self.recover = nn.Sequential( + nn.Conv2d(reduced_c, n_dim, kernel_size=1, bias=False), + nn.BatchNorm2d(n_dim) + ) + self.pad = (0, 0, 0, 0, 0, 0, 0, 1) + + def forward(self, x): + # x: [L, N, T, C] + cls_token, x = x[:1], x[1:] + L, N, T, C = x.shape + H = W = int(L**0.5) + + fea = x.permute(1, 2, 3, 0).reshape(N*T, C, H, W) + bottleneck = self.reduce(fea) # NT, C//r, H, W + + # t feature + reshape_bottleneck = bottleneck.view((-1, T) + bottleneck.size()[1:]) # N, T, C//r, H, W + t_fea, __ = reshape_bottleneck.split([T-1, 1], dim=1) # N, T-1, C//r, H, W + + # apply transformation conv to t+1 feature + conv_bottleneck = self.shift(bottleneck) # NT, C//r, H, W + # reshape fea: N, T, C//r, H, W + reshape_conv_bottleneck = conv_bottleneck.view((-1, T) + conv_bottleneck.size()[1:]) + __, tPlusone_fea = reshape_conv_bottleneck.split([1, T-1], dim=1) # N, T-1, C//r, H, W + + # motion fea = t+1_fea - t_fea + # pad the last timestamp + diff_fea = tPlusone_fea - t_fea # N, T-1, C//r, H, W + # pad = (0,0,0,0,0,0,0,1) + diff_fea_pluszero = F.pad(diff_fea, self.pad, mode="constant", value=0) # N, T, C//r, H, W + diff_fea_pluszero = diff_fea_pluszero.view((-1,) + diff_fea_pluszero.size()[2:]) # NT, C//r, H, W + y = self.recover(diff_fea_pluszero) # NT, C, H, W + + # reshape + y = y.reshape(N, T, C, L).permute(3, 0, 1, 2) + y = torch.cat([cls_token, y], dim=0) + return y + + +class DSTM(nn.Module): + def __init__(self, n_dim, reduction=4): + super(DSTM, self).__init__() + reduced_c = n_dim // reduction + self.reduce = nn.Sequential( + nn.Conv2d(n_dim, reduced_c, kernel_size=1, bias=False), + nn.BatchNorm2d(reduced_c) + ) + # DW(T+1) - T + self.shift_pre = nn.Conv2d(reduced_c, reduced_c, kernel_size=3, padding=1, groups=reduced_c, bias=False) + self.recover_pre = nn.Sequential( + nn.Conv2d(reduced_c, n_dim, kernel_size=1, bias=False), + nn.BatchNorm2d(n_dim) + ) + self.pad_pre = (0, 0, 0, 0, 0, 0, 0, 1) + # DW(T-1) - T + self.shift_back = nn.Conv2d(reduced_c, reduced_c, kernel_size=3, padding=1, groups=reduced_c, 
bias=False) + self.recover_back = nn.Sequential( + nn.Conv2d(reduced_c, n_dim, kernel_size=1, bias=False), + nn.BatchNorm2d(n_dim) + ) + self.pad_back = (0, 0, 0, 0, 0, 0, 0, 1) + + def forward(self, x): + # x: [L, N, T, C] + cls_token, x = x[:1], x[1:] + L, N, T, C = x.shape + H = W = int(L**0.5) + + fea = x.permute(1, 2, 3, 0).reshape(N*T, C, H, W) + bottleneck = self.reduce(fea) # NT, C//r, H, W + + # t feature + reshape_bottleneck = bottleneck.view((-1, T) + bottleneck.size()[1:]) # N, T, C//r, H, W + pre_t_fea, __ = reshape_bottleneck.split([T-1, 1], dim=1) # N, T-1, C//r, H, W + back_t_fea, __ = reshape_bottleneck.split([1, T-1], dim=1) # N, T-1, C//r, H, W + # apply transformation conv to t+1/t-1 feature + pre_conv_bottleneck = self.shift_pre(bottleneck) # NT, C//r, H, W + back_conv_bottleneck = self.shift_back(bottleneck) # NT, C//r, H, W + # reshape fea: N, T, C//r, H, W + pre_reshape_conv_bottleneck = pre_conv_bottleneck.view((-1, T) + pre_conv_bottleneck.size()[1:]) + back_reshape_conv_bottleneck = back_conv_bottleneck.view((-1, T) + back_conv_bottleneck.size()[1:]) + __, tPlusone_fea = pre_reshape_conv_bottleneck.split([1, T-1], dim=1) # N, T-1, C//r, H, W + tMinusone_fea, _ = back_reshape_conv_bottleneck.split([T-1, 1], dim=1) # N, T-1, C//r, H, W + # pre_fea = t+1_fea - t_fea + # back_fea = t-1_fea - t_fea + pre_diff_fea = tPlusone_fea - pre_t_fea # N, T-1, C//r, H, W + back_diff_fea = tMinusone_fea - back_t_fea # N, T-1, C//r, H, W + # pad the last/first timestamp + pre_diff_fea_pluszero = F.pad(pre_diff_fea, self.pad_pre, mode="constant", value=0) # N, T, C//r, H, W + pre_diff_fea_pluszero = pre_diff_fea_pluszero.view((-1,) + pre_diff_fea_pluszero.size()[2:]) # NT, C//r, H, W + back_diff_fea_pluszero = F.pad(back_diff_fea, self.pad_back, mode="constant", value=0) # N, T, C//r, H, W + back_diff_fea_pluszero = back_diff_fea_pluszero.view((-1,) + back_diff_fea_pluszero.size()[2:]) # NT, C//r, H, W + # recover channel + pre_y = self.recover_pre(pre_diff_fea_pluszero) # NT, C, H, W + back_y = self.recover_back(back_diff_fea_pluszero) # NT, C, H, W + # reshape + y = (pre_y + back_y).reshape(N, T, C, L).permute(3, 0, 1, 2) + + # cat cls_token + y = torch.cat([cls_token, y], dim=0) + return y + + +class TDN(nn.Module): + def __init__(self, channel, n_segment=8, index=1, reduction=4): + super(TDN, self).__init__() + self.channel = channel + self.reduction = reduction + self.n_segment = n_segment + self.stride = 2**(index-1) + self.conv1 = nn.Conv2d(in_channels=self.channel, + out_channels=self.channel//self.reduction, + kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(num_features=self.channel//self.reduction) + self.conv2 = nn.Conv2d(in_channels=self.channel//self.reduction, + out_channels=self.channel//self.reduction, + kernel_size=3, padding=1, groups=self.channel//self.reduction, bias=False) + + self.avg_pool_forward2 = nn.AvgPool2d(kernel_size=2, stride=2) + self.avg_pool_forward4 = nn.AvgPool2d(kernel_size=4, stride=4) + + self.sigmoid_forward = nn.Sigmoid() + + self.avg_pool_backward2 = nn.AvgPool2d(kernel_size=2, stride=2)#nn.AdaptiveMaxPool2d(1) + self.avg_pool_backward4 = nn.AvgPool2d(kernel_size=4, stride=4) + + self.sigmoid_backward = nn.Sigmoid() + + self.pad1_forward = (0, 0, 0, 0, 0, 0, 0, 1) + self.pad1_backward = (0, 0, 0, 0, 0, 0, 1, 0) + + self.conv3 = nn.Conv2d(in_channels=self.channel//self.reduction, + out_channels=self.channel, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(num_features=self.channel) + + self.conv3_smallscale2 = 
nn.Conv2d(in_channels=self.channel//self.reduction, + out_channels=self.channel//self.reduction,padding=1, kernel_size=3, bias=False) + self.bn3_smallscale2 = nn.BatchNorm2d(num_features=self.channel//self.reduction) + + self.conv3_smallscale4 = nn.Conv2d(in_channels = self.channel//self.reduction, + out_channels=self.channel//self.reduction,padding=1, kernel_size=3, bias=False) + self.bn3_smallscale4 = nn.BatchNorm2d(num_features=self.channel//self.reduction) + + def spatial_pool(self, x): + nt, channel, height, width = x.size() + input_x = x + # [N, C, H * W] + input_x = input_x.view(nt, channel, height * width) + # [N, 1, C, H * W] + input_x = input_x.unsqueeze(1) + # [N, 1, H, W] + context_mask = self.conv_mask(x) + # [N, 1, H * W] + context_mask = context_mask.view(nt, 1, height * width) + # [N, 1, H * W] + context_mask = self.softmax(context_mask) + context_mask = context_mask.view(nt,1,height,width) + return context_mask + + def forward(self, x): + # x: [L, N, T, C] + cls_token, x = x[:1], x[1:] + L, N, T, C = x.shape + H = W = int(L**0.5) + fea = x.permute(1, 2, 3, 0).reshape(N*T, C, H, W) + + bottleneck = self.conv1(fea) # nt, c//r, h, w + bottleneck = self.bn1(bottleneck) # nt, c//r, h, w + reshape_bottleneck = bottleneck.view((-1, self.n_segment) + bottleneck.size()[1:]) # n, t, c//r, h, w + + t_fea_forward, _ = reshape_bottleneck.split([self.n_segment -1, 1], dim=1) # n, t-1, c//r, h, w + _, t_fea_backward = reshape_bottleneck.split([1, self.n_segment -1], dim=1) # n, t-1, c//r, h, w + + conv_bottleneck = self.conv2(bottleneck) # nt, c//r, h, w + reshape_conv_bottleneck = conv_bottleneck.view((-1, self.n_segment) + conv_bottleneck.size()[1:]) # n, t, c//r, h, w + _, tPlusone_fea_forward = reshape_conv_bottleneck.split([1, self.n_segment-1], dim=1) # n, t-1, c//r, h, w + tPlusone_fea_backward ,_ = reshape_conv_bottleneck.split([self.n_segment-1, 1], dim=1) # n, t-1, c//r, h, w + diff_fea_forward = tPlusone_fea_forward - t_fea_forward # n, t-1, c//r, h, w + diff_fea_backward = tPlusone_fea_backward - t_fea_backward# n, t-1, c//r, h, w + diff_fea_pluszero_forward = F.pad(diff_fea_forward, self.pad1_forward, mode="constant", value=0) # n, t, c//r, h, w + diff_fea_pluszero_forward = diff_fea_pluszero_forward.view((-1,) + diff_fea_pluszero_forward.size()[2:]) #nt, c//r, h, w + diff_fea_pluszero_backward = F.pad(diff_fea_backward, self.pad1_backward, mode="constant", value=0) # n, t, c//r, h, w + diff_fea_pluszero_backward = diff_fea_pluszero_backward.view((-1,) + diff_fea_pluszero_backward.size()[2:]) #nt, c//r, h, w + y_forward_smallscale2 = self.avg_pool_forward2(diff_fea_pluszero_forward) # nt, c//r, 1, 1 + y_backward_smallscale2 = self.avg_pool_backward2(diff_fea_pluszero_backward) # nt, c//r, 1, 1 + + y_forward_smallscale4 = diff_fea_pluszero_forward + y_backward_smallscale4 = diff_fea_pluszero_backward + y_forward_smallscale2 = self.bn3_smallscale2(self.conv3_smallscale2(y_forward_smallscale2)) + y_backward_smallscale2 = self.bn3_smallscale2(self.conv3_smallscale2(y_backward_smallscale2)) + + y_forward_smallscale4 = self.bn3_smallscale4(self.conv3_smallscale4(y_forward_smallscale4)) + y_backward_smallscale4 = self.bn3_smallscale4(self.conv3_smallscale4(y_backward_smallscale4)) + + y_forward_smallscale2 = F.interpolate(y_forward_smallscale2, diff_fea_pluszero_forward.size()[2:]) + y_backward_smallscale2 = F.interpolate(y_backward_smallscale2, diff_fea_pluszero_backward.size()[2:]) + + y_forward = self.bn3(self.conv3(1.0/3.0*diff_fea_pluszero_forward + 
1.0/3.0*y_forward_smallscale2 + 1.0/3.0*y_forward_smallscale4))# nt, c, 1, 1 + y_backward = self.bn3(self.conv3(1.0/3.0*diff_fea_pluszero_backward + 1.0/3.0*y_backward_smallscale2 + 1.0/3.0*y_backward_smallscale4)) # nt, c, 1, 1 + + y_forward = self.sigmoid_forward(y_forward) - 0.5 + y_backward = self.sigmoid_backward(y_backward) - 0.5 + + y = 0.5 * y_forward + 0.5 * y_backward + attn = fea * y + x = x + attn.reshape(N, T, C, L).permute(3, 0, 1, 2) + x = torch.cat([cls_token, x], dim=0) + return x + + +class CMlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = conv_1x1x1(in_features, hidden_features) + self.act = act_layer() + self.fc2 = conv_1x1x1(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class CBlock(nn.Module): + def __init__(self, dim, mlp_ratio=4., dropout=0., drop_path=0., uni_type='3d', add_ffn=True): + super().__init__() + self.norm1 = bn_3d(dim) + self.conv1 = conv_1x1x1(dim, dim, 1) + self.conv2 = conv_1x1x1(dim, dim, 1) + if uni_type == '3d': + print('Use 3d conv for local MHRA') + self.attn = conv_3x3x3(dim, dim, groups=dim) + else: + print('Use 2d conv for local MHRA') + self.attn = conv_1x3x3(dim, dim, groups=dim) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.add_ffn = add_ffn + if add_ffn: + print('Add FFN in local MHRA') + self.norm2 = bn_3d(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = CMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=nn.GELU, drop=dropout) + + print('Init zero') + nn.init.constant_(self.conv2.weight, 0.) + nn.init.constant_(self.conv2.bias, 0.) + if add_ffn: + nn.init.constant_(self.mlp.fc2.weight, 0.) + nn.init.constant_(self.mlp.fc2.bias, 0.) + + def forward(self, x): + x = x + self.drop_path(self.conv2(self.attn(self.conv1(self.norm1(x))))) + if self.add_ffn: + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class ResidualDecoderBlock(nn.Module): + def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None, + mlp_factor: float = 4.0, dropout: float = 0.0, drop_path: float = 0.0): + super().__init__() + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + print(f'Drop path rate: {drop_path}') + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = nn.LayerNorm(d_model) + d_mlp = round(mlp_factor * d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_mlp)), + ("gelu", QuickGELU()), + ("dropout", nn.Dropout(dropout)), + ("c_proj", nn.Linear(d_mlp, d_model)) + ])) + self.ln_2 = nn.LayerNorm(d_model) + self.ln_3 = nn.LayerNorm(d_model) + self.attn_mask = attn_mask + + nn.init.xavier_uniform_(self.attn.in_proj_weight) + # nn.init.xavier_uniform_(self.attn.out_proj.weight) + nn.init.constant_(self.attn.out_proj.weight, 0.) + nn.init.constant_(self.attn.out_proj.bias, 0.) + nn.init.xavier_uniform_(self.mlp[0].weight) + # nn.init.xavier_uniform_(self.mlp[-1].weight) + nn.init.constant_(self.mlp[-1].weight, 0.) + nn.init.constant_(self.mlp[-1].bias, 0.) 
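+ # NOTE: attention() below bypasses nn.MultiheadAttention.forward and computes
+ # cross-attention by hand: q is projected from x and k/v from y via slices of
+ # in_proj_weight / in_proj_bias, heads are split to (N, n_head, T, head_dim),
+ # scaled dot-product attention is applied, and the result passes through out_proj.
+ # attn_mask is asserted to be None since masking is not implemented in this path.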
+ + def attention(self, x: torch.Tensor, y: torch.Tensor): + #self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + # return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0] + assert self.attn_mask is None # not implemented + # manual forward to add position information + d_model = self.ln_1.weight.size(0) + q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model] + + k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model] + v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:] + Tx, Ty, N = q.size(0), k.size(0), q.size(1) + q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5)) + + aff = aff.softmax(dim=-1) + out = aff @ v + out = out.permute(2, 0, 1, 3).flatten(2) + out = self.attn.out_proj(out) + return out + + def forward(self, x: torch.Tensor, y: torch.Tensor): + x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y))) + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class TransformerDecoder_uniformer_diff_conv_balance(nn.Module): + def __init__(self, n_layers=4, + uni_layer=4, uni_type='3d', add_ffn=True, t_conv_type='1d', pre_prompt=True, + n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14, + balance=0., + use_t_conv=True, after_me=True, before_me=False, me_type='dstm', me_reduction=4, + use_t_pos_embed=True, num_classes=400): + super().__init__() + + n_layers += uni_layer + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)] + + self.uni_layer = uni_layer + self.uni_dec = nn.ModuleList([ + CBlock(n_dim, mlp_ratio=mlp_factor, dropout=mlp_dropout[i], drop_path=dpr[i], uni_type=uni_type, add_ffn=add_ffn) + for i in range(uni_layer) + ]) + + self.dec = nn.ModuleList([ + ResidualDecoderBlock(n_dim, n_head, mlp_factor=mlp_factor, dropout=mlp_dropout[i], drop_path=dpr[i]) + for i in range(n_layers) + ]) + + self.pre_prompt = pre_prompt + if pre_prompt: + print('Add pre prompt') + self.pre_temporal_cls_token = nn.Parameter(torch.zeros(n_dim)) + self.temporal_cls_token = nn.Parameter(torch.zeros(n_dim)) + + if use_t_conv: + self.t_conv_type = t_conv_type + if t_conv_type == '1d': + print('Use 1d t_conv for CPE') + self.tconv = nn.ModuleList([ + nn.Conv1d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim) + for i in range(n_layers) + ]) + for m in self.tconv: + nn.init.constant_(m.bias, 0.) + m.weight.data[...] = torch.Tensor([0, 1, 0]) + else: + print('Use 3d t_conv for CPE') + self.tconv = nn.ModuleList([ + nn.Conv3d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim) + for i in range(n_layers) + ]) + for m in self.tconv: + nn.init.constant_(m.bias, 0.) 
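+ # Unlike the 1d branch above, the 3d depthwise conv keeps the default weight init
+ # (only its bias is zeroed) rather than the identity [0, 1, 0] initialization.
+ # When use_t_conv is False, no temporal conv (CPE) is applied: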
+ else: + self.tconv = None + + self.before_me = before_me + self.after_me = after_me + if before_me or after_me: + assert before_me != after_me + print(f'Use {me_type} attention, Before {before_me}, After {after_me}') + if me_type == 'stm': + me_op = STM + elif me_type == 'dstm': + me_op = DSTM + elif me_type == 'tdn': + me_op = TDN + self.me = nn.ModuleList([me_op(n_dim, reduction=me_reduction) for i in range(n_layers)]) + + if use_t_pos_embed: + self.pemb_t = nn.Parameter(torch.zeros([n_layers, t_size, n_dim])) + else: + self.pemb_t = None + + print(F'Balnce weight {balance}') + self.balance = nn.Parameter(torch.ones((n_dim)) * balance) + self.sigmoid = nn.Sigmoid() + + def forward(self, clip_feats_all): + # clip_feats_all = clip_feats_all[-len(self.dec):] + # only return n_layers features, save memory + clip_feats = [x for x in clip_feats_all] + if self.after_me: + origin_clip_feats = [x for x in clip_feats_all] + + L, N, T, C = clip_feats[0].size() + x = self.temporal_cls_token.view(1, 1, -1).repeat(1, N, 1) + + for i in range(len(clip_feats)): + if self.before_me: + # contain residual + clip_feats[i] = self.me[i](clip_feats[i]) + if self.tconv is not None: + L, N, T, C = clip_feats[i].shape + if self.t_conv_type == '1d': + clip_feats[i] = clip_feats[i].permute(0, 1, 3, 2).flatten(0, 1) # L * N, C, T + clip_feats[i] = self.tconv[i](clip_feats[i]).permute(0, 2, 1).contiguous().view(L, N, T, C) + else: + H = W = int((L - 1) ** 0.5) + _, tmp_feats = clip_feats[i][:1], clip_feats[i][1:] + tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T, H, W) + tmp_feats = self.tconv[i](tmp_feats).view(N, C, T, L - 1).permute(3, 0, 2, 1) + clip_feats[i][1:] = clip_feats[i][1:] + tmp_feats + if self.pemb_t is not None: + clip_feats[i] = clip_feats[i] + self.pemb_t[i] + if self.after_me: + clip_feats[i] = clip_feats[i] + self.me[i](origin_clip_feats[i]) + if i < self.uni_layer: + # L, N, T, C + L, N, T, C = clip_feats[i].shape + H = W = int((L - 1) ** 0.5) + _, tmp_feats = clip_feats[i][:1], clip_feats[i][1:] + tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T, H, W) + tmp_feats = self.uni_dec[i](tmp_feats).view(N, C, T, L - 1).permute(3, 0, 2, 1) + clip_feats[i][1:] = clip_feats[i][1:] + tmp_feats + clip_feats[i] = clip_feats[i].permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C + + if self.pre_prompt: + pre_x = self.pre_temporal_cls_token.view(1, 1, -1).repeat(1, N, 1) + for i in range(len(self.dec)): + if i < self.uni_layer: + pre_x = self.dec[i](pre_x, clip_feats[i]) + elif i == self.uni_layer: + clip_feats[i] = torch.cat([pre_x, clip_feats[i]], dim=0) + x = self.dec[i](x, clip_feats[i]) + else: + x = self.dec[i](x, clip_feats[i]) + else: + for i in range(len(self.dec)): + x = self.dec[i](x, clip_feats[i]) + + # real residual + # L, N, T, C + residual = clip_feats_all[-1][0].mean(1) + weight = self.sigmoid(self.balance) + + # return self.proj((1 - weight) * x[0, :, :] + weight * residual) + return (1 - weight) * x[0, :, :] + weight * residual + + +if __name__ == '__main__': + model = TransformerDecoder_uniformer_diff_conv_balance() + + # construct a fake input to demonstrate input tensor shape + L, N, T, C = 197, 1, 8, 768 # num_image_tokens, video_batch_size, t_size, feature_dim + # we use intermediate feature maps from multiple blocks, so input features should be a list + input_features = [] + for i in range(8): # vit-b has 12 blocks + # every item in input_features contains features maps from a single block + # every item is a tuple containing 3 feature maps: + # (1) block output 
features (i.e. after mlp) with shape L, N, T, C + # (2) projected query features with shape L, N, T, C + # (3) projected key features with shape L, N, T, C + input_features.append( + tuple(torch.zeros([L, N, T, C]) for _ in range(3))) + # some small optimizations: + # (1) We only decode from the last $n$ blocks so it's good as long as the last $n$ items of input_features is valid and all previous items can be filled with None to save memory. By default $n=4$. + # (2) projected query/key features are optional. If you are using an uncompatible image backbone without query/key (e.g. CNN), you can fill the position with None (i.e. the tuple should be (Tensor, None, None) and set use_image_attnmap=False when constructing the model. + + print(model) + print(model(input_features).shape) # should be N, 400 diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/model.py b/Downstream/Video-Text-Retrieval/modules/clip_kc/model.py new file mode 100644 index 0000000..b6d8b88 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc/model.py @@ -0,0 +1,334 @@ +from collections import OrderedDict +from typing import Tuple, Union + +import numpy as np +import torch +import torch.nn.functional as F +from torch import nn +from einops import rearrange + +from . import evl_utils +from .evl_utils import TransformerDecoder_uniformer_diff_conv_balance + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x: torch.Tensor): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None): + super().__init__() + + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + def attention(self, x: torch.Tensor): + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def forward(self, x: torch.Tensor): + x = x + self.attention(self.ln_1(x)) + x = x + self.mlp(self.ln_2(x)) + return x + + +class Transformer(nn.Module): + def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None): + super().__init__() + self.width = width + self.layers = layers + self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)]) + + def forward(self, x: torch.Tensor): + return self.resblocks(x) + + +class CLIP(nn.Module): + def __init__(self, + embed_dim: int, + # vision + image_resolution: int, + vision_layers: Union[Tuple[int, int, int, int], int], + vision_width: int, + vision_patch_size: int, + # text + context_length: int, + vocab_size: int, + transformer_width: int, + transformer_heads: int, + transformer_layers: int, + # evl + n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14, + use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True, + backbone='vit_2plus1d_dw_bias_b16', + uni_layer=0, + uni_type='2d', + add_ffn=False, + t_conv_type='3d', + 
pre_prompt=False, + balance=0., + after_me=True, + before_me=False, + me_type='stm', + me_reduction=4, + ): + super().__init__() + + # All assertions is for adhoc clip_kc and should be removed + # assert vision_layers == 12, vision_layers + assert image_resolution == 224, image_resolution + # assert vision_patch_size == 32, vision_patch_size + assert vision_width == n_dim, (vision_width, n_dim) + + self.vision_width = n_dim + + self.context_length = context_length + + vision_heads = vision_width // 64 + self.visual = evl_utils.__dict__[backbone](pretrained=False) + self.evl = TransformerDecoder_uniformer_diff_conv_balance( + n_layers=n_layers, n_dim=n_dim, n_head=n_head, + mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, + use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed, + uni_layer=uni_layer, uni_type=uni_type, add_ffn=add_ffn, t_conv_type=t_conv_type, + pre_prompt=pre_prompt, balance=balance, + after_me=after_me, before_me=before_me, + me_type=me_type, me_reduction=me_reduction, + ) + self.visual_ln_post = nn.LayerNorm(n_dim) + scale = n_dim ** -0.5 + self.visual_proj = nn.Parameter(scale * torch.randn(n_dim, embed_dim)) + self.return_qk = use_image_attnmap + self.return_num = n_layers + + self.transformer = Transformer( + width=transformer_width, + layers=transformer_layers, + heads=transformer_heads, + attn_mask=self.build_attention_mask() + ) + + self.vocab_size = vocab_size + self.token_embedding = nn.Embedding(vocab_size, transformer_width) + self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width)) + self.ln_final = LayerNorm(transformer_width) + + self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim)) + self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) + + self.embed_dim = embed_dim + + # We seperate the mask embedding to load pretrained model + self.mask_embedding = nn.Parameter(torch.empty(1, 1, transformer_width)) + + self.initialize_parameters() + + def initialize_parameters(self): + nn.init.normal_(self.token_embedding.weight, std=0.02) + nn.init.normal_(self.positional_embedding, std=0.01) + nn.init.normal_(self.mask_embedding, std=0.02) + + proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5) + attn_std = self.transformer.width ** -0.5 + fc_std = (2 * self.transformer.width) ** -0.5 + for block in self.transformer.resblocks: + nn.init.normal_(block.attn.in_proj_weight, std=attn_std) + nn.init.normal_(block.attn.out_proj.weight, std=proj_std) + nn.init.normal_(block.mlp.c_fc.weight, std=fc_std) + nn.init.normal_(block.mlp.c_proj.weight, std=proj_std) + + if self.text_projection is not None: + nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5) + + nn.init.constant_(self.visual_ln_post.weight, 1.0) + nn.init.constant_(self.visual_ln_post.bias, 0.0) + + def build_attention_mask(self): + # lazily create causal attention mask, with full attention between the vision tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.empty(self.context_length, self.context_length) + mask.fill_(float("-inf")) + mask.triu_(1) # zero out the lower diagonal + return mask + + @property + def dtype(self): + return self.visual.conv1.weight.dtype + + def encode_video(self, video, return_all_feats=False): + if len(video.size()) == 4: #[bs * T, C, H, W] + #set_trace() + frames = 8 + video = rearrange(video, '(b t) c h w -> b t c h w', b=int(video.size(0)/frames), t=frames) 
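+ # If the input arrives frame-flattened as [bs * T, C, H, W], regroup it into
+ # [B, T, C, H, W] (T is hard-coded to 8 frames here); the permute below then
+ # yields [N, C, T, H, W], the layout expected by the 3D visual backbone.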
+ video = rearrange(video, 'b t c h w -> b c t h w') + + # video: [N, C, T, H, W] + features = self.visual(video, return_num=self.return_num) + x = self.visual_ln_post(self.evl(features)) + x = x @ self.visual_proj + + if return_all_feats: + return x, features[-1] # [N, T, C], [L, N, T, C] + + return x + + def encode_text(self, text, masked_indices=None, return_all_feats=False): + x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model] + + if masked_indices is not None: + x[masked_indices] = self.mask_embedding + + x = x + self.positional_embedding.type(self.dtype) + x = x.permute(1, 0, 2) # NLD -> LND + x = self.transformer(x) + x = x.permute(1, 0, 2) # LND -> NLD + x = self.ln_final(x).type(self.dtype) + + # x.shape = [batch_size, n_ctx, transformer.width] + # take features from the eot embedding (eot_token is the highest number in each sequence) + feats = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection + + if return_all_feats: + return feats, x + + return feats + + def forward(self, video, text): + video_features = self.encode_video(video) + text_features = self.encode_text(text) + + # normalized features + video_features = video_features / video_features.norm(dim=1, keepdim=True) + text_features = text_features / text_features.norm(dim=1, keepdim=True) + + # cosine similarity as logits + logit_scale = self.logit_scale.exp() + logits_per_video= logit_scale * video_features @ text_features.t() + logits_per_text = logits_per_video.t() + + # shape = [global_batch_size, global_batch_size] + return logits_per_video, logits_per_text + + +def convert_weights(model: nn.Module): + """Convert applicable model parameters to fp16""" + + def _convert_weights_to_fp16(l): + if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): + l.weight.data = l.weight.data.half() + if l.bias is not None: + l.bias.data = l.bias.data.half() + + if isinstance(l, nn.MultiheadAttention): + for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]: + tensor = getattr(l, attr) + if tensor is not None: + tensor.data = tensor.data.half() + + for name in ["text_projection", "proj"]: + if hasattr(l, name) and not isinstance(l, TransformerDecoder_uniformer_diff_conv_balance): + attr = getattr(l, name) + if attr is not None: + attr.data = attr.data.half() + + model.apply(_convert_weights_to_fp16) + + +def build_model( + state_dict: dict, + # evl + n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14, + use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True, no_pretrain=False, + ): + vit = "visual.proj" in state_dict or "visual.positional_embedding" in state_dict + + if "visual.proj" in state_dict: + state_dict["visual_proj"] = state_dict["visual.proj"] + state_dict["visual_ln_post.weight"] = state_dict["visual.ln_post.weight"] + state_dict["visual_ln_post.bias"] = state_dict["visual.ln_post.bias"] + del state_dict["visual.proj"], state_dict["visual.ln_post.weight"], state_dict["visual.ln_post.bias"] + # new_state_dict = OrderedDict() + # for k, v in state_dict.items(): + # if k.startswith("backbone."): + # k = k.replace("backbone.", "visual.") + + # new_state_dict[k] = v + + # state_dict = new_state_dict + if vit: + vision_width = state_dict["visual.conv1.weight"].shape[0] + vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")]) + vision_patch_size = 
state_dict["visual.conv1.weight"].shape[-1] + grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5) + image_resolution = vision_patch_size * grid_size + else: + counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]] + vision_layers = tuple(counts) + vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0] + output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5) + vision_patch_size = None + assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0] + image_resolution = output_width * 32 + + # embed_dim = 512 + # context_length = 77 + # vocab_size = 49408 + # transformer_width = 512 + # transformer_layers = 12 + embed_dim = state_dict["text_projection"].shape[1] + context_length = state_dict["positional_embedding"].shape[0] + vocab_size = state_dict["token_embedding.weight"].shape[0] + transformer_width = state_dict["ln_final.weight"].shape[0] + transformer_heads = transformer_width // 64 + transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks"))) + + vision_width = state_dict["visual_proj"].shape[0] + n_dim = vision_width + if vision_width == 768: + backbone = "vit_2plus1d_dw_bias_b16" + n_head = 12 + elif vision_width == 1024: + backbone = "vit_2plus1d_dw_bias_l14" + n_head = 16 + else: + raise NotImplementedError + + model = CLIP( + embed_dim, + image_resolution, vision_layers, vision_width, vision_patch_size, + context_length, vocab_size, transformer_width, transformer_heads, transformer_layers, + n_layers=n_layers, n_dim=n_dim, n_head=n_head, mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, spatial_size=spatial_size, + use_t_conv=use_t_conv, use_image_attnmap=use_image_attnmap, use_t_pos_embed=use_t_pos_embed, backbone=backbone + ) + + for key in ["input_resolution", "context_length", "vocab_size"]: + if key in state_dict: + del state_dict[key] + + # convert_weights(model) + + # strict=False, for parameters of decoder + # assert False, (len(model.state_dict()), len(state_dict)) + if not no_pretrain: + model.load_state_dict(state_dict, strict=False) + return model.eval() diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/model_freeze.py b/Downstream/Video-Text-Retrieval/modules/clip_kc/model_freeze.py new file mode 100644 index 0000000..b89aabb --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc/model_freeze.py @@ -0,0 +1,142 @@ +import os +import time + +import torch +import torch.nn as nn +from fvcore.nn import FlopCountAnalysis +from fvcore.nn import flop_count_table + +import evl_utils +from evl_utils import TransformerDecoder + +PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model' + + +class EVL(nn.Module): + def __init__(self, + backbone='vit_l14_336', + n_layers=4, + n_dim=1024, + n_head=16, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=32, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400 + ): + super().__init__() + + # pre-trained from CLIP + self.backbone = evl_utils.__dict__[backbone](pretrained=False) + + self.evl = TransformerDecoder( + n_layers=n_layers, n_dim=n_dim, n_head=n_head, + mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, + use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed, 
+ num_classes=num_classes, + ) + self.return_num = n_layers + + def forward(self, x): + features = self.backbone(x, return_num=self.return_num) + output = self.evl(features) + + return output + + +def cal_flops(model, frame=8, size=224): + flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) + + +def vit_l_sparse16(pretrained=True): + # 16x224x224 + # k400 1x1: 86.5 + model = EVL( + backbone='vit_l14', + n_layers=4, + n_dim=1024, + n_head=16, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=32, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400 + ) + if pretrained: + pretrained_path = os.path.join(PATH_PREFIX, 'vit_l_sparse16.pyth') + print(f'lodel model from: {pretrained_path}') + state_dict = torch.load(pretrained_path, map_location='cpu') + model.load_state_dict(state_dict) + return model + + +def vit_l_sparse32(pretrained=True): + # 32x224x224 + # k400 1x1: 87.0 + model = EVL( + backbone='vit_l14', + n_layers=4, + n_dim=1024, + n_head=16, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=32, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400 + ) + if pretrained: + pretrained_path = os.path.join(PATH_PREFIX, 'vit_l_sparse32.pyth') + print(f'lodel model from: {pretrained_path}') + state_dict = torch.load(pretrained_path, map_location='cpu') + model.load_state_dict(state_dict) + return model + + +def vit_l336_sparse32(pretrained=True): + # 32x336x336 + # k400 1x1: 87.4 + model = EVL( + backbone='vit_l14_336', + n_layers=4, + n_dim=1024, + n_head=16, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=32, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400 + ) + if pretrained: + pretrained_path = os.path.join(PATH_PREFIX, 'vit_l336_sparse32.pyth') + print(f'lodel model from: {pretrained_path}') + state_dict = torch.load(pretrained_path, map_location='cpu') + model.load_state_dict(state_dict) + return model + + +if __name__ == '__main__': + + model = vit_l_sparse16() + cal_flops(model, frame=16, size=224) + + # model = vit_l_sparse32() + # cal_flops(model, frame=32, size=224) + + # model = vit_l336_sparse32() + # cal_flops(model, frame=32, size=336) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/model_no_freeze.py b/Downstream/Video-Text-Retrieval/modules/clip_kc/model_no_freeze.py new file mode 100644 index 0000000..657c677 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc/model_no_freeze.py @@ -0,0 +1,116 @@ +import os +import time + +import torch +import torch.nn as nn +from fvcore.nn import FlopCountAnalysis +from fvcore.nn import flop_count_table + +import evl_utils +from evl_utils import TransformerDecoder + +PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model' + + +class EVL(nn.Module): + def __init__(self, + backbone='vit_l14_336', + n_layers=4, + n_dim=1024, + n_head=16, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=32, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400, + add_residual=False, + ): + super().__init__() + + # pre-trained from CLIP + self.backbone = evl_utils.__dict__[backbone](pretrained=False) + + self.evl = TransformerDecoder( + n_layers=n_layers, n_dim=n_dim, n_head=n_head, + mlp_factor=mlp_factor, 
drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, + use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed, + num_classes=num_classes, add_residual=add_residual + ) + self.return_num = n_layers + + def forward(self, x): + features = self.backbone(x, return_num=self.return_num) + output = self.evl(features) + + return output + + +def cal_flops(model, frame=8, size=224): + flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) + + +def vit_b_sparse8(pretrained=True): + # 8x224x224 + # k400 1x1: 82.4 + model = EVL( + backbone='vit_b16', + n_layers=4, + n_dim=768, + n_head=12, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=8, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400, + add_residual=True, + ) + if pretrained: + pretrained_path = os.path.join(PATH_PREFIX, 'vit_b_sparse8.pyth') + print(f'lodel model from: {pretrained_path}') + state_dict = torch.load(pretrained_path, map_location='cpu') + model.load_state_dict(state_dict) + return model + +def vit_2plus1d_b_sparse8(pretrained=True): + # 8x224x224 + # k400 1x1: 82.5 + model = EVL( + backbone='vit_2plus1d_b16', + n_layers=4, + n_dim=768, + n_head=12, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=8, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400, + add_residual=True, + ) + if pretrained: + pretrained_path = os.path.join(PATH_PREFIX, 'vit_2plus1d_b_sparse8.pyth') + print(f'lodel model from: {pretrained_path}') + state_dict = torch.load(pretrained_path, map_location='cpu') + model.load_state_dict(state_dict) + return model + + +if __name__ == '__main__': + + # model = vit_b_sparse8() + # cal_flops(model, frame=8, size=224) + + model = vit_2plus1d_b_sparse8() + cal_flops(model, frame=8, size=224) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/model_no_freeze_diff.py b/Downstream/Video-Text-Retrieval/modules/clip_kc/model_no_freeze_diff.py new file mode 100644 index 0000000..1d35df8 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc/model_no_freeze_diff.py @@ -0,0 +1,110 @@ +import os +import time + +import torch +import torch.nn as nn +from fvcore.nn import FlopCountAnalysis +from fvcore.nn import flop_count_table + +import evl_utils +from evl_utils import TransformerDecoder_uniformer_diff_conv_balance + +PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model' + + +class EVL(nn.Module): + def __init__(self, + backbone='vit_b16', + n_layers=12, + n_dim=1024, + n_head=16, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=32, + use_t_conv=True, + use_t_pos_embed=True, + uni_layer=4, + uni_type='3d', + add_ffn=True, + t_conv_type='1d', + pre_prompt=True, + balance=0., + after_me=True, + before_me=False, + me_type='dstm', + me_reduction=4, + num_classes=400, + ): + super().__init__() + + # pre-trained from CLIP + self.backbone = evl_utils.__dict__[backbone](pretrained=False) + + self.evl = TransformerDecoder_uniformer_diff_conv_balance( + n_layers=n_layers, n_dim=n_dim, n_head=n_head, + mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, + use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed, + uni_layer=uni_layer, 
uni_type=uni_type, add_ffn=add_ffn, t_conv_type=t_conv_type, + pre_prompt=pre_prompt, balance=balance, + after_me=after_me, before_me=before_me, + me_type=me_type, me_reduction=me_reduction, + num_classes=num_classes + ) + self.return_num = n_layers + + def forward(self, x): + features = self.backbone(x, return_num=self.return_num) + output = self.evl(features) + + return output + + +def cal_flops(model, frame=8, size=224): + flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) + + +def vit_2plus1d_diff_b_sparse8(pretrained=True): + # 8x224x224 + # k400 1x1: 82.5 + model = EVL( + backbone='vit_2plus1d_dw_bias_b16', + n_layers=12, + n_dim=768, + n_head=12, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=8, + use_t_conv=True, + use_t_pos_embed=True, + uni_layer=0, + uni_type='2d', + add_ffn=False, + t_conv_type='3d', + pre_prompt=False, + balance=0., + after_me=True, + before_me=False, + me_type='stm', + me_reduction=4, + num_classes=400, + ) + if pretrained: + pretrained_path = os.path.join(PATH_PREFIX, 'vit_2plus1d_diff_b_sparse8.pyth') + print(f'lodel model from: {pretrained_path}') + state_dict = torch.load(pretrained_path, map_location='cpu') + model.load_state_dict(state_dict) + return model + + +if __name__ == '__main__': + + model = vit_2plus1d_diff_b_sparse8() + cal_flops(model, frame=8, size=224) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc/simple_tokenizer.py b/Downstream/Video-Text-Retrieval/modules/clip_kc/simple_tokenizer.py new file mode 100644 index 0000000..0a66286 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc/simple_tokenizer.py @@ -0,0 +1,132 @@ +import gzip +import html +import os +from functools import lru_cache + +import ftfy +import regex as re + + +@lru_cache() +def default_bpe(): + return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz") + + +@lru_cache() +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a corresponding list of unicode strings. + The reversible bpe codes work on unicode strings. + This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. + When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. + This is a signficant percentage of your normal, say, 32K bpe vocab. + To avoid that, we want lookup tables between utf-8 bytes and unicode strings. + And avoids mapping to whitespace/control characters the bpe code barfs on. + """ + bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8+n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def get_pairs(word): + """Return set of symbol pairs in a word. + Word is represented as tuple of symbols (symbols being variable-length strings). 
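+ Example (illustrative): get_pairs(('l', 'o', 'w')) returns {('l', 'o'), ('o', 'w')}.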
+ """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +def basic_clean(text): + text = ftfy.fix_text(text) + text = html.unescape(html.unescape(text)) + return text.strip() + + +def whitespace_clean(text): + text = re.sub(r'\s+', ' ', text) + text = text.strip() + return text + + +class SimpleTokenizer(object): + def __init__(self, bpe_path: str = default_bpe()): + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + merges = gzip.open(bpe_path).read().decode("utf-8").split('\n') + merges = merges[1:49152-256-2+1] + merges = [tuple(merge.split()) for merge in merges] + vocab = list(bytes_to_unicode().values()) + vocab = vocab + [v+'' for v in vocab] + for merge in merges: + vocab.append(''.join(merge)) + vocab.extend(['<|startoftext|>', '<|endoftext|>']) + self.encoder = dict(zip(vocab, range(len(vocab)))) + self.decoder = {v: k for k, v in self.encoder.items()} + self.bpe_ranks = dict(zip(merges, range(len(merges)))) + self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'} + self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE) + + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token[:-1]) + ( token[-1] + '',) + pairs = get_pairs(word) + + if not pairs: + return token+'' + + while True: + bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf'))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + new_word.extend(word[i:j]) + i = j + except: + new_word.extend(word[i:]) + break + + if word[i] == first and i < len(word)-1 and word[i+1] == second: + new_word.append(first+second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = ' '.join(word) + self.cache[token] = word + return word + + def encode(self, text): + bpe_tokens = [] + text = whitespace_clean(basic_clean(text)).lower() + for token in re.findall(self.pat, text): + token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8')) + bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')) + return bpe_tokens + + def decode(self, tokens): + text = ''.join([self.decoder[token] for token in tokens]) + text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('', ' ') + return text diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/__init__.py b/Downstream/Video-Text-Retrieval/modules/clip_kc2/__init__.py new file mode 100644 index 0000000..dcc5619 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc2/__init__.py @@ -0,0 +1 @@ +from .clip import * diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/__pycache__/__init__.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc2/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..06ad8a6 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc2/__pycache__/__init__.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/__pycache__/clip.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc2/__pycache__/clip.cpython-36.pyc new file mode 100644 index 0000000..922f766 Binary 
files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc2/__pycache__/clip.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/__pycache__/clip_decoders.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc2/__pycache__/clip_decoders.cpython-36.pyc new file mode 100644 index 0000000..ce85dc6 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc2/__pycache__/clip_decoders.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/__pycache__/coca.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc2/__pycache__/coca.cpython-36.pyc new file mode 100644 index 0000000..b75f367 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc2/__pycache__/coca.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/__pycache__/model.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc2/__pycache__/model.cpython-36.pyc new file mode 100644 index 0000000..d8c394c Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc2/__pycache__/model.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/__pycache__/simple_tokenizer.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc2/__pycache__/simple_tokenizer.cpython-36.pyc new file mode 100644 index 0000000..e948f9b Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc2/__pycache__/simple_tokenizer.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/bpe_simple_vocab_16e6.txt.gz b/Downstream/Video-Text-Retrieval/modules/clip_kc2/bpe_simple_vocab_16e6.txt.gz new file mode 100644 index 0000000..7b5088a Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc2/bpe_simple_vocab_16e6.txt.gz differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/clip.py b/Downstream/Video-Text-Retrieval/modules/clip_kc2/clip.py new file mode 100644 index 0000000..31507e3 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc2/clip.py @@ -0,0 +1,257 @@ +import hashlib +import os +import urllib +import warnings +from typing import Any, Union, List +from pkg_resources import packaging + +import torch +from PIL import Image +from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize +from tqdm import tqdm + +from .model import build_model +from .simple_tokenizer import SimpleTokenizer as _Tokenizer + +try: + from torchvision.transforms import InterpolationMode + BICUBIC = InterpolationMode.BICUBIC +except ImportError: + BICUBIC = Image.BICUBIC + + +if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"): + warnings.warn("PyTorch version 1.7.1 or higher is recommended") + + +__all__ = ["available_models", "load", "tokenize"] +_tokenizer = _Tokenizer() + +_MODELS = { + "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", + "ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt", + "ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt", +} + + +def _download(url: str, root: str): + os.makedirs(root, exist_ok=True) + filename = os.path.basename(url) + + expected_sha256 = url.split("/")[-2] + download_target = os.path.join(root, filename) + + if 
os.path.exists(download_target) and not os.path.isfile(download_target): + raise RuntimeError(f"{download_target} exists and is not a regular file") + + if os.path.isfile(download_target): + if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256: + return download_target + else: + warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file") + + with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: + with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop: + while True: + buffer = source.read(8192) + if not buffer: + break + + output.write(buffer) + loop.update(len(buffer)) + + if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256: + raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match") + + return download_target + + +def _convert_image_to_rgb(image): + return image.convert("RGB") + + +def _transform(n_px): + return Compose([ + Resize(n_px, interpolation=BICUBIC), + CenterCrop(n_px), + _convert_image_to_rgb, + ToTensor(), + Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), + ]) + + +def available_models() -> List[str]: + """Returns the names of available CLIP models""" + return list(_MODELS.keys()) + + +def load( + name: str, + device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", + jit: bool = False, download_root: str = None, + # evl + n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14, + use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True, dropout=0.0, no_pretrain=False, init_zero=True, mergeclip=False, mergeweight=0.5, use_capdecoder=False, clip_state_dict=None, + ): + """Load a CLIP model + + Parameters + ---------- + name : str + A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict + + device : Union[str, torch.device] + The device to put the loaded model + + jit : bool + Whether to load the optimized JIT model or more hackable non-JIT model (default). + + download_root: str + path to download the model files; by default, it uses "~/.cache/clip" + + Returns + ------- + model : torch.nn.Module + The CLIP model + + preprocess : Callable[[PIL.Image], torch.Tensor] + A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input + """ + if name in _MODELS: + model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip")) + elif os.path.isfile(name): + model_path = name + else: + raise RuntimeError(f"Model {name} not found; available models = {available_models()}") + ''' + with open(model_path, 'rb') as opened_file: + try: + # loading JIT archive + model = torch.jit.load(opened_file, map_location=device if jit else "cpu").eval() + state_dict = None + except RuntimeError: + # loading saved state dict + if jit: + warnings.warn(f"File {model_path} is not a JIT archive. 
Loading as a state dict instead") + jit = False + state_dict = torch.load(model_path, map_location="cpu") + ''' + init_state_dict = torch.load(model_path, map_location='cpu')['state_dict'] + state_dict = {} + for k, v in init_state_dict.items(): + k = k.replace('clip.','') + state_dict[k] = v + + if not jit: + model = build_model( + state_dict or model.state_dict(), + n_layers=n_layers, n_dim=n_dim, n_head=n_head, mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, spatial_size=spatial_size, + use_t_conv=use_t_conv, use_image_attnmap=use_image_attnmap, use_t_pos_embed=use_t_pos_embed, no_pretrain=no_pretrain, + init_zero=init_zero, mergeclip=mergeclip, mergeweight=mergeweight, use_capdecoder=use_capdecoder, clip_state_dict=clip_state_dict, + ).to(device) + if str(device) == "cpu": + model.float() + return model, _transform(model.visual.input_resolution) + + # patch the device names + device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]) + device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1] + + def patch_device(module): + try: + graphs = [module.graph] if hasattr(module, "graph") else [] + except RuntimeError: + graphs = [] + + if hasattr(module, "forward1"): + graphs.append(module.forward1.graph) + + for graph in graphs: + for node in graph.findAllNodes("prim::Constant"): + if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"): + node.copyAttributes(device_node) + + model.apply(patch_device) + patch_device(model.encode_image) + patch_device(model.encode_text) + + # patch dtype to float32 on CPU + if str(device) == "cpu": + float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[]) + float_input = list(float_holder.graph.findNode("aten::to").inputs())[1] + float_node = float_input.node() + + def patch_float(module): + try: + graphs = [module.graph] if hasattr(module, "graph") else [] + except RuntimeError: + graphs = [] + + if hasattr(module, "forward1"): + graphs.append(module.forward1.graph) + + for graph in graphs: + for node in graph.findAllNodes("aten::to"): + inputs = list(node.inputs()) + for i in [1, 2]: # dtype can be the second or third argument to aten::to() + if inputs[i].node()["value"] == 5: + inputs[i].node().copyAttributes(float_node) + + model.apply(patch_float) + patch_float(model.encode_image) + patch_float(model.encode_text) + + model.float() + + return model, _transform(model.input_resolution.item()) + + +def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False, return_special_tokens_mask: bool = False) -> Union[torch.IntTensor, torch.LongTensor, torch.BoolTensor]: + """ + Returns the tokenized representation of given input string(s) + + Parameters + ---------- + texts : Union[str, List[str]] + An input string or a list of input strings to tokenize + + context_length : int + The context length to use; all CLIP models use 77 as the context length + + truncate: bool + Whether to truncate the text in case its encoding is longer than the context length + + Returns + ------- + A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]. + We return LongTensor when torch version is <1.8.0, since older index_select requires indices to be long. 
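+ 
+ Example (illustrative)
+ -------
+ >>> tokens = tokenize(["a person riding a bike"])
+ >>> tokens.shape  # torch.Size([1, 77]); each sequence is wrapped with <|startoftext|>/<|endoftext|> and zero-padded to the context length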
+ """ + if isinstance(texts, str): + texts = [texts] + + sot_token = _tokenizer.encoder["<|startoftext|>"] + eot_token = _tokenizer.encoder["<|endoftext|>"] + all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts] + if packaging.version.parse(torch.__version__) < packaging.version.parse("1.8.0"): + result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) + else: + result = torch.zeros(len(all_tokens), context_length, dtype=torch.int) + + special_tokens_mask = torch.zeros(len(all_tokens), context_length, dtype=torch.bool) + + for i, tokens in enumerate(all_tokens): + if len(tokens) > context_length: + if truncate: + tokens = tokens[:context_length] + tokens[-1] = eot_token + else: + raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}") + result[i, :len(tokens)] = torch.tensor(tokens) + special_tokens_mask[i, len(tokens):] = 1 + + if return_special_tokens_mask: + return result, special_tokens_mask + + return result diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/clip_decoders.py b/Downstream/Video-Text-Retrieval/modules/clip_kc2/clip_decoders.py new file mode 100644 index 0000000..77e6bc7 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc2/clip_decoders.py @@ -0,0 +1,199 @@ +import torch +import torch.nn as nn + +from .evl_utils.evl_module import ResidualDecoderBlock +from .coca import Residual, ParallelTransformerBlock, CrossAttention +from einops import repeat + + +class CaptionDecoder(nn.Module): + def __init__( + self, + n_layers, + transformer_width, + vision_width, + transformer_heads, + vocab_size, + num_visual_queries=256, + ): + super().__init__() + scale = transformer_width ** -0.5 + self.visual_queries = nn.Parameter( + scale * torch.randn(num_visual_queries, transformer_width) + ) + dim_head = transformer_width // transformer_heads + ff_mult = 4 + + self.visual_attn_pooler = CrossAttention( + dim=transformer_width, + context_dim=vision_width, + dim_head=dim_head, + heads=transformer_heads, + norm_context=True, + ) + self.visual_pooler_norm = nn.LayerNorm(transformer_width) + + self.text_norm = nn.LayerNorm(transformer_width) + + self.multimodal_layers = nn.ModuleList([]) + for ind in range(n_layers): + self.multimodal_layers.append( + nn.ModuleList( + [ + Residual( + ParallelTransformerBlock( + dim=transformer_width, + dim_head=dim_head, + heads=transformer_heads, + ff_mult=ff_mult, + ) + ), + Residual( + CrossAttention( + dim=transformer_width, + dim_head=dim_head, + heads=transformer_heads, + parallel_ff=True, + ff_mult=ff_mult, + ) + ), + ] + ) + ) + + self.predictor = nn.Sequential( + nn.LayerNorm(transformer_width), + nn.Linear(transformer_width, transformer_width), + nn.GELU(), + nn.LayerNorm(transformer_width), + nn.Linear(transformer_width, vocab_size), + ) + + def forward(self, image_feats, text_embeds): + # image_feats: # L, N, T, C + # text_feats: embeded text feats # N, L, C + # [L, N, T, C] -> [N, T * L, C] + image_feats = image_feats.permute(1, 0, 2, 3).flatten(1, 2) + visual_queries = repeat( + self.visual_queries, 'n d -> b n d', b=image_feats.shape[0] + ) + image_feats = self.visual_pooler_norm( + self.visual_attn_pooler(visual_queries, image_feats) + ) + + text_embeds = self.text_norm(text_embeds) + # go through multimodal layers + for attn_ff, cross_attn in self.multimodal_layers: + text_embeds = attn_ff(text_embeds) + text_embeds = cross_attn(text_embeds, image_feats) + + logits = self.predictor(text_embeds) + + return logits + + +class 
MaskedTextDecoder(nn.Module): + def __init__( + self, + n_layers, + transformer_width, + vision_width, + transformer_heads, + vocab_size, + drop_rate, + drop_path_rate, + ): + super().__init__() + self.visual_encoder_to_decoder = nn.Sequential( + nn.LayerNorm(vision_width), nn.Linear(vision_width, transformer_width) + ) + self.text_encoder_to_decoder = nn.Sequential( + nn.LayerNorm(transformer_width), + nn.Linear(transformer_width, transformer_width), + ) + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, n_layers) + ] # stochastic depth decay rule + # We are + self.text_decoder = nn.ModuleList( + [ + ResidualDecoderBlock( + d_model=transformer_width, + n_head=transformer_heads, + dropout=drop_rate, + drop_path=dpr[i], + ) + for i in range(n_layers) + ] + ) + self.text_decoder_ln = nn.LayerNorm(transformer_width) + + def forward(self, text_feats, visual_feats): + visual_feats = self.visual_encoder_to_decoder(visual_feats) + text_feats = self.text_encoder_to_decoder(text_feats) + # ! Shape + # [L, N, T, C] -> [T * L, N, C] + visual_feats = visual_feats.permute(2, 0, 1, 3).flatten(0, 1) + # [N, L, C] -> [L, N, C] + text_feats = text_feats.permute(1, 0, 2) + for dec in self.text_decoder: + text_feats = dec(text_feats, visual_feats) + text_feats = self.text_decoder_ln(text_feats).permute(1, 0, 2) + + return text_feats + + +class MaskedVisualDecoder(nn.Module): + def __init__( + self, + n_layers, + transformer_width, + vision_width, + transformer_heads, + patch_size, + drop_path_rate=0.0, + ): + super().__init__() + self.visual_encoder_to_decoder = nn.Sequential( + nn.LayerNorm(vision_width), nn.Linear(vision_width, transformer_width) + ) + self.text_encoder_to_decoder = nn.Sequential( + nn.LayerNorm(transformer_width), + nn.Linear(transformer_width, transformer_width), + ) + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, n_layers) + ] # stochastic depth decay rule + # We are setting the d_model as transformer_width because we want the decoder to be small + # Mayber later I will add a specific setting for this + self.vision_decoder = nn.ModuleList( + [ + ResidualDecoderBlock( + d_model=transformer_width, + n_head=transformer_heads, + drop_path=dpr[i], + ) + for i in range(n_layers) + ] + ) + self.predictor = nn.Sequential( + nn.LayerNorm(transformer_width), + nn.Linear(transformer_width, transformer_width), + nn.GELU(), + nn.LayerNorm(transformer_width), + nn.Linear(transformer_width, 3 * patch_size * patch_size), + ) + + def forward(self, text_feats, visual_feats): + # Remove cls_token first + visual_feats = self.visual_encoder_to_decoder(visual_feats[1:]) + text_feats = self.text_encoder_to_decoder(text_feats) + # [L, N, T, C] -> [T * L, N, C] + visual_feats = visual_feats.permute(2, 0, 1, 3).flatten(0, 1) + # [N, L, C] -> [L, N, C] + text_feats = text_feats.permute(1, 0, 2) + for dec in self.vision_decoder: + visual_feats = dec(visual_feats, text_feats) + visual_feats = self.predictor(visual_feats).permute(1, 0, 2) # [N, L, C] + + return visual_feats \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/coca.py b/Downstream/Video-Text-Retrieval/modules/clip_kc2/coca.py new file mode 100644 index 0000000..381127c --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc2/coca.py @@ -0,0 +1,477 @@ +# Modified from https://github.com/lucidrains/CoCa-pytorch/blob/main/coca_pytorch/coca_pytorch.py + +from turtle import forward +import torch +from torch import einsum, nn +import torch.nn.functional as F +from einops import 
rearrange, repeat + +# helper functions + +def exists(val): + return val is not None + +def default(val, d): + return val if exists(val) else d + +# for controlling freezing of parameters + +def set_module_requires_grad_(module, requires_grad): + for param in module.parameters(): + param.requires_grad = requires_grad + +def freeze_all_layers_(module): + set_module_requires_grad_(module, False) + +def unfreeze_all_layers_(module): + set_module_requires_grad_(module, True) + +def freeze_model_and_make_eval_(model): + model.eval() + freeze_all_layers_(model) + +# normalization +# they use layernorm without bias, something that pytorch does not offer + + +class LayerNorm(nn.Module): + def __init__(self, dim): + super().__init__() + self.gamma = nn.Parameter(torch.ones(dim)) + self.register_buffer("beta", torch.zeros(dim)) + + def forward(self, x): + return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta) + +# residual + + +class Residual(nn.Module): + def __init__(self, fn): + super().__init__() + self.fn = fn + + def forward(self, x, *args, **kwargs): + return self.fn(x, *args, **kwargs) + x + + +# rotary positional embedding +# https://arxiv.org/abs/2104.09864 + + +class RotaryEmbedding(nn.Module): + def __init__(self, dim): + super().__init__() + inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim)) + self.register_buffer("inv_freq", inv_freq) + + def forward(self, max_seq_len, *, device): + seq = torch.arange(max_seq_len, device=device, dtype=self.inv_freq.dtype) + freqs = einsum("i , j -> i j", seq, self.inv_freq) + return torch.cat((freqs, freqs), dim=-1) + + +def rotate_half(x): + x = rearrange(x, "... (j d) -> ... j d", j=2) + x1, x2 = x.unbind(dim=-2) + return torch.cat((-x2, x1), dim=-1) + + +def apply_rotary_pos_emb(pos, t): + return (t * pos.cos()) + (rotate_half(t) * pos.sin()) + + +# classic Noam Shazeer paper, except here they use SwiGLU instead of the more popular GEGLU for gating the feedforward +# https://arxiv.org/abs/2002.05202 + + +class SwiGLU(nn.Module): + def forward(self, x): + x, gate = x.chunk(2, dim=-1) + return F.silu(gate) * x + + +# parallel attention and feedforward with residual +# discovered by Wang et al + EleutherAI from GPT-J fame + + +class ParallelTransformerBlock(nn.Module): + def __init__(self, dim, dim_head=64, heads=8, ff_mult=4): + super().__init__() + self.norm = LayerNorm(dim) + + attn_inner_dim = dim_head * heads + ff_inner_dim = dim * ff_mult + self.fused_dims = (attn_inner_dim, dim_head, dim_head, (ff_inner_dim * 2)) + + self.heads = heads + self.scale = dim_head**-0.5 + self.rotary_emb = RotaryEmbedding(dim_head) + + self.fused_attn_ff_proj = nn.Linear(dim, sum(self.fused_dims), bias=False) + self.attn_out = nn.Linear(attn_inner_dim, dim, bias=False) + + self.ff_out = nn.Sequential( + SwiGLU(), + nn.Linear(ff_inner_dim, dim, bias=False) + ) + + # for caching causal mask and rotary embeddings + + self.register_buffer("mask", None, persistent=False) + self.register_buffer("pos_emb", None, persistent=False) + + def get_mask(self, n, device): + if self.mask is not None and self.mask.shape[-1] >= n: + return self.mask[:n, :n] + + mask = torch.ones((n, n), device=device, dtype=torch.bool).triu(1) + self.register_buffer("mask", mask, persistent=False) + return mask + + def get_rotary_embedding(self, n, device): + if self.pos_emb is not None and self.pos_emb.shape[-2] >= n: + return self.pos_emb[:n] + + pos_emb = self.rotary_emb(n, device=device) + self.register_buffer("pos_emb", pos_emb, persistent=False) + return pos_emb + + def 
forward(self, x, attn_mask=None): + """ + einstein notation + b - batch + h - heads + n, i, j - sequence length (base sequence length, source, target) + d - feature dimension + """ + + n, device, h = x.shape[1], x.device, self.heads + + # pre layernorm + + x = self.norm(x) + + # attention queries, keys, values, and feedforward inner + + q, k, v, ff = self.fused_attn_ff_proj(x).split(self.fused_dims, dim=-1) + + # split heads + # they use multi-query single-key-value attention, yet another Noam Shazeer paper + # they found no performance loss past a certain scale, and more efficient decoding obviously + # https://arxiv.org/abs/1911.02150 + + q = rearrange(q, "b n (h d) -> b h n d", h=h) + + # rotary embeddings + + positions = self.get_rotary_embedding(n, device) + q, k = map(lambda t: apply_rotary_pos_emb(positions, t), (q, k)) + + # scale + + q = q * self.scale + + # similarity + + sim = einsum("b h i d, b j d -> b h i j", q, k) + + # causal mask + + causal_mask = self.get_mask(n, device) + sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max) + + # extra attention mask - for masking out attention from text CLS token to padding + + if exists(attn_mask): + attn_mask = rearrange(attn_mask, 'b i j -> b 1 i j') + sim = sim.masked_fill(~attn_mask, -torch.finfo(sim.dtype).max) + + # attention + + sim = sim - sim.amax(dim=-1, keepdim=True).detach() + attn = sim.softmax(dim=-1) + + # aggregate values + + out = einsum("b h i j, b j d -> b h i d", attn, v) + + # merge heads + + out = rearrange(out, "b h n d -> b n (h d)") + return self.attn_out(out) + self.ff_out(ff) + +# cross attention - using multi-query + one-headed key / values as in PaLM w/ optional parallel feedforward + +class CrossAttention(nn.Module): + def __init__( + self, + dim, + *, + context_dim=None, + dim_head=64, + heads=8, + parallel_ff=False, + ff_mult=4, + norm_context=False + ): + super().__init__() + self.heads = heads + self.scale = dim_head ** -0.5 + inner_dim = heads * dim_head + context_dim = default(context_dim, dim) + + self.norm = LayerNorm(dim) + self.context_norm = LayerNorm(context_dim) if norm_context else nn.Identity() + + self.to_q = nn.Linear(dim, inner_dim, bias=False) + self.to_kv = nn.Linear(context_dim, dim_head * 2, bias=False) + self.to_out = nn.Linear(inner_dim, dim, bias=False) + + # whether to have parallel feedforward + + ff_inner_dim = ff_mult * dim + + self.ff = nn.Sequential( + nn.Linear(dim, ff_inner_dim * 2, bias=False), + SwiGLU(), + nn.Linear(ff_inner_dim, dim, bias=False) + ) if parallel_ff else None + + def forward(self, x, context): + """ + einstein notation + b - batch + h - heads + n, i, j - sequence length (base sequence length, source, target) + d - feature dimension + """ + + # pre-layernorm, for queries and context + + x = self.norm(x) + context = self.context_norm(context) + + # get queries + + q = self.to_q(x) + q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads) + + # scale + + q = q * self.scale + + # get key / values + + k, v = self.to_kv(context).chunk(2, dim=-1) + + # query / key similarity + + sim = einsum('b h i d, b j d -> b h i j', q, k) + + # attention + + sim = sim - sim.amax(dim=-1, keepdim=True) + attn = sim.softmax(dim=-1) + + # aggregate + + out = einsum('b h i j, b j d -> b h i d', attn, v) + + # merge and combine heads + + out = rearrange(out, 'b h n d -> b n (h d)') + out = self.to_out(out) + + # add parallel feedforward (for multimodal layers) + + if exists(self.ff): + out = out + self.ff(x) + + return out + +# transformer + + +class CoCa(nn.Module): + 
def __init__( + self, + *, + dim, + num_tokens, + unimodal_depth, + multimodal_depth, + image_dim = None, + num_img_queries=256, + dim_head=64, + heads=8, + ff_mult=4, + img_encoder=None, + caption_loss_weight=1., + contrastive_loss_weight=1., + pad_id=0 + ): + super().__init__() + self.dim = dim + + self.pad_id = pad_id + self.caption_loss_weight = caption_loss_weight + self.contrastive_loss_weight = contrastive_loss_weight + + # token embeddings + + self.token_emb = nn.Embedding(num_tokens, dim) + self.text_cls_token = nn.Parameter(torch.randn(dim)) + + # image encoder + + self.img_encoder = img_encoder + + if exists(self.img_encoder): + freeze_model_and_make_eval_(self.img_encoder) + + # attention pooling for image tokens + + self.img_queries = nn.Parameter(torch.randn(num_img_queries + 1, dim)) # num image queries for multimodal, but 1 extra CLS for contrastive learning + self.img_attn_pool = CrossAttention(dim=dim, context_dim=image_dim, dim_head=dim_head, heads=heads, norm_context=True) + + self.img_attn_pool_norm = LayerNorm(dim) + self.text_cls_norm = LayerNorm(dim) + + # contrastive learning temperature + + self.temperature = nn.Parameter(torch.Tensor([1.])) + + # unimodal layers + + self.unimodal_layers = nn.ModuleList([]) + for ind in range(unimodal_depth): + self.unimodal_layers.append( + Residual(ParallelTransformerBlock(dim=dim, dim_head=dim_head, heads=heads, ff_mult=ff_mult)), + ) + + # multimodal layers + + self.multimodal_layers = nn.ModuleList([]) + for ind in range(multimodal_depth): + self.multimodal_layers.append(nn.ModuleList([ + Residual(ParallelTransformerBlock(dim=dim, dim_head=dim_head, heads=heads, ff_mult=ff_mult)), + Residual(CrossAttention(dim=dim, dim_head=dim_head, heads=heads, parallel_ff=True, ff_mult=ff_mult)) + ])) + + # to logits + + self.to_logits = nn.Sequential( + LayerNorm(dim), + nn.Linear(dim, num_tokens, bias=False) + ) + + # they used embedding weight tied projection out to logits, not common, but works + self.to_logits[-1].weight = self.token_emb.weight + nn.init.normal_(self.token_emb.weight, std=0.02) + + def embed_text(self, text): + batch, device = text.shape[0], text.device + + seq = text.shape[1] + + text_tokens = self.token_emb(text) + + # append text cls tokens + + text_cls_tokens = repeat(self.text_cls_token, 'd -> b 1 d', b=batch) + text_tokens = torch.cat((text_tokens, text_cls_tokens), dim=-2) + + # create specific mask for text cls token at the end + # to prevent it from attending to padding + + cls_mask = rearrange(text!=self.pad_id, 'b j -> b 1 j') + attn_mask = F.pad(cls_mask, (0, 1, seq, 0), value=True) + + # go through unimodal layers + + for attn_ff in self.unimodal_layers: + text_tokens = attn_ff(text_tokens, attn_mask=attn_mask) + + # get text cls token + + text_tokens, text_cls_tokens = text_tokens[:, :-1], text_tokens[:, -1] + text_embeds = self.text_cls_norm(text_cls_tokens) + return text_embeds, text_tokens + + def embed_image(self, images=None, image_tokens=None): + # encode images into embeddings + # with the img_encoder passed in at init + # it can also accept precomputed image tokens + + assert not (exists(images) and exists(image_tokens)) + + if exists(images): + assert exists(self.img_encoder), 'img_encoder must be passed in for automatic image encoding' + + self.img_encoder.eval() + with torch.no_grad(): + image_tokens = self.img_encoder(images).detach() + + # attention pool image tokens + + img_queries = repeat(self.img_queries, 'n d -> b n d', b=image_tokens.shape[0]) + img_queries = 
self.img_attn_pool(img_queries, image_tokens) + img_queries = self.img_attn_pool_norm(img_queries) + + return img_queries[:, 0], img_queries[:, 1:] + + def forward( + self, + text, + images=None, + image_tokens=None, + labels=None, + return_loss=False, + return_embeddings=False + ): + batch, device = text.shape[0], text.device + + if return_loss and not exists(labels): + text, labels = text[:, :-1], text[:, 1:] + + text_embeds, text_tokens = self.embed_text(text) + + image_embeds, image_tokens = self.embed_image(images=images, image_tokens=image_tokens) + + # return embeddings if that is what the researcher wants + + if return_embeddings: + return text_embeds, image_embeds + + # go through multimodal layers + + for attn_ff, cross_attn in self.multimodal_layers: + text_tokens = attn_ff(text_tokens) + text_tokens = cross_attn(text_tokens, image_tokens) + + logits = self.to_logits(text_tokens) + + if not return_loss: + return logits + + # shorthand + + ce = F.cross_entropy + + # calculate caption loss (cross entropy loss) + + logits = rearrange(logits, 'b n c -> b c n') + caption_loss = ce(logits, labels, ignore_index=self.pad_id) + caption_loss = caption_loss * self.caption_loss_weight + + # calculate contrastive loss + + sim = einsum('i d, j d -> i j', text_embeds, image_embeds) + sim = sim * self.temperature.exp() + contrastive_labels = torch.arange(batch, device=device) + + contrastive_loss = (ce(sim, contrastive_labels) + ce(sim.t(), contrastive_labels)) * 0.5 + contrastive_loss = contrastive_loss * self.contrastive_loss_weight + + return caption_loss + contrastive_loss diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__init__.py b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__init__.py new file mode 100644 index 0000000..6e0e14d --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__init__.py @@ -0,0 +1,5 @@ +from .evl_module import TransformerDecoder +from .evl_module_uniformer_diff_conv_balance import TransformerDecoder_uniformer_diff_conv_balance +from .clip_vit import vit_b32, vit_b16, vit_l14, vit_l14_336 +from .clip_vit_2plus1d import vit_2plus1d_b32, vit_2plus1d_b16, vit_2plus1d_l14, vit_2plus1d_l14_336 +from .clip_vit_2plus1d_dw_bias import vit_2plus1d_dw_bias_b32, vit_2plus1d_dw_bias_b16, vit_2plus1d_dw_bias_l14, vit_2plus1d_dw_bias_l14_336 \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/__init__.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..f14dc43 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/__init__.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/attention.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/attention.cpython-36.pyc new file mode 100644 index 0000000..d535e82 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/attention.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/attention_bias.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/attention_bias.cpython-36.pyc new file mode 100644 index 0000000..22dfe67 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/attention_bias.cpython-36.pyc differ diff 
--git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/attention_module.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/attention_module.cpython-36.pyc new file mode 100644 index 0000000..e8f4520 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/attention_module.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/attention_module_bias.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/attention_module_bias.cpython-36.pyc new file mode 100644 index 0000000..a29e5ef Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/attention_module_bias.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/clip_vit.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/clip_vit.cpython-36.pyc new file mode 100644 index 0000000..d0563d8 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/clip_vit.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/clip_vit_2plus1d.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/clip_vit_2plus1d.cpython-36.pyc new file mode 100644 index 0000000..d2ce528 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/clip_vit_2plus1d.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/clip_vit_2plus1d_dw_bias.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/clip_vit_2plus1d_dw_bias.cpython-36.pyc new file mode 100644 index 0000000..98c2d2f Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/clip_vit_2plus1d_dw_bias.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/evl_module.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/evl_module.cpython-36.pyc new file mode 100644 index 0000000..da91140 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/evl_module.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/evl_module_uniformer_diff_conv_balance.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/evl_module_uniformer_diff_conv_balance.cpython-36.pyc new file mode 100644 index 0000000..ce59204 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/__pycache__/evl_module_uniformer_diff_conv_balance.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/attention.py b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/attention.py new file mode 100644 index 0000000..28ce56d --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/attention.py @@ -0,0 +1,203 @@ +#!/usr/bin/env python + +import warnings +from typing import Tuple, Optional + +import torch +from torch import Tensor +from torch.nn.modules.linear import Linear +from torch.nn.init import xavier_uniform_ +from torch.nn.init import constant_ +from torch.nn.init import xavier_normal_ +from torch.nn.parameter import Parameter +from 
torch.nn.modules.module import Module + +from .attention_module import multi_head_attention_forward + + +class _LinearWithBias(Linear): + bias: Tensor + + def __init__(self, in_features: int, out_features: int) -> None: + super().__init__(in_features, out_features, bias=True) + + +class MultiheadAttention(Module): + r"""Allows the model to jointly attend to information + from different representation subspaces. + See reference: Attention Is All You Need + + .. math:: + \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O + \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V) + + Args: + embed_dim: total dimension of the model. + num_heads: parallel attention heads. + dropout: a Dropout layer on attn_output_weights. Default: 0.0. + bias: add bias as module parameter. Default: True. + add_bias_kv: add bias to the key and value sequences at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + kdim: total number of features in key. Default: None. + vdim: total number of features in value. Default: None. + + Note: if kdim and vdim are None, they will be set to embed_dim such that + query, key, and value have the same number of features. + + Examples:: + + >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads) + >>> attn_output, attn_output_weights = multihead_attn(query, key, value) + """ + bias_k: Optional[torch.Tensor] + bias_v: Optional[torch.Tensor] + + def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None): + super(MultiheadAttention, self).__init__() + self.embed_dim = embed_dim + self.kdim = kdim if kdim is not None else embed_dim + self.vdim = vdim if vdim is not None else embed_dim + self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim + + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" + + if self._qkv_same_embed_dim is False: + self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim)) + self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim)) + self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim)) + self.register_parameter('in_proj_weight', None) + else: + self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim)) + self.register_parameter('q_proj_weight', None) + self.register_parameter('k_proj_weight', None) + self.register_parameter('v_proj_weight', None) + + if bias: + self.in_proj_bias = Parameter(torch.empty(3 * embed_dim)) + else: + self.register_parameter('in_proj_bias', None) + self.out_proj = _LinearWithBias(embed_dim, embed_dim) + + if add_bias_kv: + self.bias_k = Parameter(torch.empty(1, 1, embed_dim)) + self.bias_v = Parameter(torch.empty(1, 1, embed_dim)) + else: + self.bias_k = self.bias_v = None + + self.add_zero_attn = add_zero_attn + + self._reset_parameters() + + def _reset_parameters(self): + if self._qkv_same_embed_dim: + xavier_uniform_(self.in_proj_weight) + else: + xavier_uniform_(self.q_proj_weight) + xavier_uniform_(self.k_proj_weight) + xavier_uniform_(self.v_proj_weight) + + if self.in_proj_bias is not None: + constant_(self.in_proj_bias, 0.) + constant_(self.out_proj.bias, 0.) 
+ if self.bias_k is not None: + xavier_normal_(self.bias_k) + if self.bias_v is not None: + xavier_normal_(self.bias_v) + + def __setstate__(self, state): + # Support loading old MultiheadAttention checkpoints generated by v1.1.0 + if '_qkv_same_embed_dim' not in state: + state['_qkv_same_embed_dim'] = True + + super(MultiheadAttention, self).__setstate__(state) + + def forward(self, query, key, value, key_padding_mask=None, + need_weights=True, attn_mask=None, return_qk=False): + # type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor], bool) -> Tuple[Tensor, Optional[Tensor]] + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. When given a binary mask and a value is True, + the corresponding value on the attention layer will be ignored. When given + a byte mask and a value is non-zero, the corresponding value on the attention + layer will be ignored + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. + + Shape: + - Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the position + with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensure that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` + is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + - return_qk: whether return Q and K. + + - Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. 
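A minimal usage sketch of this wrapper, including the extended return_qk path described above; the sizes are arbitrary and the class is assumed to be importable from this file (evl_utils/attention.py):

import torch
# assumes: from evl_utils.attention import MultiheadAttention
embed_dim, num_heads = 768, 12                # arbitrary illustration sizes
attn = MultiheadAttention(embed_dim, num_heads)
L, N = 50, 2                                  # sequence length, batch size
x = torch.randn(L, N, embed_dim)              # (L, N, E) layout, as documented above
out, weights = attn(x, x, x)                  # out: (L, N, E), weights: (N, L, L)
q, k, out, weights = attn(x, x, x, return_qk=True)  # additionally returns Q and K, each (L, N, E)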
+ """ + if return_qk: + if not self._qkv_same_embed_dim: + q, k, attn_output, attn_output_weights = multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, use_separate_proj_weight=True, + q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight, + v_proj_weight=self.v_proj_weight, return_qk=True) + else: + q, k, attn_output, attn_output_weights = multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, return_qk=True) + return q, k, attn_output, attn_output_weights + else: + if not self._qkv_same_embed_dim: + return multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, use_separate_proj_weight=True, + q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight, + v_proj_weight=self.v_proj_weight) + else: + return multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask) diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/attention_bias.py b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/attention_bias.py new file mode 100644 index 0000000..006f56a --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/attention_bias.py @@ -0,0 +1,201 @@ +import warnings +from typing import Tuple, Optional + +import torch +from torch import Tensor +from torch.nn.modules.linear import Linear +from torch.nn.init import xavier_uniform_ +from torch.nn.init import constant_ +from torch.nn.init import xavier_normal_ +from torch.nn.parameter import Parameter +from torch.nn.modules.module import Module + +from .attention_module_bias import multi_head_attention_forward + + +class _LinearWithBias(Linear): + bias: Tensor + + def __init__(self, in_features: int, out_features: int) -> None: + super().__init__(in_features, out_features, bias=True) + + +class MultiheadAttention(Module): + r"""Allows the model to jointly attend to information + from different representation subspaces. + See reference: Attention Is All You Need + + .. math:: + \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O + \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V) + + Args: + embed_dim: total dimension of the model. + num_heads: parallel attention heads. + dropout: a Dropout layer on attn_output_weights. Default: 0.0. + bias: add bias as module parameter. Default: True. + add_bias_kv: add bias to the key and value sequences at dim=0. 
+ add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + kdim: total number of features in key. Default: None. + vdim: total number of features in value. Default: None. + + Note: if kdim and vdim are None, they will be set to embed_dim such that + query, key, and value have the same number of features. + + Examples:: + + >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads) + >>> attn_output, attn_output_weights = multihead_attn(query, key, value) + """ + bias_k: Optional[torch.Tensor] + bias_v: Optional[torch.Tensor] + + def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None): + super(MultiheadAttention, self).__init__() + self.embed_dim = embed_dim + self.kdim = kdim if kdim is not None else embed_dim + self.vdim = vdim if vdim is not None else embed_dim + self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim + + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" + + if self._qkv_same_embed_dim is False: + self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim)) + self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim)) + self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim)) + self.register_parameter('in_proj_weight', None) + else: + self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim)) + self.register_parameter('q_proj_weight', None) + self.register_parameter('k_proj_weight', None) + self.register_parameter('v_proj_weight', None) + + if bias: + self.in_proj_bias = Parameter(torch.empty(3 * embed_dim)) + else: + self.register_parameter('in_proj_bias', None) + self.out_proj = _LinearWithBias(embed_dim, embed_dim) + + if add_bias_kv: + self.bias_k = Parameter(torch.empty(1, 1, embed_dim)) + self.bias_v = Parameter(torch.empty(1, 1, embed_dim)) + else: + self.bias_k = self.bias_v = None + + self.add_zero_attn = add_zero_attn + + self._reset_parameters() + + def _reset_parameters(self): + if self._qkv_same_embed_dim: + xavier_uniform_(self.in_proj_weight) + else: + xavier_uniform_(self.q_proj_weight) + xavier_uniform_(self.k_proj_weight) + xavier_uniform_(self.v_proj_weight) + + if self.in_proj_bias is not None: + constant_(self.in_proj_bias, 0.) + constant_(self.out_proj.bias, 0.) + if self.bias_k is not None: + xavier_normal_(self.bias_k) + if self.bias_v is not None: + xavier_normal_(self.bias_v) + + def __setstate__(self, state): + # Support loading old MultiheadAttention checkpoints generated by v1.1.0 + if '_qkv_same_embed_dim' not in state: + state['_qkv_same_embed_dim'] = True + + super(MultiheadAttention, self).__setstate__(state) + + def forward(self, query, key, value, key_padding_mask=None, + need_weights=True, attn_mask=None, return_qk=False, rpb=None): + # type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor], bool, Optional[Tensor]) -> Tuple[Tensor, Optional[Tensor]] + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. When given a binary mask and a value is True, + the corresponding value on the attention layer will be ignored. 
When given + a byte mask and a value is non-zero, the corresponding value on the attention + layer will be ignored + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. + + Shape: + - Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the position + with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensure that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` + is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + - return_qk: whether return Q and K. + + - Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. 
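A minimal sketch of the extra rpb (relative position bias) argument this variant adds; the shape is inferred from the `attn_output_weights + rpb` step in attention_module_bias.py later in this diff, where the weights are (N * num_heads, L, S), and all sizes here are arbitrary:

import torch
# assumes: from evl_utils.attention_bias import MultiheadAttention
embed_dim, num_heads = 768, 12
attn = MultiheadAttention(embed_dim, num_heads)
L, N = 50, 2
x = torch.randn(L, N, embed_dim)
# bias added to the raw attention logits before softmax; must match or broadcast to (N * num_heads, L, S)
rpb = torch.zeros(num_heads, L, L).repeat(N, 1, 1)
out, weights = attn(x, x, x, rpb=rpb)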
+ """ + if return_qk: + if not self._qkv_same_embed_dim: + q, k, attn_output, attn_output_weights = multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, use_separate_proj_weight=True, + q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight, + v_proj_weight=self.v_proj_weight, return_qk=True, rpb=rpb) + else: + q, k, attn_output, attn_output_weights = multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, return_qk=True, rpb=rpb) + return q, k, attn_output, attn_output_weights + else: + if not self._qkv_same_embed_dim: + return multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, use_separate_proj_weight=True, + q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight, + v_proj_weight=self.v_proj_weight, rpb=rpb) + else: + return multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, rpb=rpb) diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/attention_module.py b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/attention_module.py new file mode 100644 index 0000000..fd4e73b --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/attention_module.py @@ -0,0 +1,316 @@ +r"""Functional interface""" +import warnings +import math + +import torch +from torch import _VF +from torch._jit_internal import Optional, Tuple +from torch.overrides import has_torch_function, handle_torch_function + +from torch.nn.functional import _pad, linear, softmax, dropout + +Tensor = torch.Tensor +pad = _pad + + +def multi_head_attention_forward(query: Tensor, + key: Tensor, + value: Tensor, + embed_dim_to_check: int, + num_heads: int, + in_proj_weight: Tensor, + in_proj_bias: Tensor, + bias_k: Optional[Tensor], + bias_v: Optional[Tensor], + add_zero_attn: bool, + dropout_p: float, + out_proj_weight: Tensor, + out_proj_bias: Tensor, + training: bool = True, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + use_separate_proj_weight: bool = False, + q_proj_weight: Optional[Tensor] = None, + k_proj_weight: Optional[Tensor] = None, + v_proj_weight: Optional[Tensor] = None, + static_k: Optional[Tensor] = None, + static_v: Optional[Tensor] = None, + return_qk: bool = False + ) -> Tuple[Tensor, Optional[Tensor]]: + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. 
+ See "Attention Is All You Need" for more details. + embed_dim_to_check: total dimension of the model. + num_heads: parallel attention heads. + in_proj_weight, in_proj_bias: input projection weight and bias. + bias_k, bias_v: bias of the key and value sequences to be added at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + dropout_p: probability of an element to be zeroed. + out_proj_weight, out_proj_bias: the output projection weight and bias. + training: apply dropout if is ``True``. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. This is an binary mask. When the value is True, + the corresponding value on the attention layer will be filled with -inf. + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. + use_separate_proj_weight: the function accept the proj. weights for query, key, + and value in different forms. If false, in_proj_weight will be used, which is + a combination of q_proj_weight, k_proj_weight, v_proj_weight. + q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias. + static_k, static_v: static key and value used for attention operators. + + + Shape: + Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions + will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` + are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + - return_qk: whether return Q and K. + + Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. 
+ """ + if not torch.jit.is_scripting(): + tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, + out_proj_weight, out_proj_bias) + if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops): + return handle_torch_function( + multi_head_attention_forward, tens_ops, query, key, value, + embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias, + bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight, + out_proj_bias, training=training, key_padding_mask=key_padding_mask, + need_weights=need_weights, attn_mask=attn_mask, + use_separate_proj_weight=use_separate_proj_weight, + q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight, + v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v) + tgt_len, bsz, embed_dim = query.size() + assert embed_dim == embed_dim_to_check + # allow MHA to have different sizes for the feature dimension + assert key.size(0) == value.size(0) and key.size(1) == value.size(1) + + head_dim = embed_dim // num_heads + assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads" + scaling = float(head_dim) ** -0.5 + + if not use_separate_proj_weight: + if torch.equal(query, key) and torch.equal(key, value): + # self-attention + q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1) + + elif torch.equal(key, value): + # encoder-decoder attention + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = linear(query, _w, _b) + + if key is None: + assert value is None + k = None + v = None + else: + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + k, v = linear(key, _w, _b).chunk(2, dim=-1) + + else: + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = linear(query, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = embed_dim * 2 + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + k = linear(key, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim * 2 + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + v = linear(value, _w, _b) + else: + q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight) + len1, len2 = q_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == query.size(-1) + + k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight) + len1, len2 = k_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == key.size(-1) + + v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight) + len1, len2 = v_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == value.size(-1) + + if in_proj_bias is not None: + q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim]) + k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)]) + v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):]) + else: + q = linear(query, q_proj_weight_non_opt, in_proj_bias) + k = linear(key, k_proj_weight_non_opt, 
in_proj_bias) + v = linear(value, v_proj_weight_non_opt, in_proj_bias) + q = q * scaling + + if attn_mask is not None: + assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \ + attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \ + 'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype) + if attn_mask.dtype == torch.uint8: + warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.") + attn_mask = attn_mask.to(torch.bool) + + if attn_mask.dim() == 2: + attn_mask = attn_mask.unsqueeze(0) + if list(attn_mask.size()) != [1, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 2D attn_mask is not correct.') + elif attn_mask.dim() == 3: + if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 3D attn_mask is not correct.') + else: + raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim())) + # attn_mask's dim is 3 now. + + # convert ByteTensor key_padding_mask to bool + if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8: + warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.") + key_padding_mask = key_padding_mask.to(torch.bool) + + if bias_k is not None and bias_v is not None: + if static_k is None and static_v is None: + k = torch.cat([k, bias_k.repeat(1, bsz, 1)]) + v = torch.cat([v, bias_v.repeat(1, bsz, 1)]) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + else: + assert static_k is None, "bias cannot be added to static key." + assert static_v is None, "bias cannot be added to static value." 
+ else: + assert bias_k is None + assert bias_v is None + + # L, N, E + if return_qk: + return_q = q.clone() / scaling + return_k = k.clone() + + q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1) + if k is not None: + k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + if v is not None: + v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + + if static_k is not None: + assert static_k.size(0) == bsz * num_heads + assert static_k.size(2) == head_dim + k = static_k + + if static_v is not None: + assert static_v.size(0) == bsz * num_heads + assert static_v.size(2) == head_dim + v = static_v + + src_len = k.size(1) + + if key_padding_mask is not None: + assert key_padding_mask.size(0) == bsz + assert key_padding_mask.size(1) == src_len + + if add_zero_attn: + src_len += 1 + k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1) + v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + + attn_output_weights = torch.bmm(q, k.transpose(1, 2)) + assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len] + + if attn_mask is not None: + if attn_mask.dtype == torch.bool: + attn_output_weights.masked_fill_(attn_mask, float('-inf')) + else: + attn_output_weights += attn_mask + + + if key_padding_mask is not None: + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + attn_output_weights = attn_output_weights.masked_fill( + key_padding_mask.unsqueeze(1).unsqueeze(2), + float('-inf'), + ) + attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len) + + attn_output_weights = softmax( + attn_output_weights, dim=-1) + attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training) + + attn_output = torch.bmm(attn_output_weights, v) + assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim] + attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) + attn_output = linear(attn_output, out_proj_weight, out_proj_bias) + + if return_qk: + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return return_q, return_k, attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return return_q, return_k, attn_output, None + else: + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return attn_output, None diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/attention_module_bias.py b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/attention_module_bias.py new file mode 100644 index 0000000..331c60b --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/attention_module_bias.py @@ -0,0 +1,319 @@ +r"""Functional interface""" +import warnings +import math + +import torch +from torch import _VF +from torch._jit_internal import Optional, Tuple +from torch.overrides import has_torch_function, handle_torch_function + +from torch.nn.functional import _pad, linear, softmax, dropout + +Tensor = torch.Tensor +pad = _pad + + +def multi_head_attention_forward(query: Tensor, + 
key: Tensor, + value: Tensor, + embed_dim_to_check: int, + num_heads: int, + in_proj_weight: Tensor, + in_proj_bias: Tensor, + bias_k: Optional[Tensor], + bias_v: Optional[Tensor], + add_zero_attn: bool, + dropout_p: float, + out_proj_weight: Tensor, + out_proj_bias: Tensor, + training: bool = True, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + use_separate_proj_weight: bool = False, + q_proj_weight: Optional[Tensor] = None, + k_proj_weight: Optional[Tensor] = None, + v_proj_weight: Optional[Tensor] = None, + static_k: Optional[Tensor] = None, + static_v: Optional[Tensor] = None, + return_qk: bool = False, + rpb: Tensor = None, + ) -> Tuple[Tensor, Optional[Tensor]]: + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + embed_dim_to_check: total dimension of the model. + num_heads: parallel attention heads. + in_proj_weight, in_proj_bias: input projection weight and bias. + bias_k, bias_v: bias of the key and value sequences to be added at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + dropout_p: probability of an element to be zeroed. + out_proj_weight, out_proj_bias: the output projection weight and bias. + training: apply dropout if is ``True``. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. This is an binary mask. When the value is True, + the corresponding value on the attention layer will be filled with -inf. + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. + use_separate_proj_weight: the function accept the proj. weights for query, key, + and value in different forms. If false, in_proj_weight will be used, which is + a combination of q_proj_weight, k_proj_weight, v_proj_weight. + q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias. + static_k, static_v: static key and value used for attention operators. + + + Shape: + Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions + will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. 
If a BoolTensor is provided, positions with ``True`` + are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + - return_qk: whether return Q and K. + - rpb: relative postion bias + + Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. + """ + if not torch.jit.is_scripting(): + tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, + out_proj_weight, out_proj_bias) + if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops): + return handle_torch_function( + multi_head_attention_forward, tens_ops, query, key, value, + embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias, + bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight, + out_proj_bias, training=training, key_padding_mask=key_padding_mask, + need_weights=need_weights, attn_mask=attn_mask, + use_separate_proj_weight=use_separate_proj_weight, + q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight, + v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v) + tgt_len, bsz, embed_dim = query.size() + assert embed_dim == embed_dim_to_check + # allow MHA to have different sizes for the feature dimension + assert key.size(0) == value.size(0) and key.size(1) == value.size(1) + + head_dim = embed_dim // num_heads + assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads" + scaling = float(head_dim) ** -0.5 + + if not use_separate_proj_weight: + if torch.equal(query, key) and torch.equal(key, value): + # self-attention + q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1) + + elif torch.equal(key, value): + # encoder-decoder attention + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = linear(query, _w, _b) + + if key is None: + assert value is None + k = None + v = None + else: + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + k, v = linear(key, _w, _b).chunk(2, dim=-1) + + else: + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = linear(query, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = embed_dim * 2 + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + k = linear(key, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim * 2 + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + 
v = linear(value, _w, _b) + else: + q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight) + len1, len2 = q_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == query.size(-1) + + k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight) + len1, len2 = k_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == key.size(-1) + + v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight) + len1, len2 = v_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == value.size(-1) + + if in_proj_bias is not None: + q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim]) + k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)]) + v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):]) + else: + q = linear(query, q_proj_weight_non_opt, in_proj_bias) + k = linear(key, k_proj_weight_non_opt, in_proj_bias) + v = linear(value, v_proj_weight_non_opt, in_proj_bias) + q = q * scaling + + if attn_mask is not None: + assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \ + attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \ + 'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype) + if attn_mask.dtype == torch.uint8: + warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.") + attn_mask = attn_mask.to(torch.bool) + + if attn_mask.dim() == 2: + attn_mask = attn_mask.unsqueeze(0) + if list(attn_mask.size()) != [1, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 2D attn_mask is not correct.') + elif attn_mask.dim() == 3: + if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 3D attn_mask is not correct.') + else: + raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim())) + # attn_mask's dim is 3 now. + + # convert ByteTensor key_padding_mask to bool + if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8: + warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.") + key_padding_mask = key_padding_mask.to(torch.bool) + + if bias_k is not None and bias_v is not None: + if static_k is None and static_v is None: + k = torch.cat([k, bias_k.repeat(1, bsz, 1)]) + v = torch.cat([v, bias_v.repeat(1, bsz, 1)]) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + else: + assert static_k is None, "bias cannot be added to static key." + assert static_v is None, "bias cannot be added to static value." 
+ else: + assert bias_k is None + assert bias_v is None + + # L, N, E + if return_qk: + return_q = q.clone() / scaling + return_k = k.clone() + + q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1) + if k is not None: + k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + if v is not None: + v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + + if static_k is not None: + assert static_k.size(0) == bsz * num_heads + assert static_k.size(2) == head_dim + k = static_k + + if static_v is not None: + assert static_v.size(0) == bsz * num_heads + assert static_v.size(2) == head_dim + v = static_v + + src_len = k.size(1) + + if key_padding_mask is not None: + assert key_padding_mask.size(0) == bsz + assert key_padding_mask.size(1) == src_len + + if add_zero_attn: + src_len += 1 + k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1) + v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + + attn_output_weights = torch.bmm(q, k.transpose(1, 2)) + assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len] + + if attn_mask is not None: + if attn_mask.dtype == torch.bool: + attn_output_weights.masked_fill_(attn_mask, float('-inf')) + else: + attn_output_weights += attn_mask + + + if key_padding_mask is not None: + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + attn_output_weights = attn_output_weights.masked_fill( + key_padding_mask.unsqueeze(1).unsqueeze(2), + float('-inf'), + ) + attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len) + + if rpb is not None: + attn_output_weights = attn_output_weights + rpb + attn_output_weights = softmax(attn_output_weights, dim=-1) + attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training) + + attn_output = torch.bmm(attn_output_weights, v) + assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim] + attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) + attn_output = linear(attn_output, out_proj_weight, out_proj_bias) + + if return_qk: + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return return_q, return_k, attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return return_q, return_k, attn_output, None + else: + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return attn_output, None diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/clip_vit.py b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/clip_vit.py new file mode 100644 index 0000000..5b8ea39 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/clip_vit.py @@ -0,0 +1,212 @@ +#!/usr/bin/env python + +import os +from collections import OrderedDict + +from timm.models.layers import DropPath +import torch +from torch import nn +import torch.utils.checkpoint as checkpoint + +from .attention import MultiheadAttention + + +MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model' +_MODELS = { + "ViT-B/32": 
os.path.join(MODEL_PATH, "vit_b32.pth"), + "ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"), + "ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"), + "ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"), +} + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def __init__(self, d_model, n_head, attn_mask=None, drop_path=0.0): + super().__init__() + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + print(f'Drop path rate: {drop_path}') + self.attn = MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + def attention(self, x, return_qk=False): + if return_qk: + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + q, k, attn_output, _ = self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask, return_qk=True) + return q, k, attn_output + else: + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def forward(self, x, return_qk=False): + if return_qk: + q, k, attn_output = self.attention(self.ln_1(x), return_qk=True) + x = x + self.drop_path(attn_output) + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x, q, k + else: + x = x + self.drop_path(self.attention(self.ln_1(x))) + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class Transformer(nn.Module): + def __init__(self, width, layers, heads, attn_mask=None, drop_path_rate=0.): + super().__init__() + self.width = width + self.layers = layers + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, layers)] + self.resblocks = nn.ModuleList([ + ResidualAttentionBlock(width, heads, attn_mask, drop_path=dpr[i]) for i in range(layers) + ]) + + def forward(self, x, return_num=4, T=8): + features = [] + for i, resblock in enumerate(self.resblocks): + x = resblock(x) + if i >= self.layers - return_num: + L, NT, C = x.shape + N = NT // T + features.append(x.view(L, N, T, C)) + return features + + +class VisionTransformer(nn.Module): + def __init__( + self, input_resolution, patch_size, width, layers, heads, output_dim, drop_path_rate=0., + ): + super().__init__() + self.input_resolution = input_resolution + self.output_dim = output_dim + self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False) + + scale = width ** -0.5 + self.class_embedding = nn.Parameter(scale * torch.randn(width)) + self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)) + self.ln_pre = LayerNorm(width) + + self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate) + + def forward(self, x, return_num=4, return_qk=True): + N, C, T, H, W = x.shape + x = x.permute(0, 2, 1, 3, 4).reshape(N * T, C, H, W) + + x = self.conv1(x) # shape = [*, width, grid, grid] + x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2] + x = x.permute(0, 2, 1) # 
shape = [*, grid ** 2, width] + x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] + x = x + self.positional_embedding.to(x.dtype) + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) # NLD -> LND + features = self.transformer( + x, return_num=return_num, T=T, + ) + + return features + + +def vit_b32(pretrained=True, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=224, + patch_size=32, + width=768, + layers=12, + heads=12, + output_dim=512, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu') + model.load_state_dict(state_dict) + return model.eval() + + +def vit_b16(pretrained=True, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=224, + patch_size=16, + width=768, + layers=12, + heads=12, + output_dim=512, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu') + model.load_state_dict(state_dict) + return model.eval() + + +def vit_l14(pretrained=True, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=224, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu') + model.load_state_dict(state_dict) + return model.eval() + + +def vit_l14_336(pretrained=True, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=336, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu') + model.load_state_dict(state_dict) + return model.eval() + + +if __name__ == '__main__': + import time + from fvcore.nn import FlopCountAnalysis + from fvcore.nn import flop_count_table + + model = vit_b32(pretrained=True) + + flops = FlopCountAnalysis(model, torch.rand(1, 3, 8, 224, 224)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/clip_vit_2plus1d.py b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/clip_vit_2plus1d.py new file mode 100644 index 0000000..345ccf3 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/clip_vit_2plus1d.py @@ -0,0 +1,258 @@ +import os +from collections import OrderedDict + +from timm.models.layers import DropPath +import torch +from torch import nn +from einops import rearrange + +from .attention import MultiheadAttention + + +MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model' +_MODELS = { + "ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"), + "ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"), + "ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"), + "ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"), +} + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def 
__init__(self, d_model, n_head, attn_mask=None, drop_path=0.0): + super().__init__() + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + print(f'Drop path rate: {drop_path}') + # temporal + self.attn_t = MultiheadAttention(d_model, n_head) + self.ln_t = LayerNorm(d_model) + # spatial + self.attn = MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + # init zero + print('Init zero for (2+1)d') + nn.init.constant_(self.attn_t.in_proj_weight, 0) + nn.init.constant_(self.attn_t.in_proj_bias, 0) + nn.init.constant_(self.attn_t.out_proj.weight, 1) + nn.init.constant_(self.attn_t.out_proj.bias, 0) + + def attention(self, x): + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def attention_temporal(self, x): + self.attn_mask = None + return self.attn_t(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def forward(self, x, T=8): + # temporal + # x: 1+HWT, N, C + xt = x[1:, :, :] + _, N, C = xt.shape + xt = rearrange(xt, '(l t) n c -> t (n l) c', n=N, t=T) + res_temporal = self.attention_temporal(self.ln_t(xt)) + res_temporal = rearrange(res_temporal, 't (n l) c -> (l t) n c', n=N, t=T) + xt = x[1:, :, :] + self.drop_path(res_temporal) + + # spatial + init_cls_token = x[:1, :, :] + cls_token = init_cls_token.repeat(1, T, 1).view(1, T*N, C) + xs = rearrange(xt, '(l t) n c -> l (t n) c', n=N, t=T) + xs = torch.cat((cls_token, xs), 0) + res_spatial = self.attention(self.ln_1(xs)) + + # Taking care of CLS token + cls_token = res_spatial[0, :, :] + cls_token = rearrange(cls_token, '(t n) c -> t n c', n=N) + cls_token = torch.mean(cls_token, 0, True) # averaging for every frame + res_spatial = res_spatial[1:, :, :] + res_spatial = rearrange(res_spatial, 'l (t n) c -> (l t) n c', n=N) + x = x + self.drop_path(torch.cat((cls_token, res_spatial), 0)) + + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class Transformer(nn.Module): + def __init__(self, width, layers, heads, attn_mask=None, drop_path_rate=0.): + super().__init__() + self.width = width + self.layers = layers + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, layers)] + self.resblocks = nn.ModuleList([ + ResidualAttentionBlock(width, heads, attn_mask, drop_path=dpr[i]) for i in range(layers) + ]) + + def forward(self, x, return_num=4, T=8): + features = [] + for i, resblock in enumerate(self.resblocks): + x = resblock(x, T=T) + if i >= self.layers - return_num: + # LT + 1, N, C + LT, N, C = x.shape + L = (LT - 1) // T + cls_x, tmp_x = x[:1], x[1:] + cls_x = cls_x.unsqueeze(2).repeat(1, 1, T, 1) + tmp_x = tmp_x.reshape(L, T, N, C).permute(0, 2, 1, 3) # L, N, T, C + tmp_x = torch.cat([cls_x, tmp_x], dim=0 )# L + 1, N, T, C + features.append(tmp_x) + return features + + +class VisionTransformer(nn.Module): + def __init__( + self, input_resolution, patch_size, width, layers, heads, output_dim, num_frames=8, drop_path_rate=0., + ): + super().__init__() + self.input_resolution = input_resolution + self.output_dim = output_dim + self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False) + + scale = width ** -0.5 + self.class_embedding = nn.Parameter(scale * 
torch.randn(width)) + self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)) + self.temporal_positional_embedding = nn.Parameter(torch.zeros(1, num_frames, width)) + self.ln_pre = LayerNorm(width) + + self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate) + + + + def forward(self, x, return_num=4): + N, C, T, H, W = x.shape + x = x.permute(0, 2, 1, 3, 4).reshape(N * T, C, H, W) + + x = self.conv1(x) # shape = [*, width, grid, grid] + x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2] + x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width] + x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] + x = x + self.positional_embedding.to(x.dtype) + + cls_tokens = x[:N, :1, :] + x = x[:, 1:] + x = rearrange(x, '(b t) n c -> (b n) t c', b=N, t=T) + # x = x + self.temporal_positional_embedding + x = x + self.temporal_positional_embedding + x = rearrange(x, '(b n) t c -> b (n t) c', b=N, t=T) + x = torch.cat((cls_tokens, x), dim=1) + + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) # NLD -> LND + features = self.transformer( + x, return_num=return_num, T=T, + ) + + return features + + +def vit_2plus1d_b32(pretrained=True, num_frames=8, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=224, + patch_size=32, + width=768, + layers=12, + heads=12, + output_dim=512, + num_frames=num_frames, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +def vit_2plus1d_b16(pretrained=True, num_frames=8, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=224, + patch_size=16, + width=768, + layers=12, + heads=12, + output_dim=512, + num_frames=num_frames, + drop_path_rate=drop_path_rate, + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +def vit_2plus1d_l14(pretrained=True, num_frames=8, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=224, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + num_frames=num_frames, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +def vit_2plus1d_l14_336(pretrained=True, num_frames=8, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=336, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + num_frames=num_frames, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +if __name__ == '__main__': + import time + from fvcore.nn import FlopCountAnalysis + from fvcore.nn import flop_count_table + + model = vit_2plus1d_b32(pretrained=True) + + flops = FlopCountAnalysis(model, torch.rand(1, 3, 8, 224, 224)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) \ No newline at end of file diff --git 
a/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/clip_vit_2plus1d_dw_bias.py b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/clip_vit_2plus1d_dw_bias.py new file mode 100644 index 0000000..1432a60 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/clip_vit_2plus1d_dw_bias.py @@ -0,0 +1,345 @@ +import os +from collections import OrderedDict + +from timm.models.layers import DropPath +import torch +from torch import nn +from einops import rearrange +import torch.utils.checkpoint as checkpoint + +from .attention_bias import MultiheadAttention +from ipdb import set_trace + +MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model' +_MODELS = { + "ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"), + "ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"), + "ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"), + "ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"), +} + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def __init__(self, d_model, n_head, attn_mask=None, drop_path=0.0, t_size=8, spatial_size=7, init_zero=True): + super().__init__() + + self.n_head = n_head + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + print(f'Drop path rate: {drop_path}') + print(f'Add RPB: t_size {t_size}, spatial_size {spatial_size}') + self.pos_embed = nn.Conv3d(d_model, d_model, kernel_size=3, stride=1, padding=1, groups=d_model) + # temporal + self.attn_t = MultiheadAttention(d_model, n_head) + self.ln_t = LayerNorm(d_model) + self.rpb_t = nn.Parameter(torch.zeros([t_size * 2 - 1, n_head])) + + idx_tensor_t = torch.zeros([t_size, t_size], dtype=torch.long) + for q in range(t_size): + for k in range(t_size): + offs = q - k + t_size - 1 + idx_tensor_t[q, k] = offs + self.idx_tensor_t = idx_tensor_t + + # spatial + self.attn = MultiheadAttention(d_model, n_head) + self.rpb = nn.Parameter(torch.zeros([(spatial_size * 2 - 1) ** 2, n_head])) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + idx_tensor = torch.zeros([spatial_size ** 2, spatial_size ** 2], dtype=torch.long) + for q in range(spatial_size ** 2): + qi, qj = q // spatial_size, q % spatial_size + for k in range(spatial_size ** 2): + ki, kj = k // spatial_size, k % spatial_size + i_offs = qi - ki + spatial_size - 1 + j_offs = qj - kj + spatial_size - 1 + idx_tensor[q, k] = i_offs * (spatial_size * 2 - 1) + j_offs + self.idx_tensor = idx_tensor + + if init_zero: + # init zero + print('Init zero for (2+1)d') + nn.init.constant_(self.pos_embed.weight, 0) + nn.init.constant_(self.pos_embed.bias, 0) + nn.init.constant_(self.attn_t.in_proj_weight, 0) + nn.init.constant_(self.attn_t.in_proj_bias, 0) + nn.init.constant_(self.attn_t.out_proj.weight, 1) + nn.init.constant_(self.attn_t.out_proj.bias, 0) + nn.init.constant_(self.ln_t.weight, 1.) + nn.init.constant_(self.ln_t.bias, 0.) 
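+            # init_zero=False path (a descriptive note, not in the original): truncated-normal init for the relative position biases, the depthwise pos_embed and the temporal attention projections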
+ else: + nn.init.trunc_normal_(self.rpb_t, std=.02) + nn.init.trunc_normal_(self.rpb, std=.02) + + nn.init.trunc_normal_(self.pos_embed.weight, std=.02) + nn.init.constant_(self.pos_embed.bias, 0) + nn.init.trunc_normal_(self.attn_t.in_proj_weight, std=.02) + nn.init.constant_(self.attn_t.in_proj_bias, 0) + nn.init.trunc_normal_(self.attn_t.out_proj.weight, std=.02) + nn.init.constant_(self.attn_t.out_proj.bias, 0) + nn.init.constant_(self.ln_t.weight, 1.) + nn.init.constant_(self.ln_t.bias, 0.) + + def attention(self, x, rpb=None): + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask, rpb=rpb)[0] + + def attention_temporal(self, x, rpb=None): + self.attn_mask = None + return self.attn_t(x, x, x, need_weights=False, attn_mask=self.attn_mask, rpb=rpb)[0] + + def forward(self, x, T=8, mode='video'): + # temporal + # x: 1+HWT, N, C + # pos_emb + tmp_x = x[1:, :, :] + LT, N, C = tmp_x.shape + L = LT // T + H = W = int(L ** 0.5) + tmp_x = tmp_x.view(H, W, T, N, C).permute(3, 4, 2, 0, 1) + tmp_x = tmp_x + self.pos_embed(tmp_x) + tmp_x = tmp_x.view(N, C, T, L).permute(3, 2, 0, 1).view(LT, N, C) + x[1:, :, :] = tmp_x + + xt = x[1:, :, :] + _, N, C = xt.shape + xt = rearrange(xt, '(l t) n c -> t (n l) c', n=N, t=T) + # no rpb_t for image + if mode == 'image': + rpb_t = None + else: + # rpb_t: T, T, H => B*H, T, T + self.idx_tensor_t = self.idx_tensor_t.to(xt.device) + rpb_t = self.rpb_t[self.idx_tensor_t].permute(2, 0, 1).repeat(N*L, 1, 1) + + # set_trace() + res_temporal = self.attention_temporal(self.ln_t(xt), rpb=rpb_t) + res_temporal = rearrange(res_temporal, 't (n l) c -> (l t) n c', n=N, t=T) + xt = x[1:, :, :] + self.drop_path(res_temporal) + + # spatial + init_cls_token = x[:1, :, :] + cls_token = init_cls_token.repeat(1, T, 1).view(1, T*N, C) + xs = rearrange(xt, '(l t) n c -> l (t n) c', n=N, t=T) + xs = torch.cat((cls_token, xs), 0) + # rpb: L, L, H => B*H, L+1, L+1 + rpb = torch.zeros((self.n_head, L+1, L+1), device=xs.device, dtype=xs.dtype) + self.idx_tensor = self.idx_tensor.to(xs.device) + rpb[:, 1:, 1:] = self.rpb[self.idx_tensor].permute(2, 0, 1) + rpb = rpb.repeat(T*N, 1, 1) + res_spatial = self.attention(self.ln_1(xs), rpb=rpb) + + # Taking care of CLS token + cls_token = res_spatial[0, :, :] + cls_token = rearrange(cls_token, '(t n) c -> t n c', n=N) + cls_token = torch.mean(cls_token, 0, True) # averaging for every frame + res_spatial = res_spatial[1:, :, :] + res_spatial = rearrange(res_spatial, 'l (t n) c -> (l t) n c', n=N) + x = x + self.drop_path(torch.cat((cls_token, res_spatial), 0)) + + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class Transformer(nn.Module): + def __init__(self, width, layers, heads, attn_mask=None, drop_path_rate=0., t_size=8, spatial_size=7, init_zero=True): + super().__init__() + self.width = width + self.layers = layers + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, layers)] + self.resblocks = nn.ModuleList([ + ResidualAttentionBlock(width, heads, attn_mask, + drop_path=dpr[i], t_size=t_size, spatial_size=spatial_size, init_zero=init_zero) for i in range(layers) + ]) + + def forward(self, x, return_num=4, T=8, mode='video'): + features = [] + for i, resblock in enumerate(self.resblocks): + x = resblock(x, T=T, mode=mode) + if i >= self.layers - return_num: + # LT + 1, N, C + LT, N, C = x.shape + L = (LT - 1) // T + cls_x, tmp_x = x[:1], x[1:] + cls_x = cls_x.unsqueeze(2).repeat(1, 1, T, 
1) + tmp_x = tmp_x.reshape(L, T, N, C).permute(0, 2, 1, 3) # L, N, T, C + tmp_x = torch.cat([cls_x, tmp_x], dim=0 )# L + 1, N, T, C + features.append(tmp_x) + return features + + +class VisionTransformer(nn.Module): + def __init__( + self, input_resolution, patch_size, width, layers, heads, output_dim, + num_frames=8, drop_path_rate=0., t_size=8, spatial_size=7, init_zero=True, + ): + super().__init__() + self.input_resolution = input_resolution + self.output_dim = output_dim + self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False) + + scale = width ** -0.5 + self.class_embedding = nn.Parameter(scale * torch.randn(width)) + self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)) + self.temporal_positional_embedding = nn.Parameter(torch.zeros(1, num_frames, width)) + self.ln_pre = LayerNorm(width) + + self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate, t_size=t_size, spatial_size=spatial_size, init_zero=init_zero) + self.mask_embedding = nn.Parameter(scale * torch.randn(width)) + + print('-' * 100) + print('tsize:', t_size, 'num frame: ', num_frames) + print('-' * 100) + + def forward(self, x, return_num=4, masked_indices=None, mode='video'): + if len(x.size()) == 5: + N, C, T, H, W = x.shape + x = x.permute(0, 2, 1, 3, 4).reshape(N * T, C, H, W) + + x = self.conv1(x) # shape = [*, width, grid, grid] + x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2] + x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width] + if masked_indices is not None: + masked_indices = masked_indices.view(N * T, -1) + x[masked_indices] = self.mask_embedding.type(x.dtype) + x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] + x = x + self.positional_embedding.to(x.dtype) + + cls_tokens = x[:N, :1, :] + x = x[:, 1:] + # add temporal position embedding for video + if mode == 'video': + x = rearrange(x, '(b t) n c -> (b n) t c', b=N, t=T) + # x = x + self.temporal_positional_embedding + x = x + self.temporal_positional_embedding + x = rearrange(x, '(b n) t c -> b (n t) c', b=N, t=T) + else: + pass + x = torch.cat((cls_tokens, x), dim=1) + + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) # NLD -> LND + features = self.transformer( + x, return_num=return_num, T=T, mode=mode + ) + + return features + + +def vit_2plus1d_dw_bias_b32(pretrained=True, num_frames=8, drop_path_rate=0., t_size=8, init_zero=True): + model = VisionTransformer( + input_resolution=224, + patch_size=32, + width=768, + layers=12, + heads=12, + output_dim=512, + num_frames=num_frames, + drop_path_rate=drop_path_rate, + init_zero=init_zero, + t_size=t_size, + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +def vit_2plus1d_dw_bias_b16(pretrained=True, num_frames=8, drop_path_rate=0., t_size=8, init_zero=True): + model = VisionTransformer( + input_resolution=224, + patch_size=16, + width=768, + layers=12, + heads=12, + output_dim=512, + num_frames=num_frames, + drop_path_rate=drop_path_rate, + t_size=t_size, + spatial_size=14, + init_zero=init_zero, + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + 
return model.eval() + + +def vit_2plus1d_dw_bias_l14(pretrained=True, num_frames=8, drop_path_rate=0., t_size=8, init_zero=True): + model = VisionTransformer( + input_resolution=224, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + num_frames=num_frames, + drop_path_rate=drop_path_rate, + t_size=t_size, + init_zero=init_zero, + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +def vit_2plus1d_dw_bias_l14_336(pretrained=True, num_frames=8, drop_path_rate=0., t_size=8, init_zero=True): + model = VisionTransformer( + input_resolution=336, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + num_frames=num_frames, + drop_path_rate=drop_path_rate, + t_size=t_size, + init_zero=init_zero, + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +if __name__ == '__main__': + import time + from fvcore.nn import FlopCountAnalysis + from fvcore.nn import flop_count_table + + model = vit_2plus1d_dw_bias_b32(pretrained=True) + + flops = FlopCountAnalysis(model, torch.rand(4, 3, 8, 224, 224)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/evl_module.py b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/evl_module.py new file mode 100644 index 0000000..a1f688e --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/evl_module.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python + +from collections import OrderedDict + +from timm.models.layers import DropPath +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor): + return x * torch.sigmoid(1.702 * x) + + +class ResidualDecoderBlock(nn.Module): + def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None, + mlp_factor: float = 4.0, dropout: float = 0.0, drop_path: float = 0.0): + super().__init__() + + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + print(f'Drop path rate: {drop_path}') + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = nn.LayerNorm(d_model) + d_mlp = round(mlp_factor * d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_mlp)), + ("gelu", QuickGELU()), + ("dropout", nn.Dropout(dropout)), + ("c_proj", nn.Linear(d_mlp, d_model)) + ])) + self.ln_2 = nn.LayerNorm(d_model) + self.ln_3 = nn.LayerNorm(d_model) + self.attn_mask = attn_mask + + nn.init.xavier_uniform_(self.attn.in_proj_weight) + nn.init.xavier_uniform_(self.attn.out_proj.weight) + nn.init.xavier_uniform_(self.mlp[0].weight) + nn.init.xavier_uniform_(self.mlp[-1].weight) + + def attention(self, x: torch.Tensor, y: torch.Tensor): + #self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + # return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0] + assert self.attn_mask is None # not implemented + # manual forward to add position information + d_model = self.ln_1.weight.size(0) + q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model] + + k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model] + v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:] + Tx, Ty, N = q.size(0), k.size(0), q.size(1) + q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5)) + + aff = aff.softmax(dim=-1) + out = aff @ v + out = out.permute(2, 0, 1, 3).flatten(2) + out = self.attn.out_proj(out) + return out + + def forward(self, x: torch.Tensor, y: torch.Tensor): + x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y))) + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class TransformerDecoder(nn.Module): + def __init__(self, n_layers=4, + n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, + use_t_conv=True, use_t_pos_embed=True, num_classes=400, + add_residual=False, + ): + super().__init__() + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)] + self.dec = nn.ModuleList([ + ResidualDecoderBlock(n_dim, n_head, mlp_factor=mlp_factor, dropout=mlp_dropout[i], drop_path=dpr[i]) + for i in range(n_layers) + ]) + self.proj = nn.Sequential( + nn.LayerNorm(n_dim), + nn.Dropout(cls_dropout), + nn.Linear(n_dim, num_classes), + ) + self.temporal_cls_token = nn.Parameter(torch.zeros(n_dim)) + self.add_residual = add_residual + print(f'Add residual {add_residual}') + + if use_t_conv: + self.tconv = nn.ModuleList([ + nn.Conv1d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim) + for i in range(n_layers) + ]) + for m in self.tconv: + nn.init.constant_(m.bias, 0.) + m.weight.data[...] 
= torch.Tensor([0, 1, 0]) + else: + self.tconv = None + + if use_t_pos_embed: + self.pemb_t = nn.Parameter(torch.zeros([n_layers, t_size, n_dim])) + else: + self.pemb_t = None + + self.t_size = t_size + + def forward(self, clip_feats_all): + # clip_feats_all = clip_feats_all[-len(self.dec):] + # only return n_layers features, save memory + clip_feats = [x for x in clip_feats_all] + + L, N, T, C = clip_feats[0].size() + x = self.temporal_cls_token.view(1, 1, -1).repeat(1, N, 1) + + for i in range(len(clip_feats)): + if self.tconv is not None: + L, N, T, C = clip_feats[i].shape + clip_feats[i] = clip_feats[i].permute(0, 1, 3, 2).flatten(0, 1) # L * N, C, T + clip_feats[i] = self.tconv[i](clip_feats[i]).permute(0, 2, 1).contiguous().view(L, N, T, C) + if self.pemb_t is not None: + clip_feats[i] = clip_feats[i] + self.pemb_t[i] + clip_feats[i] = clip_feats[i].permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C + + for i in range(len(self.dec)): + x = self.dec[i](x, clip_feats[i]) + + if self.add_residual: + residual = clip_feats_all[-1][0].mean(1) + return self.proj(x[0, :, :] + residual) + else: + return self.proj(x[0, :, :]) + + +if __name__ == '__main__': + model = TransformerDecoder() + + # construct a fake input to demonstrate input tensor shape + L, N, T, C = 197, 1, 8, 768 # num_image_tokens, video_batch_size, t_size, feature_dim + # we use intermediate feature maps from multiple blocks, so input features should be a list + input_features = [] + for i in range(4): # vit-b has 12 blocks + # every item in input_features contains features maps from a single block + # every item is a tuple containing 3 feature maps: + # (1) block output features (i.e. after mlp) with shape L, N, T, C + # (2) projected query features with shape L, N, T, C + # (3) projected key features with shape L, N, T, C + input_features.append( + torch.zeros([L, N, T, C])) + # some small optimizations: + # (1) We only decode from the last $n$ blocks so it's good as long as the last $n$ items of input_features is valid and all previous items can be filled with None to save memory. By default $n=4$. + # (2) projected query/key features are optional. If you are using an uncompatible image backbone without query/key (e.g. CNN), you can fill the position with None (i.e. the tuple should be (Tensor, None, None) and set use_image_attnmap=False when constructing the model. 
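+    # A rough usage sketch (not part of the original demo; it assumes the ViT-B/16 checkpoint listed in clip_vit._MODELS is reachable): + #   backbone = vit_b16(pretrained=True)      # from the sibling clip_vit.py in evl_utils + #   video = torch.rand(1, 3, 8, 224, 224)    # N, C, T, H, W + #   feats = backbone(video, return_num=4)    # list of 4 feature maps, each [L, N, T, C] with L = 1 + (224 // 16) ** 2 = 197 + #   logits = TransformerDecoder()(feats)     # -> [N, 400] with the default n_dim=768, t_size=8, n_layers=4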
+ + print(model(input_features).shape) # should be N, 400 diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/evl_module_uniformer_diff_conv_balance.py b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/evl_module_uniformer_diff_conv_balance.py new file mode 100644 index 0000000..8ac990c --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc2/evl_utils/evl_module_uniformer_diff_conv_balance.py @@ -0,0 +1,533 @@ +#!/usr/bin/env python + +from collections import OrderedDict + +from timm.models.layers import trunc_normal_, DropPath +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor): + return x * torch.sigmoid(1.702 * x) + + +def conv_1x1x1(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (1, 1, 1), (1, 1, 1), (0, 0, 0), groups=groups) + +def conv_3x3x3(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (3, 3, 3), (1, 1, 1), (1, 1, 1), groups=groups) + +def conv_1x3x3(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (1, 3, 3), (1, 1, 1), (0, 1, 1), groups=groups) + +def bn_3d(dim): + return nn.BatchNorm3d(dim) + + +class STM(nn.Module): + def __init__(self, n_dim, reduction=4): + super(STM, self).__init__() + reduced_c = n_dim // reduction + self.reduce = nn.Sequential( + nn.Conv2d(n_dim, reduced_c, kernel_size=1, bias=False), + nn.BatchNorm2d(reduced_c) + ) + self.shift = nn.Conv2d(reduced_c, reduced_c, kernel_size=3, padding=1, groups=reduced_c, bias=False) + self.recover = nn.Sequential( + nn.Conv2d(reduced_c, n_dim, kernel_size=1, bias=False), + nn.BatchNorm2d(n_dim) + ) + self.pad = (0, 0, 0, 0, 0, 0, 0, 1) + + def forward(self, x): + # x: [L, N, T, C] + cls_token, x = x[:1], x[1:] + L, N, T, C = x.shape + H = W = int(L**0.5) + + fea = x.permute(1, 2, 3, 0).reshape(N*T, C, H, W) + bottleneck = self.reduce(fea) # NT, C//r, H, W + + # t feature + reshape_bottleneck = bottleneck.view((-1, T) + bottleneck.size()[1:]) # N, T, C//r, H, W + t_fea, __ = reshape_bottleneck.split([T-1, 1], dim=1) # N, T-1, C//r, H, W + + # apply transformation conv to t+1 feature + conv_bottleneck = self.shift(bottleneck) # NT, C//r, H, W + # reshape fea: N, T, C//r, H, W + reshape_conv_bottleneck = conv_bottleneck.view((-1, T) + conv_bottleneck.size()[1:]) + __, tPlusone_fea = reshape_conv_bottleneck.split([1, T-1], dim=1) # N, T-1, C//r, H, W + + # motion fea = t+1_fea - t_fea + # pad the last timestamp + diff_fea = tPlusone_fea - t_fea # N, T-1, C//r, H, W + # pad = (0,0,0,0,0,0,0,1) + diff_fea_pluszero = F.pad(diff_fea, self.pad, mode="constant", value=0) # N, T, C//r, H, W + diff_fea_pluszero = diff_fea_pluszero.view((-1,) + diff_fea_pluszero.size()[2:]) # NT, C//r, H, W + y = self.recover(diff_fea_pluszero) # NT, C, H, W + + # reshape + y = y.reshape(N, T, C, L).permute(3, 0, 1, 2) + y = torch.cat([cls_token, y], dim=0) + return y + + +class DSTM(nn.Module): + def __init__(self, n_dim, reduction=4): + super(DSTM, self).__init__() + reduced_c = n_dim // reduction + self.reduce = nn.Sequential( + nn.Conv2d(n_dim, reduced_c, kernel_size=1, bias=False), + nn.BatchNorm2d(reduced_c) + ) + # DW(T+1) - T + self.shift_pre = nn.Conv2d(reduced_c, reduced_c, kernel_size=3, padding=1, groups=reduced_c, bias=False) + self.recover_pre = nn.Sequential( + nn.Conv2d(reduced_c, n_dim, kernel_size=1, bias=False), + nn.BatchNorm2d(n_dim) + ) + self.pad_pre = (0, 0, 0, 0, 0, 0, 0, 1) + # DW(T-1) - T + self.shift_back = nn.Conv2d(reduced_c, reduced_c, kernel_size=3, padding=1, groups=reduced_c, 
bias=False) + self.recover_back = nn.Sequential( + nn.Conv2d(reduced_c, n_dim, kernel_size=1, bias=False), + nn.BatchNorm2d(n_dim) + ) + self.pad_back = (0, 0, 0, 0, 0, 0, 0, 1) + + def forward(self, x): + # x: [L, N, T, C] + cls_token, x = x[:1], x[1:] + L, N, T, C = x.shape + H = W = int(L**0.5) + + fea = x.permute(1, 2, 3, 0).reshape(N*T, C, H, W) + bottleneck = self.reduce(fea) # NT, C//r, H, W + + # t feature + reshape_bottleneck = bottleneck.view((-1, T) + bottleneck.size()[1:]) # N, T, C//r, H, W + pre_t_fea, __ = reshape_bottleneck.split([T-1, 1], dim=1) # N, T-1, C//r, H, W + back_t_fea, __ = reshape_bottleneck.split([1, T-1], dim=1) # N, T-1, C//r, H, W + # apply transformation conv to t+1/t-1 feature + pre_conv_bottleneck = self.shift_pre(bottleneck) # NT, C//r, H, W + back_conv_bottleneck = self.shift_back(bottleneck) # NT, C//r, H, W + # reshape fea: N, T, C//r, H, W + pre_reshape_conv_bottleneck = pre_conv_bottleneck.view((-1, T) + pre_conv_bottleneck.size()[1:]) + back_reshape_conv_bottleneck = back_conv_bottleneck.view((-1, T) + back_conv_bottleneck.size()[1:]) + __, tPlusone_fea = pre_reshape_conv_bottleneck.split([1, T-1], dim=1) # N, T-1, C//r, H, W + tMinusone_fea, _ = back_reshape_conv_bottleneck.split([T-1, 1], dim=1) # N, T-1, C//r, H, W + # pre_fea = t+1_fea - t_fea + # back_fea = t-1_fea - t_fea + pre_diff_fea = tPlusone_fea - pre_t_fea # N, T-1, C//r, H, W + back_diff_fea = tMinusone_fea - back_t_fea # N, T-1, C//r, H, W + # pad the last/first timestamp + pre_diff_fea_pluszero = F.pad(pre_diff_fea, self.pad_pre, mode="constant", value=0) # N, T, C//r, H, W + pre_diff_fea_pluszero = pre_diff_fea_pluszero.view((-1,) + pre_diff_fea_pluszero.size()[2:]) # NT, C//r, H, W + back_diff_fea_pluszero = F.pad(back_diff_fea, self.pad_back, mode="constant", value=0) # N, T, C//r, H, W + back_diff_fea_pluszero = back_diff_fea_pluszero.view((-1,) + back_diff_fea_pluszero.size()[2:]) # NT, C//r, H, W + # recover channel + pre_y = self.recover_pre(pre_diff_fea_pluszero) # NT, C, H, W + back_y = self.recover_back(back_diff_fea_pluszero) # NT, C, H, W + # reshape + y = (pre_y + back_y).reshape(N, T, C, L).permute(3, 0, 1, 2) + + # cat cls_token + y = torch.cat([cls_token, y], dim=0) + return y + + +class TDN(nn.Module): + def __init__(self, channel, n_segment=8, index=1, reduction=4): + super(TDN, self).__init__() + self.channel = channel + self.reduction = reduction + self.n_segment = n_segment + self.stride = 2**(index-1) + self.conv1 = nn.Conv2d(in_channels=self.channel, + out_channels=self.channel//self.reduction, + kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(num_features=self.channel//self.reduction) + self.conv2 = nn.Conv2d(in_channels=self.channel//self.reduction, + out_channels=self.channel//self.reduction, + kernel_size=3, padding=1, groups=self.channel//self.reduction, bias=False) + + self.avg_pool_forward2 = nn.AvgPool2d(kernel_size=2, stride=2) + self.avg_pool_forward4 = nn.AvgPool2d(kernel_size=4, stride=4) + + self.sigmoid_forward = nn.Sigmoid() + + self.avg_pool_backward2 = nn.AvgPool2d(kernel_size=2, stride=2)#nn.AdaptiveMaxPool2d(1) + self.avg_pool_backward4 = nn.AvgPool2d(kernel_size=4, stride=4) + + self.sigmoid_backward = nn.Sigmoid() + + self.pad1_forward = (0, 0, 0, 0, 0, 0, 0, 1) + self.pad1_backward = (0, 0, 0, 0, 0, 0, 1, 0) + + self.conv3 = nn.Conv2d(in_channels=self.channel//self.reduction, + out_channels=self.channel, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(num_features=self.channel) + + self.conv3_smallscale2 = 
nn.Conv2d(in_channels=self.channel//self.reduction, + out_channels=self.channel//self.reduction,padding=1, kernel_size=3, bias=False) + self.bn3_smallscale2 = nn.BatchNorm2d(num_features=self.channel//self.reduction) + + self.conv3_smallscale4 = nn.Conv2d(in_channels = self.channel//self.reduction, + out_channels=self.channel//self.reduction,padding=1, kernel_size=3, bias=False) + self.bn3_smallscale4 = nn.BatchNorm2d(num_features=self.channel//self.reduction) + + def spatial_pool(self, x): + nt, channel, height, width = x.size() + input_x = x + # [N, C, H * W] + input_x = input_x.view(nt, channel, height * width) + # [N, 1, C, H * W] + input_x = input_x.unsqueeze(1) + # [N, 1, H, W] + context_mask = self.conv_mask(x) + # [N, 1, H * W] + context_mask = context_mask.view(nt, 1, height * width) + # [N, 1, H * W] + context_mask = self.softmax(context_mask) + context_mask = context_mask.view(nt,1,height,width) + return context_mask + + def forward(self, x): + # x: [L, N, T, C] + cls_token, x = x[:1], x[1:] + L, N, T, C = x.shape + H = W = int(L**0.5) + fea = x.permute(1, 2, 3, 0).reshape(N*T, C, H, W) + + bottleneck = self.conv1(fea) # nt, c//r, h, w + bottleneck = self.bn1(bottleneck) # nt, c//r, h, w + reshape_bottleneck = bottleneck.view((-1, self.n_segment) + bottleneck.size()[1:]) # n, t, c//r, h, w + + t_fea_forward, _ = reshape_bottleneck.split([self.n_segment -1, 1], dim=1) # n, t-1, c//r, h, w + _, t_fea_backward = reshape_bottleneck.split([1, self.n_segment -1], dim=1) # n, t-1, c//r, h, w + + conv_bottleneck = self.conv2(bottleneck) # nt, c//r, h, w + reshape_conv_bottleneck = conv_bottleneck.view((-1, self.n_segment) + conv_bottleneck.size()[1:]) # n, t, c//r, h, w + _, tPlusone_fea_forward = reshape_conv_bottleneck.split([1, self.n_segment-1], dim=1) # n, t-1, c//r, h, w + tPlusone_fea_backward ,_ = reshape_conv_bottleneck.split([self.n_segment-1, 1], dim=1) # n, t-1, c//r, h, w + diff_fea_forward = tPlusone_fea_forward - t_fea_forward # n, t-1, c//r, h, w + diff_fea_backward = tPlusone_fea_backward - t_fea_backward# n, t-1, c//r, h, w + diff_fea_pluszero_forward = F.pad(diff_fea_forward, self.pad1_forward, mode="constant", value=0) # n, t, c//r, h, w + diff_fea_pluszero_forward = diff_fea_pluszero_forward.view((-1,) + diff_fea_pluszero_forward.size()[2:]) #nt, c//r, h, w + diff_fea_pluszero_backward = F.pad(diff_fea_backward, self.pad1_backward, mode="constant", value=0) # n, t, c//r, h, w + diff_fea_pluszero_backward = diff_fea_pluszero_backward.view((-1,) + diff_fea_pluszero_backward.size()[2:]) #nt, c//r, h, w + y_forward_smallscale2 = self.avg_pool_forward2(diff_fea_pluszero_forward) # nt, c//r, 1, 1 + y_backward_smallscale2 = self.avg_pool_backward2(diff_fea_pluszero_backward) # nt, c//r, 1, 1 + + y_forward_smallscale4 = diff_fea_pluszero_forward + y_backward_smallscale4 = diff_fea_pluszero_backward + y_forward_smallscale2 = self.bn3_smallscale2(self.conv3_smallscale2(y_forward_smallscale2)) + y_backward_smallscale2 = self.bn3_smallscale2(self.conv3_smallscale2(y_backward_smallscale2)) + + y_forward_smallscale4 = self.bn3_smallscale4(self.conv3_smallscale4(y_forward_smallscale4)) + y_backward_smallscale4 = self.bn3_smallscale4(self.conv3_smallscale4(y_backward_smallscale4)) + + y_forward_smallscale2 = F.interpolate(y_forward_smallscale2, diff_fea_pluszero_forward.size()[2:]) + y_backward_smallscale2 = F.interpolate(y_backward_smallscale2, diff_fea_pluszero_backward.size()[2:]) + + y_forward = self.bn3(self.conv3(1.0/3.0*diff_fea_pluszero_forward + 
1.0/3.0*y_forward_smallscale2 + 1.0/3.0*y_forward_smallscale4))# nt, c, 1, 1 + y_backward = self.bn3(self.conv3(1.0/3.0*diff_fea_pluszero_backward + 1.0/3.0*y_backward_smallscale2 + 1.0/3.0*y_backward_smallscale4)) # nt, c, 1, 1 + + y_forward = self.sigmoid_forward(y_forward) - 0.5 + y_backward = self.sigmoid_backward(y_backward) - 0.5 + + y = 0.5 * y_forward + 0.5 * y_backward + attn = fea * y + x = x + attn.reshape(N, T, C, L).permute(3, 0, 1, 2) + x = torch.cat([cls_token, x], dim=0) + return x + + +class CMlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = conv_1x1x1(in_features, hidden_features) + self.act = act_layer() + self.fc2 = conv_1x1x1(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class CBlock(nn.Module): + def __init__(self, dim, mlp_ratio=4., dropout=0., drop_path=0., uni_type='3d', add_ffn=True): + super().__init__() + self.norm1 = bn_3d(dim) + self.conv1 = conv_1x1x1(dim, dim, 1) + self.conv2 = conv_1x1x1(dim, dim, 1) + if uni_type == '3d': + print('Use 3d conv for local MHRA') + self.attn = conv_3x3x3(dim, dim, groups=dim) + else: + print('Use 2d conv for local MHRA') + self.attn = conv_1x3x3(dim, dim, groups=dim) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.add_ffn = add_ffn + if add_ffn: + print('Add FFN in local MHRA') + self.norm2 = bn_3d(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = CMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=nn.GELU, drop=dropout) + + print('Init zero') + nn.init.constant_(self.conv2.weight, 0.) + nn.init.constant_(self.conv2.bias, 0.) + if add_ffn: + nn.init.constant_(self.mlp.fc2.weight, 0.) + nn.init.constant_(self.mlp.fc2.bias, 0.) + + def forward(self, x): + x = x + self.drop_path(self.conv2(self.attn(self.conv1(self.norm1(x))))) + if self.add_ffn: + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class ResidualDecoderBlock(nn.Module): + def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None, + mlp_factor: float = 4.0, dropout: float = 0.0, drop_path: float = 0.0, init_zero=True): + super().__init__() + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + print(f'Drop path rate: {drop_path}') + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = nn.LayerNorm(d_model) + d_mlp = round(mlp_factor * d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_mlp)), + ("gelu", QuickGELU()), + ("dropout", nn.Dropout(dropout)), + ("c_proj", nn.Linear(d_mlp, d_model)) + ])) + self.ln_2 = nn.LayerNorm(d_model) + self.ln_3 = nn.LayerNorm(d_model) + self.attn_mask = attn_mask + + if init_zero: + nn.init.xavier_uniform_(self.attn.in_proj_weight) + nn.init.constant_(self.attn.in_proj_bias, 0.) + # nn.init.xavier_uniform_(self.attn.out_proj.weight) + nn.init.constant_(self.attn.out_proj.weight, 0.) + nn.init.constant_(self.attn.out_proj.bias, 0.) + nn.init.xavier_uniform_(self.mlp[0].weight) + # nn.init.xavier_uniform_(self.mlp[-1].weight) + nn.init.constant_(self.mlp[-1].weight, 0.) + nn.init.constant_(self.mlp[-1].bias, 0.) 
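+            # init_zero=False path (a descriptive note, not in the original): truncated-normal init for the cross-attention and MLP projections instead of zeroing the output layers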
+ else: + nn.init.trunc_normal_(self.attn.in_proj_weight, std=.02) + nn.init.constant_(self.attn.in_proj_bias, 0.) + nn.init.trunc_normal_(self.attn.out_proj.weight, std=.02) + nn.init.constant_(self.attn.out_proj.bias, 0.) + nn.init.trunc_normal_(self.mlp.c_fc.weight, std=.02) + nn.init.constant_(self.mlp.c_fc.bias, 0.) + nn.init.trunc_normal_(self.mlp.c_proj.weight, std=.02) + nn.init.constant_(self.mlp.c_proj.bias, 0.) + + def attention(self, x: torch.Tensor, y: torch.Tensor): + #self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + # return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0] + assert self.attn_mask is None # not implemented + # manual forward to add position information + d_model = self.ln_1.weight.size(0) + q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model] + + k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model] + v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:] + Tx, Ty, N = q.size(0), k.size(0), q.size(1) + q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5)) + + aff = aff.softmax(dim=-1) + out = aff @ v + out = out.permute(2, 0, 1, 3).flatten(2) + out = self.attn.out_proj(out) + return out + + def forward(self, x: torch.Tensor, y: torch.Tensor): + x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y))) + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class TransformerDecoder_uniformer_diff_conv_balance(nn.Module): + def __init__(self, n_layers=4, + uni_layer=4, uni_type='3d', add_ffn=True, t_conv_type='1d', pre_prompt=True, + n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14, + balance=0., + use_t_conv=True, after_me=True, before_me=False, me_type='dstm', me_reduction=4, + use_t_pos_embed=True, num_classes=400, init_zero=True): + super().__init__() + + n_layers += uni_layer + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)] + + self.uni_layer = uni_layer + self.uni_dec = nn.ModuleList([ + CBlock(n_dim, mlp_ratio=mlp_factor, dropout=mlp_dropout[i], drop_path=dpr[i], uni_type=uni_type, add_ffn=add_ffn) + for i in range(uni_layer) + ]) + + self.dec = nn.ModuleList([ + ResidualDecoderBlock(n_dim, n_head, mlp_factor=mlp_factor, dropout=mlp_dropout[i], drop_path=dpr[i], init_zero=init_zero) + for i in range(n_layers) + ]) + + self.pre_prompt = pre_prompt + if pre_prompt: + print('Add pre prompt') + self.pre_temporal_cls_token = nn.Parameter(torch.zeros(n_dim)) + self.temporal_cls_token = nn.Parameter(torch.zeros(n_dim)) + + if use_t_conv: + self.t_conv_type = t_conv_type + if t_conv_type == '1d': + print('Use 1d t_conv for CPE') + self.tconv = nn.ModuleList([ + nn.Conv1d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim) + for i in range(n_layers) + ]) + if init_zero: + for m in self.tconv: + nn.init.constant_(m.bias, 0.) + m.weight.data[...] 
= torch.Tensor([0, 1, 0]) + else: + print('Use 3d t_conv for CPE') + self.tconv = nn.ModuleList([ + nn.Conv3d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim) + for i in range(n_layers) + ]) + if init_zero: + for m in self.tconv: + nn.init.constant_(m.bias, 0.) + else: + self.tconv = None + + self.before_me = before_me + self.after_me = after_me + if before_me or after_me: + assert before_me != after_me + print(f'Use {me_type} attention, Before {before_me}, After {after_me}') + if me_type == 'stm': + me_op = STM + elif me_type == 'dstm': + me_op = DSTM + elif me_type == 'tdn': + me_op = TDN + self.me = nn.ModuleList([me_op(n_dim, reduction=me_reduction) for i in range(n_layers)]) + + if use_t_pos_embed: + self.pemb_t = nn.Parameter(torch.zeros([n_layers, t_size, n_dim])) + else: + self.pemb_t = None + + print(F'Balnce weight {balance}') + self.balance = nn.Parameter(torch.ones((n_dim)) * balance) + self.sigmoid = nn.Sigmoid() + + if not init_zero: + nn.init.normal_(self.temporal_cls_token, std=1e-6) + if self.pemb_t is not None: + nn.init.trunc_normal_(self.pemb_t, std=.02) + + def forward(self, clip_feats_all, mode='video'): + # clip_feats_all = clip_feats_all[-len(self.dec):] + # only return n_layers features, save memory + clip_feats = [x for x in clip_feats_all] + if self.after_me: + origin_clip_feats = [x for x in clip_feats_all] + + L, N, T, C = clip_feats[0].size() + x = self.temporal_cls_token.view(1, 1, -1).repeat(1, N, 1) + + for i in range(len(clip_feats)): + if self.before_me: + # contain residual + clip_feats[i] = self.me[i](clip_feats[i]) + if self.tconv is not None: + L, N, T, C = clip_feats[i].shape + if self.t_conv_type == '1d': + clip_feats[i] = clip_feats[i].permute(0, 1, 3, 2).flatten(0, 1) # L * N, C, T + clip_feats[i] = self.tconv[i](clip_feats[i]).permute(0, 2, 1).contiguous().view(L, N, T, C) + else: + H = W = int((L - 1) ** 0.5) + _, tmp_feats = clip_feats[i][:1], clip_feats[i][1:] + tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T, H, W) + tmp_feats = self.tconv[i](tmp_feats).view(N, C, T, L - 1).permute(3, 0, 2, 1) + clip_feats[i][1:] = clip_feats[i][1:] + tmp_feats + if self.pemb_t is not None and mode == 'video': + clip_feats[i] = clip_feats[i] + self.pemb_t[i] + if self.after_me: + clip_feats[i] = clip_feats[i] + self.me[i](origin_clip_feats[i]) + if i < self.uni_layer: + # L, N, T, C + L, N, T, C = clip_feats[i].shape + H = W = int((L - 1) ** 0.5) + _, tmp_feats = clip_feats[i][:1], clip_feats[i][1:] + tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T, H, W) + tmp_feats = self.uni_dec[i](tmp_feats).view(N, C, T, L - 1).permute(3, 0, 2, 1) + clip_feats[i][1:] = clip_feats[i][1:] + tmp_feats + clip_feats[i] = clip_feats[i].permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C + + if self.pre_prompt: + pre_x = self.pre_temporal_cls_token.view(1, 1, -1).repeat(1, N, 1) + for i in range(len(self.dec)): + if i < self.uni_layer: + pre_x = self.dec[i](pre_x, clip_feats[i]) + elif i == self.uni_layer: + clip_feats[i] = torch.cat([pre_x, clip_feats[i]], dim=0) + x = self.dec[i](x, clip_feats[i]) + else: + x = self.dec[i](x, clip_feats[i]) + else: + for i in range(len(self.dec)): + x = self.dec[i](x, clip_feats[i]) + + # real residual + # L, N, T, C + residual = clip_feats_all[-1][0].mean(1) + weight = self.sigmoid(self.balance) + + # return self.proj((1 - weight) * x[0, :, :] + weight * residual) + return (1 - weight) * x[0, :, :] + weight * residual + + +if __name__ == '__main__': + model = 
TransformerDecoder_uniformer_diff_conv_balance() + + # construct a fake input to demonstrate input tensor shape + L, N, T, C = 197, 1, 8, 768 # num_image_tokens, video_batch_size, t_size, feature_dim + # we use intermediate feature maps from multiple blocks, so input features should be a list + input_features = [] + for i in range(8): # vit-b has 12 blocks + # every item in input_features contains features maps from a single block + # every item is a tuple containing 3 feature maps: + # (1) block output features (i.e. after mlp) with shape L, N, T, C + # (2) projected query features with shape L, N, T, C + # (3) projected key features with shape L, N, T, C + input_features.append( + tuple(torch.zeros([L, N, T, C]) for _ in range(3))) + # some small optimizations: + # (1) We only decode from the last $n$ blocks so it's good as long as the last $n$ items of input_features is valid and all previous items can be filled with None to save memory. By default $n=4$. + # (2) projected query/key features are optional. If you are using an uncompatible image backbone without query/key (e.g. CNN), you can fill the position with None (i.e. the tuple should be (Tensor, None, None) and set use_image_attnmap=False when constructing the model. + + print(model) + print(model(input_features).shape) # should be N, 400 diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/model.py b/Downstream/Video-Text-Retrieval/modules/clip_kc2/model.py new file mode 100644 index 0000000..cdeac9b --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc2/model.py @@ -0,0 +1,426 @@ +from collections import OrderedDict +from typing import Tuple, Union + +import numpy as np +import torch +import torch.nn.functional as F +from torch import nn + +from . import evl_utils +from .evl_utils import TransformerDecoder_uniformer_diff_conv_balance +from einops import rearrange +from ipdb import set_trace +from copy import deepcopy +from .clip_decoders import CaptionDecoder + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x: torch.Tensor): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None): + super().__init__() + + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + def attention(self, x: torch.Tensor): + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def forward(self, x: torch.Tensor): + x = x + self.attention(self.ln_1(x)) + x = x + self.mlp(self.ln_2(x)) + return x + + +class Transformer(nn.Module): + def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None): + super().__init__() + self.width = width + self.layers = layers + self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)]) + + def forward(self, x: torch.Tensor): + return self.resblocks(x) + + +class CLIP(nn.Module): + def __init__(self, + embed_dim: 
int, + # vision + image_resolution: int, + vision_layers: Union[Tuple[int, int, int, int], int], + vision_width: int, + vision_patch_size: int, + # text + context_length: int, + vocab_size: int, + transformer_width: int, + transformer_heads: int, + transformer_layers: int, + # evl + n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14, + use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True, + backbone='vit_2plus1d_dw_bias_b16', + uni_layer=0, + uni_type='2d', + add_ffn=False, + t_conv_type='3d', + pre_prompt=False, + balance=0., + after_me=True, + before_me=False, + me_type='stm', + me_reduction=4, + init_zero=True, + use_capdecoder=False, + ): + super().__init__() + + # All assertions is for adhoc clip_kc and should be removed + # assert vision_layers == 12, vision_layers + assert image_resolution == 224, image_resolution + # assert vision_patch_size == 32, vision_patch_size + assert vision_width == n_dim, (vision_width, n_dim) + + self.vision_width = n_dim + + self.context_length = context_length + + vision_heads = vision_width // 64 + self.visual = evl_utils.__dict__[backbone](pretrained=False, init_zero=init_zero, num_frames=t_size, t_size=t_size) + self.evl = TransformerDecoder_uniformer_diff_conv_balance( + n_layers=n_layers, n_dim=n_dim, n_head=n_head, + mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, + use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed, + uni_layer=uni_layer, uni_type=uni_type, add_ffn=add_ffn, t_conv_type=t_conv_type, + pre_prompt=pre_prompt, balance=balance, + after_me=after_me, before_me=before_me, + me_type=me_type, me_reduction=me_reduction, + init_zero=init_zero, + ) + self.visual_ln_post = nn.LayerNorm(n_dim) + scale = n_dim ** -0.5 + self.visual_proj = nn.Parameter(scale * torch.randn(n_dim, embed_dim)) + self.return_qk = use_image_attnmap + self.return_num = n_layers + + self.transformer = Transformer( + width=transformer_width, + layers=transformer_layers, + heads=transformer_heads, + attn_mask=self.build_attention_mask(), + ) + + self.vocab_size = vocab_size + self.token_embedding = nn.Embedding(vocab_size, transformer_width) + self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width)) + self.ln_final = LayerNorm(transformer_width) + + self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim)) + self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) + + self.embed_dim = embed_dim + + # We seperate the mask embedding to load pretrained model + self.text_mask_embedding = nn.Parameter(torch.empty(1, 1, transformer_width)) + + # # To keep the num_embeddings unchanged, we add this to embedded text + # self.eot_token_embedding = nn.Parameter(torch.empty(1, transformer_width)) + + ##################### try cap decoder ############### + self.use_capdecoder = use_capdecoder + if self.use_capdecoder: + self.caption_decoder = CaptionDecoder( + n_layers=6, + transformer_width=transformer_width, + vision_width=vision_width, + transformer_heads=transformer_heads, + vocab_size=vocab_size, + ) + ## we do not use logits, just text feats ## + self.caption_decoder.predictor = nn.Identity() + ##################################################### + + + self.initialize_parameters() + + def initialize_parameters(self): + nn.init.normal_(self.token_embedding.weight, std=0.02) + nn.init.normal_(self.positional_embedding, std=0.01) + 
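+        # text_mask_embedding is the learned placeholder that encode_text swaps in at
+        # masked token positions (x[masked_indices] = self.text_mask_embedding); it is
+        # initialized with the same std as the token embeddings.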
nn.init.normal_(self.text_mask_embedding, std=0.02) + # nn.init.constant_(self.eot_token_embedding, 0.0) + + proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5) + attn_std = self.transformer.width ** -0.5 + fc_std = (2 * self.transformer.width) ** -0.5 + for block in self.transformer.resblocks: + nn.init.normal_(block.attn.in_proj_weight, std=attn_std) + nn.init.normal_(block.attn.out_proj.weight, std=proj_std) + nn.init.normal_(block.mlp.c_fc.weight, std=fc_std) + nn.init.normal_(block.mlp.c_proj.weight, std=proj_std) + + if self.text_projection is not None: + nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5) + + nn.init.constant_(self.visual_ln_post.weight, 1.0) + nn.init.constant_(self.visual_ln_post.bias, 0.0) + + def build_attention_mask(self): + # lazily create causal attention mask, with full attention between the vision tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.empty(self.context_length, self.context_length) + mask.fill_(float("-inf")) + mask.triu_(1) # zero out the lower diagonal + return mask + + @property + def dtype(self): + return self.visual.conv1.weight.dtype + + def encode_video(self, video, return_all_feats=False, masked_indices=None, mode="video"): + # if len(video.size()) == 4: #[bs * T, C, H, W] + #set_trace() + # frames = 12 + # video = rearrange(video, '(b t) c h w -> b t c h w', b=int(video.size(0)/frames), t=frames) + # video = rearrange(video, 'b t c h w -> b c t h w') + + #set_trace() + # video: [N, C, T, H, W] + features = self.visual(video, return_num=self.return_num, masked_indices=masked_indices, mode=mode) + x = self.visual_ln_post(self.evl(features)) + if self.visual_proj is not None: + x = x @ self.visual_proj + + if return_all_feats: + return x, features[-1] + # return x, features[-1], features[-1][0] # [N, T, C], [L, N, T, C], [N, T, C] + + return x + + def encode_text(self, text, masked_indices=None, return_all_feats=False): + # assert (text.max(dim=-1)[0] + 1 == self.token_embedding.num_embeddings).all(), \ + # "The last token of each sentence should be eot_token, check the input" + + x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model] + # x[torch.arange(x.shape[0]), text.argmax(dim=-1)] += self.eot_token_embedding + + if masked_indices is not None: + x[masked_indices] = self.text_mask_embedding + + x = x + self.positional_embedding.type(self.dtype) + x = x.permute(1, 0, 2) # NLD -> LND + x = self.transformer(x) + x = x.permute(1, 0, 2) # LND -> NLD + x = self.ln_final(x).type(self.dtype) + + # x.shape = [batch_size, n_ctx, transformer.width] + # take features from the eot embedding (eot_token is the highest number in each sequence) + feats = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] + if self.text_projection is not None: + feats = feats @ self.text_projection + + if return_all_feats: + return feats, x + + return feats + + def forward(self, video, text): + video_features = self.encode_video(video) + text_features = self.encode_text(text) + + # normalized features + video_features = video_features / video_features.norm(dim=1, keepdim=True) + text_features = text_features / text_features.norm(dim=1, keepdim=True) + + # cosine similarity as logits + logit_scale = self.logit_scale.exp() + logits_per_video= logit_scale * video_features @ text_features.t() + logits_per_text = logits_per_video.t() + + # shape = [global_batch_size, global_batch_size] + return logits_per_video, logits_per_text + + +def convert_weights(model: 
nn.Module): + """Convert applicable model parameters to fp16""" + + def _convert_weights_to_fp16(l): + if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): + l.weight.data = l.weight.data.half() + if l.bias is not None: + l.bias.data = l.bias.data.half() + + if isinstance(l, nn.MultiheadAttention): + for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]: + tensor = getattr(l, attr) + if tensor is not None: + tensor.data = tensor.data.half() + + for name in ["text_projection", "proj"]: + if hasattr(l, name) and not isinstance(l, TransformerDecoder_uniformer_diff_conv_balance): + attr = getattr(l, name) + if attr is not None: + attr.data = attr.data.half() + + model.apply(_convert_weights_to_fp16) + +def interpolate_temporal_pos_embed(pos_embed, T): + # [1, t, d] -> [1, d, t] + pos_embed = pos_embed.transpose(-2, -1) + # [1, d, t] -> [1, d, T] + pos_embed = F.interpolate(pos_embed, size=(T), mode='linear') + # [1, d, T] -> [1, T, d] + return pos_embed.transpose(-2, -1) + +def interploate_rpb(rpb, T): + t1 = T * 2 - 1 + rpb = rpb.transpose(0, 1).unsqueeze(0) + rpb = F.interpolate(rpb, size=(t1), mode='linear') + return rpb.squeeze(0).transpose(0, 1) + +def build_model( + state_dict: dict, + # evl + n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14, + use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True, no_pretrain=False, + init_zero=True, mergeclip=False, mergeweight=0.5, use_capdecoder=False, clip_state_dict=None, + ): + vit = "visual.proj" in state_dict or "visual.positional_embedding" in state_dict + + if "visual.proj" in state_dict: + state_dict["visual_proj"] = state_dict["visual.proj"] + state_dict["visual_ln_post.weight"] = state_dict["visual.ln_post.weight"] + state_dict["visual_ln_post.bias"] = state_dict["visual.ln_post.bias"] + del state_dict["visual.proj"], state_dict["visual.ln_post.weight"], state_dict["visual.ln_post.bias"] + # new_state_dict = OrderedDict() + # for k, v in state_dict.items(): + # if k.startswith("backbone."): + # k = k.replace("backbone.", "visual.") + + # new_state_dict[k] = v + + # state_dict = new_state_dict + if vit: + vision_width = state_dict["visual.conv1.weight"].shape[0] + vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")]) + vision_patch_size = state_dict["visual.conv1.weight"].shape[-1] + grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5) + image_resolution = vision_patch_size * grid_size + else: + counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]] + vision_layers = tuple(counts) + vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0] + output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5) + vision_patch_size = None + assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0] + image_resolution = output_width * 32 + + # embed_dim = 512 + # context_length = 77 + # vocab_size = 49408 + # transformer_width = 512 + # transformer_layers = 12 + embed_dim = state_dict["text_projection"].shape[1] + context_length = state_dict["positional_embedding"].shape[0] + vocab_size = state_dict["token_embedding.weight"].shape[0] + transformer_width = state_dict["ln_final.weight"].shape[0] + transformer_heads = transformer_width // 64 + transformer_layers = 
len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks"))) + + vision_width = state_dict["visual_proj"].shape[0] + n_dim = vision_width + if vision_width == 768: + backbone = "vit_2plus1d_dw_bias_b16" + n_head = 12 + elif vision_width == 1024: + backbone = "vit_2plus1d_dw_bias_l14" + n_head = 16 + else: + raise NotImplementedError + + model = CLIP( + embed_dim, + image_resolution, vision_layers, vision_width, vision_patch_size, + context_length, vocab_size, transformer_width, transformer_heads, transformer_layers, + n_layers=n_layers, n_dim=n_dim, n_head=n_head, mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, spatial_size=spatial_size, + use_t_conv=use_t_conv, use_image_attnmap=use_image_attnmap, use_t_pos_embed=use_t_pos_embed, backbone=backbone, + init_zero=init_zero, use_capdecoder=use_capdecoder, + ) + + for key in ["input_resolution", "context_length", "vocab_size"]: + if key in state_dict: + del state_dict[key] + + ###### convert_weights(model)##### + ##### interpolate pos embedding ###### + temporal_key = 'visual.temporal_positional_embedding' + temporal_key2 = 'evl.pemb_t' + + if temporal_key in state_dict and t_size != state_dict[temporal_key].size(1): + state_dict[temporal_key] = interpolate_temporal_pos_embed(state_dict[temporal_key], t_size) + state_dict[temporal_key2] = interpolate_temporal_pos_embed(state_dict[temporal_key2], t_size) + for kk, vv in state_dict.items(): + if 'rpb_t' in kk: + size_old = state_dict[kk].shape + state_dict[kk] = interploate_rpb(vv, t_size) + size_new = state_dict[kk].shape + print('Interpolating' ,kk, size_old, '-->', size_new) + # set_trace() + if mergeclip: + assert 0.0 <= mergeweight <= 1.0 + assert clip_state_dict is not None + clip_sd = {k: v.cpu() for k, v in clip_state_dict.items()} + new_sd = deepcopy(state_dict) + for k in new_sd: + if k not in clip_sd: + continue + if any(x in k for x in clip_sd.keys()): + print('merging: ', k) + new_sd[k] = clip_sd[k] * mergeweight + state_dict[k] * (1.0 - mergeweight) + + ############## only merge the clip text features, this is for ActivityNet ########### + # if 'visual' in k: + # new_sd[k] = clip_sd[k] + # else: + # new_sd[k] = clip_sd[k] * mergeweight + state_dict[k] * (1.0 - mergeweight) + ################################################################################ + + ############## only merge the clip visual features, this is for MSVD ########### + # if 'visual' in k: + # new_sd[k] = clip_sd[k] * mergeweight + state_dict[k] * (1.0 - mergeweight) + # else: + # new_sd[k] = clip_sd[k] + ################################################################################ + state_dict = new_sd + + + # strict=False, for parameters of decoder + # assert False, (len(model.state_dict()), len(state_dict)) + if not no_pretrain: + msg = model.load_state_dict(state_dict, strict=False) + print(msg) + return model.eval() diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/model_freeze.py b/Downstream/Video-Text-Retrieval/modules/clip_kc2/model_freeze.py new file mode 100644 index 0000000..b89aabb --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc2/model_freeze.py @@ -0,0 +1,142 @@ +import os +import time + +import torch +import torch.nn as nn +from fvcore.nn import FlopCountAnalysis +from fvcore.nn import flop_count_table + +import evl_utils +from evl_utils import TransformerDecoder + +PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model' + + +class 
EVL(nn.Module): + def __init__(self, + backbone='vit_l14_336', + n_layers=4, + n_dim=1024, + n_head=16, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=32, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400 + ): + super().__init__() + + # pre-trained from CLIP + self.backbone = evl_utils.__dict__[backbone](pretrained=False) + + self.evl = TransformerDecoder( + n_layers=n_layers, n_dim=n_dim, n_head=n_head, + mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, + use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed, + num_classes=num_classes, + ) + self.return_num = n_layers + + def forward(self, x): + features = self.backbone(x, return_num=self.return_num) + output = self.evl(features) + + return output + + +def cal_flops(model, frame=8, size=224): + flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) + + +def vit_l_sparse16(pretrained=True): + # 16x224x224 + # k400 1x1: 86.5 + model = EVL( + backbone='vit_l14', + n_layers=4, + n_dim=1024, + n_head=16, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=32, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400 + ) + if pretrained: + pretrained_path = os.path.join(PATH_PREFIX, 'vit_l_sparse16.pyth') + print(f'lodel model from: {pretrained_path}') + state_dict = torch.load(pretrained_path, map_location='cpu') + model.load_state_dict(state_dict) + return model + + +def vit_l_sparse32(pretrained=True): + # 32x224x224 + # k400 1x1: 87.0 + model = EVL( + backbone='vit_l14', + n_layers=4, + n_dim=1024, + n_head=16, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=32, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400 + ) + if pretrained: + pretrained_path = os.path.join(PATH_PREFIX, 'vit_l_sparse32.pyth') + print(f'lodel model from: {pretrained_path}') + state_dict = torch.load(pretrained_path, map_location='cpu') + model.load_state_dict(state_dict) + return model + + +def vit_l336_sparse32(pretrained=True): + # 32x336x336 + # k400 1x1: 87.4 + model = EVL( + backbone='vit_l14_336', + n_layers=4, + n_dim=1024, + n_head=16, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=32, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400 + ) + if pretrained: + pretrained_path = os.path.join(PATH_PREFIX, 'vit_l336_sparse32.pyth') + print(f'lodel model from: {pretrained_path}') + state_dict = torch.load(pretrained_path, map_location='cpu') + model.load_state_dict(state_dict) + return model + + +if __name__ == '__main__': + + model = vit_l_sparse16() + cal_flops(model, frame=16, size=224) + + # model = vit_l_sparse32() + # cal_flops(model, frame=32, size=224) + + # model = vit_l336_sparse32() + # cal_flops(model, frame=32, size=336) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/model_no_freeze.py b/Downstream/Video-Text-Retrieval/modules/clip_kc2/model_no_freeze.py new file mode 100644 index 0000000..657c677 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc2/model_no_freeze.py @@ -0,0 +1,116 @@ +import os +import time + +import torch +import torch.nn as nn +from fvcore.nn import FlopCountAnalysis +from fvcore.nn import flop_count_table + +import evl_utils +from evl_utils import 
TransformerDecoder + +PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model' + + +class EVL(nn.Module): + def __init__(self, + backbone='vit_l14_336', + n_layers=4, + n_dim=1024, + n_head=16, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=32, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400, + add_residual=False, + ): + super().__init__() + + # pre-trained from CLIP + self.backbone = evl_utils.__dict__[backbone](pretrained=False) + + self.evl = TransformerDecoder( + n_layers=n_layers, n_dim=n_dim, n_head=n_head, + mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, + use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed, + num_classes=num_classes, add_residual=add_residual + ) + self.return_num = n_layers + + def forward(self, x): + features = self.backbone(x, return_num=self.return_num) + output = self.evl(features) + + return output + + +def cal_flops(model, frame=8, size=224): + flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) + + +def vit_b_sparse8(pretrained=True): + # 8x224x224 + # k400 1x1: 82.4 + model = EVL( + backbone='vit_b16', + n_layers=4, + n_dim=768, + n_head=12, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=8, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400, + add_residual=True, + ) + if pretrained: + pretrained_path = os.path.join(PATH_PREFIX, 'vit_b_sparse8.pyth') + print(f'lodel model from: {pretrained_path}') + state_dict = torch.load(pretrained_path, map_location='cpu') + model.load_state_dict(state_dict) + return model + +def vit_2plus1d_b_sparse8(pretrained=True): + # 8x224x224 + # k400 1x1: 82.5 + model = EVL( + backbone='vit_2plus1d_b16', + n_layers=4, + n_dim=768, + n_head=12, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=8, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400, + add_residual=True, + ) + if pretrained: + pretrained_path = os.path.join(PATH_PREFIX, 'vit_2plus1d_b_sparse8.pyth') + print(f'lodel model from: {pretrained_path}') + state_dict = torch.load(pretrained_path, map_location='cpu') + model.load_state_dict(state_dict) + return model + + +if __name__ == '__main__': + + # model = vit_b_sparse8() + # cal_flops(model, frame=8, size=224) + + model = vit_2plus1d_b_sparse8() + cal_flops(model, frame=8, size=224) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/model_no_freeze_diff.py b/Downstream/Video-Text-Retrieval/modules/clip_kc2/model_no_freeze_diff.py new file mode 100644 index 0000000..1d35df8 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc2/model_no_freeze_diff.py @@ -0,0 +1,110 @@ +import os +import time + +import torch +import torch.nn as nn +from fvcore.nn import FlopCountAnalysis +from fvcore.nn import flop_count_table + +import evl_utils +from evl_utils import TransformerDecoder_uniformer_diff_conv_balance + +PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model' + + +class EVL(nn.Module): + def __init__(self, + backbone='vit_b16', + n_layers=12, + n_dim=1024, + n_head=16, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=32, + use_t_conv=True, + 
use_t_pos_embed=True, + uni_layer=4, + uni_type='3d', + add_ffn=True, + t_conv_type='1d', + pre_prompt=True, + balance=0., + after_me=True, + before_me=False, + me_type='dstm', + me_reduction=4, + num_classes=400, + ): + super().__init__() + + # pre-trained from CLIP + self.backbone = evl_utils.__dict__[backbone](pretrained=False) + + self.evl = TransformerDecoder_uniformer_diff_conv_balance( + n_layers=n_layers, n_dim=n_dim, n_head=n_head, + mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, + use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed, + uni_layer=uni_layer, uni_type=uni_type, add_ffn=add_ffn, t_conv_type=t_conv_type, + pre_prompt=pre_prompt, balance=balance, + after_me=after_me, before_me=before_me, + me_type=me_type, me_reduction=me_reduction, + num_classes=num_classes + ) + self.return_num = n_layers + + def forward(self, x): + features = self.backbone(x, return_num=self.return_num) + output = self.evl(features) + + return output + + +def cal_flops(model, frame=8, size=224): + flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) + + +def vit_2plus1d_diff_b_sparse8(pretrained=True): + # 8x224x224 + # k400 1x1: 82.5 + model = EVL( + backbone='vit_2plus1d_dw_bias_b16', + n_layers=12, + n_dim=768, + n_head=12, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=8, + use_t_conv=True, + use_t_pos_embed=True, + uni_layer=0, + uni_type='2d', + add_ffn=False, + t_conv_type='3d', + pre_prompt=False, + balance=0., + after_me=True, + before_me=False, + me_type='stm', + me_reduction=4, + num_classes=400, + ) + if pretrained: + pretrained_path = os.path.join(PATH_PREFIX, 'vit_2plus1d_diff_b_sparse8.pyth') + print(f'lodel model from: {pretrained_path}') + state_dict = torch.load(pretrained_path, map_location='cpu') + model.load_state_dict(state_dict) + return model + + +if __name__ == '__main__': + + model = vit_2plus1d_diff_b_sparse8() + cal_flops(model, frame=8, size=224) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc2/simple_tokenizer.py b/Downstream/Video-Text-Retrieval/modules/clip_kc2/simple_tokenizer.py new file mode 100644 index 0000000..0a66286 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc2/simple_tokenizer.py @@ -0,0 +1,132 @@ +import gzip +import html +import os +from functools import lru_cache + +import ftfy +import regex as re + + +@lru_cache() +def default_bpe(): + return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz") + + +@lru_cache() +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a corresponding list of unicode strings. + The reversible bpe codes work on unicode strings. + This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. + When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. + This is a signficant percentage of your normal, say, 32K bpe vocab. + To avoid that, we want lookup tables between utf-8 bytes and unicode strings. + And avoids mapping to whitespace/control characters the bpe code barfs on. 
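+    For example, the space byte (0x20) is not in the printable ranges kept below, so it
+    is mapped to a dedicated printable stand-in rather than appearing as raw whitespace
+    inside BPE tokens.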
+ """ + bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8+n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def get_pairs(word): + """Return set of symbol pairs in a word. + Word is represented as tuple of symbols (symbols being variable-length strings). + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +def basic_clean(text): + text = ftfy.fix_text(text) + text = html.unescape(html.unescape(text)) + return text.strip() + + +def whitespace_clean(text): + text = re.sub(r'\s+', ' ', text) + text = text.strip() + return text + + +class SimpleTokenizer(object): + def __init__(self, bpe_path: str = default_bpe()): + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + merges = gzip.open(bpe_path).read().decode("utf-8").split('\n') + merges = merges[1:49152-256-2+1] + merges = [tuple(merge.split()) for merge in merges] + vocab = list(bytes_to_unicode().values()) + vocab = vocab + [v+'' for v in vocab] + for merge in merges: + vocab.append(''.join(merge)) + vocab.extend(['<|startoftext|>', '<|endoftext|>']) + self.encoder = dict(zip(vocab, range(len(vocab)))) + self.decoder = {v: k for k, v in self.encoder.items()} + self.bpe_ranks = dict(zip(merges, range(len(merges)))) + self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'} + self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE) + + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token[:-1]) + ( token[-1] + '',) + pairs = get_pairs(word) + + if not pairs: + return token+'' + + while True: + bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf'))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + new_word.extend(word[i:j]) + i = j + except: + new_word.extend(word[i:]) + break + + if word[i] == first and i < len(word)-1 and word[i+1] == second: + new_word.append(first+second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = ' '.join(word) + self.cache[token] = word + return word + + def encode(self, text): + bpe_tokens = [] + text = whitespace_clean(basic_clean(text)).lower() + for token in re.findall(self.pat, text): + token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8')) + bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')) + return bpe_tokens + + def decode(self, tokens): + text = ''.join([self.decoder[token] for token in tokens]) + text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('', ' ') + return text diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/__init__.py b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/__init__.py new file mode 100644 index 0000000..ca28329 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/__init__.py @@ -0,0 +1,2 @@ +from .clip import * +from .evl_utils import * \ No newline at end of file diff --git 
a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/__pycache__/__init__.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..83019a2 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/__pycache__/__init__.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/__pycache__/clip.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/__pycache__/clip.cpython-36.pyc new file mode 100644 index 0000000..039e499 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/__pycache__/clip.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/__pycache__/model.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/__pycache__/model.cpython-36.pyc new file mode 100644 index 0000000..72b1e0b Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/__pycache__/model.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/__pycache__/model_no_freeze_only_global.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/__pycache__/model_no_freeze_only_global.cpython-36.pyc new file mode 100644 index 0000000..50c2a17 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/__pycache__/model_no_freeze_only_global.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/__pycache__/simple_tokenizer.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/__pycache__/simple_tokenizer.cpython-36.pyc new file mode 100644 index 0000000..4e6005f Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/__pycache__/simple_tokenizer.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/bpe_simple_vocab_16e6.txt.gz b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/bpe_simple_vocab_16e6.txt.gz new file mode 100644 index 0000000..7b5088a Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/bpe_simple_vocab_16e6.txt.gz differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/clip.py b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/clip.py new file mode 100644 index 0000000..5856632 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/clip.py @@ -0,0 +1,258 @@ +import hashlib +import os +import urllib +import warnings +from typing import Any, Union, List +from pkg_resources import packaging + +import torch +from PIL import Image +from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize +from tqdm import tqdm + +from .model import build_model +from .simple_tokenizer import SimpleTokenizer as _Tokenizer + +try: + from torchvision.transforms import InterpolationMode + BICUBIC = InterpolationMode.BICUBIC +except ImportError: + BICUBIC = Image.BICUBIC + + +if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"): + warnings.warn("PyTorch version 1.7.1 or higher is recommended") + + +__all__ = ["available_models", "load", "tokenize"] +_tokenizer = _Tokenizer() + +_MODELS = { + "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", + "ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt", + "ViT-L/14": 
"https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt", +} + + +def _download(url: str, root: str): + os.makedirs(root, exist_ok=True) + filename = os.path.basename(url) + + expected_sha256 = url.split("/")[-2] + download_target = os.path.join(root, filename) + + if os.path.exists(download_target) and not os.path.isfile(download_target): + raise RuntimeError(f"{download_target} exists and is not a regular file") + + if os.path.isfile(download_target): + if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256: + return download_target + else: + warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file") + + with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: + with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop: + while True: + buffer = source.read(8192) + if not buffer: + break + + output.write(buffer) + loop.update(len(buffer)) + + if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256: + raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match") + + return download_target + + +def _convert_image_to_rgb(image): + return image.convert("RGB") + + +def _transform(n_px): + return Compose([ + Resize(n_px, interpolation=BICUBIC), + CenterCrop(n_px), + _convert_image_to_rgb, + ToTensor(), + Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), + ]) + + +def available_models() -> List[str]: + """Returns the names of available CLIP models""" + return list(_MODELS.keys()) + + +def load( + name: str, + device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", + jit: bool = False, download_root: str = None, + # evl + n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.0, 0.0, 0.0, 0.0], cls_dropout=0.5, t_size=8, spatial_size=14, + use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True, dropout=0.0, no_pretrain=False, init_zero=True, mergeclip=False, mergeweight=0.5, use_capdecoder=False, clip_state_dict=None, + ): + """Load a CLIP model + + Parameters + ---------- + name : str + A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict + + device : Union[str, torch.device] + The device to put the loaded model + + jit : bool + Whether to load the optimized JIT model or more hackable non-JIT model (default). + + download_root: str + path to download the model files; by default, it uses "~/.cache/clip" + + Returns + ------- + model : torch.nn.Module + The CLIP model + + preprocess : Callable[[PIL.Image], torch.Tensor] + A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input + """ + if name in _MODELS: + model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip")) + elif os.path.isfile(name): + model_path = name + else: + raise RuntimeError(f"Model {name} not found; available models = {available_models()}") + + ''' + with open(model_path, 'rb') as opened_file: + try: + # loading JIT archive + model = torch.jit.load(opened_file, map_location=device if jit else "cpu").eval() + state_dict = None + except RuntimeError: + # loading saved state dict + if jit: + warnings.warn(f"File {model_path} is not a JIT archive. 
Loading as a state dict instead") + jit = False + state_dict = torch.load(model_path, map_location="cpu") + ''' + init_state_dict = torch.load(model_path, map_location='cpu')['state_dict'] + state_dict = {} + for k, v in init_state_dict.items(): + k = k.replace('clip.','') + state_dict[k] = v + + if not jit: + model = build_model( + state_dict or model.state_dict(), + n_layers=n_layers, n_dim=n_dim, n_head=n_head, mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, spatial_size=spatial_size, + use_t_conv=use_t_conv, use_image_attnmap=use_image_attnmap, use_t_pos_embed=use_t_pos_embed, no_pretrain=no_pretrain, + init_zero=init_zero, mergeclip=mergeclip, mergeweight=mergeweight, use_capdecoder=use_capdecoder, clip_state_dict=clip_state_dict, + ).to(device) + if str(device) == "cpu": + model.float() + return model, _transform(model.visual.input_resolution) + + # patch the device names + device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]) + device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1] + + def patch_device(module): + try: + graphs = [module.graph] if hasattr(module, "graph") else [] + except RuntimeError: + graphs = [] + + if hasattr(module, "forward1"): + graphs.append(module.forward1.graph) + + for graph in graphs: + for node in graph.findAllNodes("prim::Constant"): + if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"): + node.copyAttributes(device_node) + + model.apply(patch_device) + patch_device(model.encode_image) + patch_device(model.encode_text) + + # patch dtype to float32 on CPU + if str(device) == "cpu": + float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[]) + float_input = list(float_holder.graph.findNode("aten::to").inputs())[1] + float_node = float_input.node() + + def patch_float(module): + try: + graphs = [module.graph] if hasattr(module, "graph") else [] + except RuntimeError: + graphs = [] + + if hasattr(module, "forward1"): + graphs.append(module.forward1.graph) + + for graph in graphs: + for node in graph.findAllNodes("aten::to"): + inputs = list(node.inputs()) + for i in [1, 2]: # dtype can be the second or third argument to aten::to() + if inputs[i].node()["value"] == 5: + inputs[i].node().copyAttributes(float_node) + + model.apply(patch_float) + patch_float(model.encode_image) + patch_float(model.encode_text) + + model.float() + + return model, _transform(model.input_resolution.item()) + + +def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False, return_special_tokens_mask: bool = False) -> Union[torch.IntTensor, torch.LongTensor, torch.BoolTensor]: + """ + Returns the tokenized representation of given input string(s) + + Parameters + ---------- + texts : Union[str, List[str]] + An input string or a list of input strings to tokenize + + context_length : int + The context length to use; all CLIP models use 77 as the context length + + truncate: bool + Whether to truncate the text in case its encoding is longer than the context length + + Returns + ------- + A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]. + We return LongTensor when torch version is <1.8.0, since older index_select requires indices to be long. 
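+
+    Example
+    -------
+    A minimal illustration (the texts are placeholders):
+
+    >>> tokens = tokenize(["a video of a dog", "a video of a cat"])
+    >>> tokens.shape
+    torch.Size([2, 77])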
+ """ + if isinstance(texts, str): + texts = [texts] + + sot_token = _tokenizer.encoder["<|startoftext|>"] + eot_token = _tokenizer.encoder["<|endoftext|>"] + all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts] + if packaging.version.parse(torch.__version__) < packaging.version.parse("1.8.0"): + result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) + else: + result = torch.zeros(len(all_tokens), context_length, dtype=torch.int) + + special_tokens_mask = torch.zeros(len(all_tokens), context_length, dtype=torch.bool) + + for i, tokens in enumerate(all_tokens): + if len(tokens) > context_length: + if truncate: + tokens = tokens[:context_length] + tokens[-1] = eot_token + else: + raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}") + result[i, :len(tokens)] = torch.tensor(tokens) + special_tokens_mask[i, len(tokens):] = 1 + + if return_special_tokens_mask: + return result, special_tokens_mask + + return result diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__init__.py b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__init__.py new file mode 100644 index 0000000..bede4c0 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__init__.py @@ -0,0 +1,7 @@ +from .evl_module import TransformerDecoder +from .evl_module_uniformer_diff_conv_balance import TransformerDecoder_uniformer_diff_conv_balance +from .clip_vit import vit_b32, vit_b16, vit_l14, vit_l14_336 +from .clip_vit_2plus1d import vit_2plus1d_b32, vit_2plus1d_b16, vit_2plus1d_l14, vit_2plus1d_l14_336 +from .clip_vit_2plus1d_dw_bias import vit_2plus1d_dw_bias_b32, vit_2plus1d_dw_bias_b16, vit_2plus1d_dw_bias_l14, vit_2plus1d_dw_bias_l14_336 +from .clip_vit_fusion import vit_fusion_b32, vit_fusion_b16, vit_fusion_l14, vit_fusion_l14_336 +from .clip_vit_only_global import vit_only_global_b32, vit_only_global_b16, vit_only_global_l14, vit_only_global_l14_336 \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/__init__.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..721fadc Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/__init__.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/attention.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/attention.cpython-36.pyc new file mode 100644 index 0000000..c6bb955 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/attention.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/attention_bias.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/attention_bias.cpython-36.pyc new file mode 100644 index 0000000..7737ff8 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/attention_bias.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/attention_module.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/attention_module.cpython-36.pyc new file mode 100644 index 0000000..a617cfa Binary files /dev/null and 
b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/attention_module.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/attention_module_bias.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/attention_module_bias.cpython-36.pyc new file mode 100644 index 0000000..82bff89 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/attention_module_bias.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/clip_vit.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/clip_vit.cpython-36.pyc new file mode 100644 index 0000000..78444b6 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/clip_vit.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/clip_vit_2plus1d.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/clip_vit_2plus1d.cpython-36.pyc new file mode 100644 index 0000000..b6375d3 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/clip_vit_2plus1d.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/clip_vit_2plus1d_dw_bias.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/clip_vit_2plus1d_dw_bias.cpython-36.pyc new file mode 100644 index 0000000..56f7082 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/clip_vit_2plus1d_dw_bias.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/clip_vit_fusion.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/clip_vit_fusion.cpython-36.pyc new file mode 100644 index 0000000..0cfdbe4 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/clip_vit_fusion.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/clip_vit_only_global.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/clip_vit_only_global.cpython-36.pyc new file mode 100644 index 0000000..6f80715 Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/clip_vit_only_global.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/evl_module.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/evl_module.cpython-36.pyc new file mode 100644 index 0000000..dfa9aee Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/evl_module.cpython-36.pyc differ diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/evl_module_uniformer_diff_conv_balance.cpython-36.pyc b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/evl_module_uniformer_diff_conv_balance.cpython-36.pyc new file mode 100644 index 0000000..e98f8dc Binary files /dev/null and b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/__pycache__/evl_module_uniformer_diff_conv_balance.cpython-36.pyc differ diff --git 
a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/attention.py b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/attention.py new file mode 100644 index 0000000..28ce56d --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/attention.py @@ -0,0 +1,203 @@ +#!/usr/bin/env python + +import warnings +from typing import Tuple, Optional + +import torch +from torch import Tensor +from torch.nn.modules.linear import Linear +from torch.nn.init import xavier_uniform_ +from torch.nn.init import constant_ +from torch.nn.init import xavier_normal_ +from torch.nn.parameter import Parameter +from torch.nn.modules.module import Module + +from .attention_module import multi_head_attention_forward + + +class _LinearWithBias(Linear): + bias: Tensor + + def __init__(self, in_features: int, out_features: int) -> None: + super().__init__(in_features, out_features, bias=True) + + +class MultiheadAttention(Module): + r"""Allows the model to jointly attend to information + from different representation subspaces. + See reference: Attention Is All You Need + + .. math:: + \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O + \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V) + + Args: + embed_dim: total dimension of the model. + num_heads: parallel attention heads. + dropout: a Dropout layer on attn_output_weights. Default: 0.0. + bias: add bias as module parameter. Default: True. + add_bias_kv: add bias to the key and value sequences at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + kdim: total number of features in key. Default: None. + vdim: total number of features in value. Default: None. + + Note: if kdim and vdim are None, they will be set to embed_dim such that + query, key, and value have the same number of features. 
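+
+    In addition to the standard interface, ``forward`` here accepts ``return_qk=True``,
+    in which case the query and key tensors used for the attention computation are
+    returned along with the usual attention output and weights.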
+ + Examples:: + + >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads) + >>> attn_output, attn_output_weights = multihead_attn(query, key, value) + """ + bias_k: Optional[torch.Tensor] + bias_v: Optional[torch.Tensor] + + def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None): + super(MultiheadAttention, self).__init__() + self.embed_dim = embed_dim + self.kdim = kdim if kdim is not None else embed_dim + self.vdim = vdim if vdim is not None else embed_dim + self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim + + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" + + if self._qkv_same_embed_dim is False: + self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim)) + self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim)) + self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim)) + self.register_parameter('in_proj_weight', None) + else: + self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim)) + self.register_parameter('q_proj_weight', None) + self.register_parameter('k_proj_weight', None) + self.register_parameter('v_proj_weight', None) + + if bias: + self.in_proj_bias = Parameter(torch.empty(3 * embed_dim)) + else: + self.register_parameter('in_proj_bias', None) + self.out_proj = _LinearWithBias(embed_dim, embed_dim) + + if add_bias_kv: + self.bias_k = Parameter(torch.empty(1, 1, embed_dim)) + self.bias_v = Parameter(torch.empty(1, 1, embed_dim)) + else: + self.bias_k = self.bias_v = None + + self.add_zero_attn = add_zero_attn + + self._reset_parameters() + + def _reset_parameters(self): + if self._qkv_same_embed_dim: + xavier_uniform_(self.in_proj_weight) + else: + xavier_uniform_(self.q_proj_weight) + xavier_uniform_(self.k_proj_weight) + xavier_uniform_(self.v_proj_weight) + + if self.in_proj_bias is not None: + constant_(self.in_proj_bias, 0.) + constant_(self.out_proj.bias, 0.) + if self.bias_k is not None: + xavier_normal_(self.bias_k) + if self.bias_v is not None: + xavier_normal_(self.bias_v) + + def __setstate__(self, state): + # Support loading old MultiheadAttention checkpoints generated by v1.1.0 + if '_qkv_same_embed_dim' not in state: + state['_qkv_same_embed_dim'] = True + + super(MultiheadAttention, self).__setstate__(state) + + def forward(self, query, key, value, key_padding_mask=None, + need_weights=True, attn_mask=None, return_qk=False): + # type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor], bool) -> Tuple[Tensor, Optional[Tensor]] + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. When given a binary mask and a value is True, + the corresponding value on the attention layer will be ignored. When given + a byte mask and a value is non-zero, the corresponding value on the attention + layer will be ignored + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. 
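+        return_qk: whether to additionally return the query and key tensors used in the
+            attention computation (see also the Shape section below).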
+ + Shape: + - Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the position + with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensure that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` + is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + - return_qk: whether return Q and K. + + - Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. + """ + if return_qk: + if not self._qkv_same_embed_dim: + q, k, attn_output, attn_output_weights = multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, use_separate_proj_weight=True, + q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight, + v_proj_weight=self.v_proj_weight, return_qk=True) + else: + q, k, attn_output, attn_output_weights = multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, return_qk=True) + return q, k, attn_output, attn_output_weights + else: + if not self._qkv_same_embed_dim: + return multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, use_separate_proj_weight=True, + q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight, + v_proj_weight=self.v_proj_weight) + else: + return multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, 
self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask) diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/attention_bias.py b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/attention_bias.py new file mode 100644 index 0000000..006f56a --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/attention_bias.py @@ -0,0 +1,201 @@ +import warnings +from typing import Tuple, Optional + +import torch +from torch import Tensor +from torch.nn.modules.linear import Linear +from torch.nn.init import xavier_uniform_ +from torch.nn.init import constant_ +from torch.nn.init import xavier_normal_ +from torch.nn.parameter import Parameter +from torch.nn.modules.module import Module + +from .attention_module_bias import multi_head_attention_forward + + +class _LinearWithBias(Linear): + bias: Tensor + + def __init__(self, in_features: int, out_features: int) -> None: + super().__init__(in_features, out_features, bias=True) + + +class MultiheadAttention(Module): + r"""Allows the model to jointly attend to information + from different representation subspaces. + See reference: Attention Is All You Need + + .. math:: + \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O + \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V) + + Args: + embed_dim: total dimension of the model. + num_heads: parallel attention heads. + dropout: a Dropout layer on attn_output_weights. Default: 0.0. + bias: add bias as module parameter. Default: True. + add_bias_kv: add bias to the key and value sequences at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + kdim: total number of features in key. Default: None. + vdim: total number of features in value. Default: None. + + Note: if kdim and vdim are None, they will be set to embed_dim such that + query, key, and value have the same number of features. 
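The note above on kdim/vdim is what selects between the fused in_proj_weight and the separate q/k/v projection weights. A small sketch with illustrative sizes (the import of MultiheadAttention and the dimensions are assumptions, not values from the patch):

import torch
from evl_utils.attention_bias import MultiheadAttention  # assumed import path for the class defined here

E, H, Dk, Dv = 768, 12, 256, 384                          # illustrative dimensions
cross_attn = MultiheadAttention(E, H, kdim=Dk, vdim=Dv)   # kdim/vdim differ, so separate q/k/v weights are created
query = torch.randn(10, 2, E)                             # (L, N, E)
key = torch.randn(20, 2, Dk)                              # (S, N, kdim)
value = torch.randn(20, 2, Dv)                            # (S, N, vdim)
out, attn_weights = cross_attn(query, key, value)         # out: (10, 2, E); attn_weights: (2, 10, 20)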
+ + Examples:: + + >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads) + >>> attn_output, attn_output_weights = multihead_attn(query, key, value) + """ + bias_k: Optional[torch.Tensor] + bias_v: Optional[torch.Tensor] + + def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None): + super(MultiheadAttention, self).__init__() + self.embed_dim = embed_dim + self.kdim = kdim if kdim is not None else embed_dim + self.vdim = vdim if vdim is not None else embed_dim + self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim + + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" + + if self._qkv_same_embed_dim is False: + self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim)) + self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim)) + self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim)) + self.register_parameter('in_proj_weight', None) + else: + self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim)) + self.register_parameter('q_proj_weight', None) + self.register_parameter('k_proj_weight', None) + self.register_parameter('v_proj_weight', None) + + if bias: + self.in_proj_bias = Parameter(torch.empty(3 * embed_dim)) + else: + self.register_parameter('in_proj_bias', None) + self.out_proj = _LinearWithBias(embed_dim, embed_dim) + + if add_bias_kv: + self.bias_k = Parameter(torch.empty(1, 1, embed_dim)) + self.bias_v = Parameter(torch.empty(1, 1, embed_dim)) + else: + self.bias_k = self.bias_v = None + + self.add_zero_attn = add_zero_attn + + self._reset_parameters() + + def _reset_parameters(self): + if self._qkv_same_embed_dim: + xavier_uniform_(self.in_proj_weight) + else: + xavier_uniform_(self.q_proj_weight) + xavier_uniform_(self.k_proj_weight) + xavier_uniform_(self.v_proj_weight) + + if self.in_proj_bias is not None: + constant_(self.in_proj_bias, 0.) + constant_(self.out_proj.bias, 0.) + if self.bias_k is not None: + xavier_normal_(self.bias_k) + if self.bias_v is not None: + xavier_normal_(self.bias_v) + + def __setstate__(self, state): + # Support loading old MultiheadAttention checkpoints generated by v1.1.0 + if '_qkv_same_embed_dim' not in state: + state['_qkv_same_embed_dim'] = True + + super(MultiheadAttention, self).__setstate__(state) + + def forward(self, query, key, value, key_padding_mask=None, + need_weights=True, attn_mask=None, return_qk=False, rpb=None): + # type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor], bool, Optional[Tensor]) -> Tuple[Tensor, Optional[Tensor]] + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. When given a binary mask and a value is True, + the corresponding value on the attention layer will be ignored. When given + a byte mask and a value is non-zero, the corresponding value on the attention + layer will be ignored + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. 
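A sketch of how the extra rpb argument is consumed, with illustrative sizes: the bias is added to the raw attention logits, which have shape (N*num_heads, L, S), so rpb is expected to match or broadcast to that shape (the clip_vit_2plus1d_dw_bias module added later in this patch builds it that way from a learned per-head table). The import path and sizes below are assumptions.

import torch
from evl_utils.attention_bias import MultiheadAttention  # assumed import path

L = S = 50                              # illustrative self-attention over 50 tokens
N, E, H = 2, 768, 12                    # illustrative batch, embed dim, heads
attn = MultiheadAttention(E, H)
x = torch.randn(L, N, E)
rpb = torch.zeros(N * H, L, S)          # e.g. a relative position bias expanded per batch and head
out, attn_weights = attn(x, x, x, rpb=rpb)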
+ + Shape: + - Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the position + with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensure that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` + is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + - return_qk: whether return Q and K. + + - Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. + """ + if return_qk: + if not self._qkv_same_embed_dim: + q, k, attn_output, attn_output_weights = multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, use_separate_proj_weight=True, + q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight, + v_proj_weight=self.v_proj_weight, return_qk=True, rpb=rpb) + else: + q, k, attn_output, attn_output_weights = multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, return_qk=True, rpb=rpb) + return q, k, attn_output, attn_output_weights + else: + if not self._qkv_same_embed_dim: + return multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, use_separate_proj_weight=True, + q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight, + v_proj_weight=self.v_proj_weight, rpb=rpb) + else: + return multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, 
self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, rpb=rpb) diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/attention_module.py b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/attention_module.py new file mode 100644 index 0000000..fd4e73b --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/attention_module.py @@ -0,0 +1,316 @@ +r"""Functional interface""" +import warnings +import math + +import torch +from torch import _VF +from torch._jit_internal import Optional, Tuple +from torch.overrides import has_torch_function, handle_torch_function + +from torch.nn.functional import _pad, linear, softmax, dropout + +Tensor = torch.Tensor +pad = _pad + + +def multi_head_attention_forward(query: Tensor, + key: Tensor, + value: Tensor, + embed_dim_to_check: int, + num_heads: int, + in_proj_weight: Tensor, + in_proj_bias: Tensor, + bias_k: Optional[Tensor], + bias_v: Optional[Tensor], + add_zero_attn: bool, + dropout_p: float, + out_proj_weight: Tensor, + out_proj_bias: Tensor, + training: bool = True, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + use_separate_proj_weight: bool = False, + q_proj_weight: Optional[Tensor] = None, + k_proj_weight: Optional[Tensor] = None, + v_proj_weight: Optional[Tensor] = None, + static_k: Optional[Tensor] = None, + static_v: Optional[Tensor] = None, + return_qk: bool = False + ) -> Tuple[Tensor, Optional[Tensor]]: + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + embed_dim_to_check: total dimension of the model. + num_heads: parallel attention heads. + in_proj_weight, in_proj_bias: input projection weight and bias. + bias_k, bias_v: bias of the key and value sequences to be added at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + dropout_p: probability of an element to be zeroed. + out_proj_weight, out_proj_bias: the output projection weight and bias. + training: apply dropout if is ``True``. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. This is an binary mask. When the value is True, + the corresponding value on the attention layer will be filled with -inf. + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. + use_separate_proj_weight: the function accept the proj. weights for query, key, + and value in different forms. If false, in_proj_weight will be used, which is + a combination of q_proj_weight, k_proj_weight, v_proj_weight. + q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias. + static_k, static_v: static key and value used for attention operators. + + + Shape: + Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. 
+ - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions + will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` + are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + - return_qk: whether return Q and K. + + Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. + """ + if not torch.jit.is_scripting(): + tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, + out_proj_weight, out_proj_bias) + if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops): + return handle_torch_function( + multi_head_attention_forward, tens_ops, query, key, value, + embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias, + bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight, + out_proj_bias, training=training, key_padding_mask=key_padding_mask, + need_weights=need_weights, attn_mask=attn_mask, + use_separate_proj_weight=use_separate_proj_weight, + q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight, + v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v) + tgt_len, bsz, embed_dim = query.size() + assert embed_dim == embed_dim_to_check + # allow MHA to have different sizes for the feature dimension + assert key.size(0) == value.size(0) and key.size(1) == value.size(1) + + head_dim = embed_dim // num_heads + assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads" + scaling = float(head_dim) ** -0.5 + + if not use_separate_proj_weight: + if torch.equal(query, key) and torch.equal(key, value): + # self-attention + q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1) + + elif torch.equal(key, value): + # encoder-decoder attention + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = linear(query, _w, _b) + + if key is None: + assert value is None + k = None + v = None + else: + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = None + _w = in_proj_weight[_start:, :] + if 
_b is not None: + _b = _b[_start:] + k, v = linear(key, _w, _b).chunk(2, dim=-1) + + else: + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = linear(query, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = embed_dim * 2 + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + k = linear(key, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim * 2 + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + v = linear(value, _w, _b) + else: + q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight) + len1, len2 = q_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == query.size(-1) + + k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight) + len1, len2 = k_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == key.size(-1) + + v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight) + len1, len2 = v_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == value.size(-1) + + if in_proj_bias is not None: + q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim]) + k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)]) + v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):]) + else: + q = linear(query, q_proj_weight_non_opt, in_proj_bias) + k = linear(key, k_proj_weight_non_opt, in_proj_bias) + v = linear(value, v_proj_weight_non_opt, in_proj_bias) + q = q * scaling + + if attn_mask is not None: + assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \ + attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \ + 'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype) + if attn_mask.dtype == torch.uint8: + warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.") + attn_mask = attn_mask.to(torch.bool) + + if attn_mask.dim() == 2: + attn_mask = attn_mask.unsqueeze(0) + if list(attn_mask.size()) != [1, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 2D attn_mask is not correct.') + elif attn_mask.dim() == 3: + if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 3D attn_mask is not correct.') + else: + raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim())) + # attn_mask's dim is 3 now. + + # convert ByteTensor key_padding_mask to bool + if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8: + warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.") + key_padding_mask = key_padding_mask.to(torch.bool) + + if bias_k is not None and bias_v is not None: + if static_k is None and static_v is None: + k = torch.cat([k, bias_k.repeat(1, bsz, 1)]) + v = torch.cat([v, bias_v.repeat(1, bsz, 1)]) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + else: + assert static_k is None, "bias cannot be added to static key." + assert static_v is None, "bias cannot be added to static value." 
+ else: + assert bias_k is None + assert bias_v is None + + # L, N, E + if return_qk: + return_q = q.clone() / scaling + return_k = k.clone() + + q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1) + if k is not None: + k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + if v is not None: + v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + + if static_k is not None: + assert static_k.size(0) == bsz * num_heads + assert static_k.size(2) == head_dim + k = static_k + + if static_v is not None: + assert static_v.size(0) == bsz * num_heads + assert static_v.size(2) == head_dim + v = static_v + + src_len = k.size(1) + + if key_padding_mask is not None: + assert key_padding_mask.size(0) == bsz + assert key_padding_mask.size(1) == src_len + + if add_zero_attn: + src_len += 1 + k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1) + v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + + attn_output_weights = torch.bmm(q, k.transpose(1, 2)) + assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len] + + if attn_mask is not None: + if attn_mask.dtype == torch.bool: + attn_output_weights.masked_fill_(attn_mask, float('-inf')) + else: + attn_output_weights += attn_mask + + + if key_padding_mask is not None: + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + attn_output_weights = attn_output_weights.masked_fill( + key_padding_mask.unsqueeze(1).unsqueeze(2), + float('-inf'), + ) + attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len) + + attn_output_weights = softmax( + attn_output_weights, dim=-1) + attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training) + + attn_output = torch.bmm(attn_output_weights, v) + assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim] + attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) + attn_output = linear(attn_output, out_proj_weight, out_proj_bias) + + if return_qk: + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return return_q, return_k, attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return return_q, return_k, attn_output, None + else: + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return attn_output, None diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/attention_module_bias.py b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/attention_module_bias.py new file mode 100644 index 0000000..331c60b --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/attention_module_bias.py @@ -0,0 +1,319 @@ +r"""Functional interface""" +import warnings +import math + +import torch +from torch import _VF +from torch._jit_internal import Optional, Tuple +from torch.overrides import has_torch_function, handle_torch_function + +from torch.nn.functional import _pad, linear, softmax, dropout + +Tensor = torch.Tensor +pad = _pad + + +def multi_head_attention_forward(query: 
Tensor, + key: Tensor, + value: Tensor, + embed_dim_to_check: int, + num_heads: int, + in_proj_weight: Tensor, + in_proj_bias: Tensor, + bias_k: Optional[Tensor], + bias_v: Optional[Tensor], + add_zero_attn: bool, + dropout_p: float, + out_proj_weight: Tensor, + out_proj_bias: Tensor, + training: bool = True, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + use_separate_proj_weight: bool = False, + q_proj_weight: Optional[Tensor] = None, + k_proj_weight: Optional[Tensor] = None, + v_proj_weight: Optional[Tensor] = None, + static_k: Optional[Tensor] = None, + static_v: Optional[Tensor] = None, + return_qk: bool = False, + rpb: Tensor = None, + ) -> Tuple[Tensor, Optional[Tensor]]: + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + embed_dim_to_check: total dimension of the model. + num_heads: parallel attention heads. + in_proj_weight, in_proj_bias: input projection weight and bias. + bias_k, bias_v: bias of the key and value sequences to be added at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + dropout_p: probability of an element to be zeroed. + out_proj_weight, out_proj_bias: the output projection weight and bias. + training: apply dropout if is ``True``. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. This is an binary mask. When the value is True, + the corresponding value on the attention layer will be filled with -inf. + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. + use_separate_proj_weight: the function accept the proj. weights for query, key, + and value in different forms. If false, in_proj_weight will be used, which is + a combination of q_proj_weight, k_proj_weight, v_proj_weight. + q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias. + static_k, static_v: static key and value used for attention operators. + + + Shape: + Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions + will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. 
If a BoolTensor is provided, positions with ``True`` + are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + - return_qk: whether return Q and K. + - rpb: relative postion bias + + Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. + """ + if not torch.jit.is_scripting(): + tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, + out_proj_weight, out_proj_bias) + if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops): + return handle_torch_function( + multi_head_attention_forward, tens_ops, query, key, value, + embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias, + bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight, + out_proj_bias, training=training, key_padding_mask=key_padding_mask, + need_weights=need_weights, attn_mask=attn_mask, + use_separate_proj_weight=use_separate_proj_weight, + q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight, + v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v) + tgt_len, bsz, embed_dim = query.size() + assert embed_dim == embed_dim_to_check + # allow MHA to have different sizes for the feature dimension + assert key.size(0) == value.size(0) and key.size(1) == value.size(1) + + head_dim = embed_dim // num_heads + assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads" + scaling = float(head_dim) ** -0.5 + + if not use_separate_proj_weight: + if torch.equal(query, key) and torch.equal(key, value): + # self-attention + q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1) + + elif torch.equal(key, value): + # encoder-decoder attention + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = linear(query, _w, _b) + + if key is None: + assert value is None + k = None + v = None + else: + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + k, v = linear(key, _w, _b).chunk(2, dim=-1) + + else: + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = linear(query, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = embed_dim * 2 + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + k = linear(key, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim * 2 + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + 
v = linear(value, _w, _b) + else: + q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight) + len1, len2 = q_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == query.size(-1) + + k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight) + len1, len2 = k_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == key.size(-1) + + v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight) + len1, len2 = v_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == value.size(-1) + + if in_proj_bias is not None: + q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim]) + k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)]) + v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):]) + else: + q = linear(query, q_proj_weight_non_opt, in_proj_bias) + k = linear(key, k_proj_weight_non_opt, in_proj_bias) + v = linear(value, v_proj_weight_non_opt, in_proj_bias) + q = q * scaling + + if attn_mask is not None: + assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \ + attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \ + 'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype) + if attn_mask.dtype == torch.uint8: + warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.") + attn_mask = attn_mask.to(torch.bool) + + if attn_mask.dim() == 2: + attn_mask = attn_mask.unsqueeze(0) + if list(attn_mask.size()) != [1, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 2D attn_mask is not correct.') + elif attn_mask.dim() == 3: + if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 3D attn_mask is not correct.') + else: + raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim())) + # attn_mask's dim is 3 now. + + # convert ByteTensor key_padding_mask to bool + if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8: + warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.") + key_padding_mask = key_padding_mask.to(torch.bool) + + if bias_k is not None and bias_v is not None: + if static_k is None and static_v is None: + k = torch.cat([k, bias_k.repeat(1, bsz, 1)]) + v = torch.cat([v, bias_v.repeat(1, bsz, 1)]) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + else: + assert static_k is None, "bias cannot be added to static key." + assert static_v is None, "bias cannot be added to static value." 
+ else: + assert bias_k is None + assert bias_v is None + + # L, N, E + if return_qk: + return_q = q.clone() / scaling + return_k = k.clone() + + q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1) + if k is not None: + k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + if v is not None: + v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + + if static_k is not None: + assert static_k.size(0) == bsz * num_heads + assert static_k.size(2) == head_dim + k = static_k + + if static_v is not None: + assert static_v.size(0) == bsz * num_heads + assert static_v.size(2) == head_dim + v = static_v + + src_len = k.size(1) + + if key_padding_mask is not None: + assert key_padding_mask.size(0) == bsz + assert key_padding_mask.size(1) == src_len + + if add_zero_attn: + src_len += 1 + k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1) + v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + + attn_output_weights = torch.bmm(q, k.transpose(1, 2)) + assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len] + + if attn_mask is not None: + if attn_mask.dtype == torch.bool: + attn_output_weights.masked_fill_(attn_mask, float('-inf')) + else: + attn_output_weights += attn_mask + + + if key_padding_mask is not None: + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + attn_output_weights = attn_output_weights.masked_fill( + key_padding_mask.unsqueeze(1).unsqueeze(2), + float('-inf'), + ) + attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len) + + if rpb is not None: + attn_output_weights = attn_output_weights + rpb + attn_output_weights = softmax(attn_output_weights, dim=-1) + attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training) + + attn_output = torch.bmm(attn_output_weights, v) + assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim] + attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) + attn_output = linear(attn_output, out_proj_weight, out_proj_bias) + + if return_qk: + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return return_q, return_k, attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return return_q, return_k, attn_output, None + else: + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return attn_output, None diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/clip_vit.py b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/clip_vit.py new file mode 100644 index 0000000..5b8ea39 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/clip_vit.py @@ -0,0 +1,212 @@ +#!/usr/bin/env python + +import os +from collections import OrderedDict + +from timm.models.layers import DropPath +import torch +from torch import nn +import torch.utils.checkpoint as checkpoint + +from .attention import MultiheadAttention + + +MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model' +_MODELS = { + "ViT-B/32": 
os.path.join(MODEL_PATH, "vit_b32.pth"), + "ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"), + "ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"), + "ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"), +} + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def __init__(self, d_model, n_head, attn_mask=None, drop_path=0.0): + super().__init__() + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + print(f'Drop path rate: {drop_path}') + self.attn = MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + def attention(self, x, return_qk=False): + if return_qk: + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + q, k, attn_output, _ = self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask, return_qk=True) + return q, k, attn_output + else: + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def forward(self, x, return_qk=False): + if return_qk: + q, k, attn_output = self.attention(self.ln_1(x), return_qk=True) + x = x + self.drop_path(attn_output) + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x, q, k + else: + x = x + self.drop_path(self.attention(self.ln_1(x))) + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class Transformer(nn.Module): + def __init__(self, width, layers, heads, attn_mask=None, drop_path_rate=0.): + super().__init__() + self.width = width + self.layers = layers + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, layers)] + self.resblocks = nn.ModuleList([ + ResidualAttentionBlock(width, heads, attn_mask, drop_path=dpr[i]) for i in range(layers) + ]) + + def forward(self, x, return_num=4, T=8): + features = [] + for i, resblock in enumerate(self.resblocks): + x = resblock(x) + if i >= self.layers - return_num: + L, NT, C = x.shape + N = NT // T + features.append(x.view(L, N, T, C)) + return features + + +class VisionTransformer(nn.Module): + def __init__( + self, input_resolution, patch_size, width, layers, heads, output_dim, drop_path_rate=0., + ): + super().__init__() + self.input_resolution = input_resolution + self.output_dim = output_dim + self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False) + + scale = width ** -0.5 + self.class_embedding = nn.Parameter(scale * torch.randn(width)) + self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)) + self.ln_pre = LayerNorm(width) + + self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate) + + def forward(self, x, return_num=4, return_qk=True): + N, C, T, H, W = x.shape + x = x.permute(0, 2, 1, 3, 4).reshape(N * T, C, H, W) + + x = self.conv1(x) # shape = [*, width, grid, grid] + x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2] + x = x.permute(0, 2, 1) # 
shape = [*, grid ** 2, width] + x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] + x = x + self.positional_embedding.to(x.dtype) + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) # NLD -> LND + features = self.transformer( + x, return_num=return_num, T=T, + ) + + return features + + +def vit_b32(pretrained=True, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=224, + patch_size=32, + width=768, + layers=12, + heads=12, + output_dim=512, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu') + model.load_state_dict(state_dict) + return model.eval() + + +def vit_b16(pretrained=True, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=224, + patch_size=16, + width=768, + layers=12, + heads=12, + output_dim=512, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu') + model.load_state_dict(state_dict) + return model.eval() + + +def vit_l14(pretrained=True, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=224, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu') + model.load_state_dict(state_dict) + return model.eval() + + +def vit_l14_336(pretrained=True, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=336, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu') + model.load_state_dict(state_dict) + return model.eval() + + +if __name__ == '__main__': + import time + from fvcore.nn import FlopCountAnalysis + from fvcore.nn import flop_count_table + + model = vit_b32(pretrained=True) + + flops = FlopCountAnalysis(model, torch.rand(1, 3, 8, 224, 224)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/clip_vit_2plus1d.py b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/clip_vit_2plus1d.py new file mode 100644 index 0000000..3ad38bf --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/clip_vit_2plus1d.py @@ -0,0 +1,255 @@ +import os +from collections import OrderedDict + +from timm.models.layers import DropPath +import torch +from torch import nn +from einops import rearrange + +from .attention import MultiheadAttention + + +MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model' +_MODELS = { + "ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"), + "ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"), + "ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"), + "ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"), +} + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def 
__init__(self, d_model, n_head, attn_mask=None, drop_path=0.0): + super().__init__() + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + print(f'Drop path rate: {drop_path}') + # temporal + self.attn_t = MultiheadAttention(d_model, n_head) + self.ln_t = LayerNorm(d_model) + # spatial + self.attn = MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + # init zero + print('Init zero for (2+1)d') + nn.init.constant_(self.attn_t.in_proj_weight, 0) + nn.init.constant_(self.attn_t.in_proj_bias, 0) + nn.init.constant_(self.attn_t.out_proj.weight, 1) + nn.init.constant_(self.attn_t.out_proj.bias, 0) + + def attention(self, x): + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def attention_temporal(self, x): + self.attn_mask = None + return self.attn_t(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def forward(self, x, T=8): + # temporal + # x: 1+HWT, N, C + xt = x[1:, :, :] + _, N, C = xt.shape + xt = rearrange(xt, '(l t) n c -> t (n l) c', n=N, t=T) + res_temporal = self.attention_temporal(self.ln_t(xt)) + res_temporal = rearrange(res_temporal, 't (n l) c -> (l t) n c', n=N, t=T) + xt = x[1:, :, :] + self.drop_path(res_temporal) + + # spatial + init_cls_token = x[:1, :, :] + cls_token = init_cls_token.repeat(1, T, 1).view(1, T*N, C) + xs = rearrange(xt, '(l t) n c -> l (t n) c', n=N, t=T) + xs = torch.cat((cls_token, xs), 0) + res_spatial = self.attention(self.ln_1(xs)) + + # Taking care of CLS token + cls_token = res_spatial[0, :, :] + cls_token = rearrange(cls_token, '(t n) c -> t n c', n=N) + cls_token = torch.mean(cls_token, 0, True) # averaging for every frame + res_spatial = res_spatial[1:, :, :] + res_spatial = rearrange(res_spatial, 'l (t n) c -> (l t) n c', n=N) + x = x + self.drop_path(torch.cat((cls_token, res_spatial), 0)) + + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class Transformer(nn.Module): + def __init__(self, width, layers, heads, attn_mask=None, drop_path_rate=0.): + super().__init__() + self.width = width + self.layers = layers + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, layers)] + self.resblocks = nn.ModuleList([ + ResidualAttentionBlock(width, heads, attn_mask, drop_path=dpr[i]) for i in range(layers) + ]) + + def forward(self, x, return_num=4, T=8): + features = [] + for i, resblock in enumerate(self.resblocks): + x = resblock(x, T=T) + if i >= self.layers - return_num: + # LT + 1, N, C + LT, N, C = x.shape + L = (LT - 1) // T + cls_x, tmp_x = x[:1], x[1:] + cls_x = cls_x.unsqueeze(2).repeat(1, 1, T, 1) + tmp_x = tmp_x.reshape(L, T, N, C).permute(0, 2, 1, 3) # L, N, T, C + tmp_x = torch.cat([cls_x, tmp_x], dim=0 )# L + 1, N, T, C + features.append(tmp_x) + return features + + +class VisionTransformer(nn.Module): + def __init__( + self, input_resolution, patch_size, width, layers, heads, output_dim, num_frames=8, drop_path_rate=0., + ): + super().__init__() + self.input_resolution = input_resolution + self.output_dim = output_dim + self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False) + + scale = width ** -0.5 + self.class_embedding = nn.Parameter(scale * 
torch.randn(width)) + self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)) + self.temporal_positional_embedding = nn.Parameter(torch.zeros(1, num_frames, width)) + self.ln_pre = LayerNorm(width) + + self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate) + + def forward(self, x, return_num=4): + N, C, T, H, W = x.shape + x = x.permute(0, 2, 1, 3, 4).reshape(N * T, C, H, W) + + x = self.conv1(x) # shape = [*, width, grid, grid] + x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2] + x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width] + x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] + x = x + self.positional_embedding.to(x.dtype) + + cls_tokens = x[:N, :1, :] + x = x[:, 1:] + x = rearrange(x, '(b t) n c -> (b n) t c', b=N, t=T) + x = x + self.temporal_positional_embedding + x = rearrange(x, '(b n) t c -> b (n t) c', b=N, t=T) + x = torch.cat((cls_tokens, x), dim=1) + + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) # NLD -> LND + features = self.transformer( + x, return_num=return_num, T=T, + ) + + return features + + +def vit_2plus1d_b32(pretrained=True, num_frames=8, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=224, + patch_size=32, + width=768, + layers=12, + heads=12, + output_dim=512, + num_frames=num_frames, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +def vit_2plus1d_b16(pretrained=True, num_frames=8, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=224, + patch_size=16, + width=768, + layers=12, + heads=12, + output_dim=512, + num_frames=num_frames, + drop_path_rate=drop_path_rate, + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +def vit_2plus1d_l14(pretrained=True, num_frames=8, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=224, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + num_frames=num_frames, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +def vit_2plus1d_l14_336(pretrained=True, num_frames=8, drop_path_rate=0.): + model = VisionTransformer( + input_resolution=336, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + num_frames=num_frames, + drop_path_rate=drop_path_rate + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +if __name__ == '__main__': + import time + from fvcore.nn import FlopCountAnalysis + from fvcore.nn import flop_count_table + + model = vit_2plus1d_b32(pretrained=True) + + flops = FlopCountAnalysis(model, torch.rand(1, 3, 8, 224, 224)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) \ No newline at end of file diff --git 
a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/clip_vit_2plus1d_dw_bias.py b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/clip_vit_2plus1d_dw_bias.py new file mode 100644 index 0000000..c6007c4 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/clip_vit_2plus1d_dw_bias.py @@ -0,0 +1,317 @@ +import os +from collections import OrderedDict + +from timm.models.layers import DropPath +import torch +from torch import nn +from einops import rearrange +import torch.utils.checkpoint as checkpoint + +from .attention_bias import MultiheadAttention + + +MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model' +_MODELS = { + "ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"), + "ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"), + "ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"), + "ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"), +} + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def __init__(self, d_model, n_head, attn_mask=None, drop_path=0.0, t_size=8, spatial_size=7): + super().__init__() + + self.n_head = n_head + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + print(f'Drop path rate: {drop_path}') + print(f'Add RPB: t_size {t_size}, spatial_size {spatial_size}') + self.pos_embed = nn.Conv3d(d_model, d_model, kernel_size=3, stride=1, padding=1, groups=d_model) + # temporal + self.attn_t = MultiheadAttention(d_model, n_head) + self.ln_t = LayerNorm(d_model) + self.rpb_t = nn.Parameter(torch.zeros([t_size * 2 - 1, n_head])) + + idx_tensor_t = torch.zeros([t_size, t_size], dtype=torch.long) + for q in range(t_size): + for k in range(t_size): + offs = q - k + t_size - 1 + idx_tensor_t[q, k] = offs + self.idx_tensor_t = idx_tensor_t + + # spatial + self.attn = MultiheadAttention(d_model, n_head) + self.rpb = nn.Parameter(torch.zeros([(spatial_size * 2 - 1) ** 2, n_head])) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + idx_tensor = torch.zeros([spatial_size ** 2, spatial_size ** 2], dtype=torch.long) + for q in range(spatial_size ** 2): + qi, qj = q // spatial_size, q % spatial_size + for k in range(spatial_size ** 2): + ki, kj = k // spatial_size, k % spatial_size + i_offs = qi - ki + spatial_size - 1 + j_offs = qj - kj + spatial_size - 1 + idx_tensor[q, k] = i_offs * (spatial_size * 2 - 1) + j_offs + self.idx_tensor = idx_tensor + + # init zero + print('Init zero for (2+1)d') + nn.init.constant_(self.pos_embed.weight, 0) + nn.init.constant_(self.pos_embed.bias, 0) + nn.init.constant_(self.attn_t.in_proj_weight, 0) + nn.init.constant_(self.attn_t.in_proj_bias, 0) + nn.init.constant_(self.attn_t.out_proj.weight, 1) + nn.init.constant_(self.attn_t.out_proj.bias, 0) + + def attention(self, x, rpb=None): + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask, rpb=rpb)[0] + + def attention_temporal(self, x, rpb=None): + self.attn_mask = None + return 
self.attn_t(x, x, x, need_weights=False, attn_mask=self.attn_mask, rpb=rpb)[0] + + def forward(self, x, T=8, mode='video'): + # temporal + # x: 1+HWT, N, C + # pos_emb + tmp_x = x[1:, :, :] + LT, N, C = tmp_x.shape + L = LT // T + H = W = int(L ** 0.5) + tmp_x = tmp_x.view(H, W, T, N, C).permute(3, 4, 2, 0, 1) + tmp_x = tmp_x + self.pos_embed(tmp_x) + tmp_x = tmp_x.view(N, C, T, L).permute(3, 2, 0, 1).view(LT, N, C) + x[1:, :, :] = tmp_x + + xt = x[1:, :, :] + _, N, C = xt.shape + xt = rearrange(xt, '(l t) n c -> t (n l) c', n=N, t=T) + # no rpb_t for image + if mode == 'image': + rpb_t = None + else: + # rpb_t: T, T, H => B*H, T, T + self.idx_tensor_t = self.idx_tensor_t.to(xt.device) + rpb_t = self.rpb_t[self.idx_tensor_t].permute(2, 0, 1).repeat(N*L, 1, 1) + res_temporal = self.attention_temporal(self.ln_t(xt), rpb=rpb_t) + res_temporal = rearrange(res_temporal, 't (n l) c -> (l t) n c', n=N, t=T) + xt = x[1:, :, :] + self.drop_path(res_temporal) + + # spatial + init_cls_token = x[:1, :, :] + cls_token = init_cls_token.repeat(1, T, 1).view(1, T*N, C) + xs = rearrange(xt, '(l t) n c -> l (t n) c', n=N, t=T) + xs = torch.cat((cls_token, xs), 0) + # rpb: L, L, H => B*H, L+1, L+1 + rpb = torch.zeros((self.n_head, L+1, L+1), device=xs.device, dtype=xs.dtype) + self.idx_tensor = self.idx_tensor.to(xs.device) + rpb[:, 1:, 1:] = self.rpb[self.idx_tensor].permute(2, 0, 1) + rpb = rpb.repeat(T*N, 1, 1) + res_spatial = self.attention(self.ln_1(xs), rpb=rpb) + + # Taking care of CLS token + cls_token = res_spatial[0, :, :] + cls_token = rearrange(cls_token, '(t n) c -> t n c', n=N) + cls_token = torch.mean(cls_token, 0, True) # averaging for every frame + res_spatial = res_spatial[1:, :, :] + res_spatial = rearrange(res_spatial, 'l (t n) c -> (l t) n c', n=N) + x = x + self.drop_path(torch.cat((cls_token, res_spatial), 0)) + + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class Transformer(nn.Module): + def __init__(self, width, layers, heads, attn_mask=None, drop_path_rate=0., t_size=8, spatial_size=7): + super().__init__() + self.width = width + self.layers = layers + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, layers)] + self.resblocks = nn.ModuleList([ + ResidualAttentionBlock(width, heads, attn_mask, + drop_path=dpr[i], t_size=t_size, spatial_size=spatial_size) for i in range(layers) + ]) + + def forward(self, x, return_num=4, T=8, mode='video'): + features = [] + for i, resblock in enumerate(self.resblocks): + x = resblock(x, T=T, mode=mode) + if i >= self.layers - return_num: + # LT + 1, N, C + LT, N, C = x.shape + L = (LT - 1) // T + cls_x, tmp_x = x[:1], x[1:] + cls_x = cls_x.unsqueeze(2).repeat(1, 1, T, 1) + tmp_x = tmp_x.reshape(L, T, N, C).permute(0, 2, 1, 3) # L, N, T, C + tmp_x = torch.cat([cls_x, tmp_x], dim=0 )# L + 1, N, T, C + features.append(tmp_x) + return features + + +class VisionTransformer(nn.Module): + def __init__( + self, input_resolution, patch_size, width, layers, heads, output_dim, + num_frames=8, drop_path_rate=0., t_size=8, spatial_size=7 + ): + super().__init__() + self.input_resolution = input_resolution + self.output_dim = output_dim + self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False) + + scale = width ** -0.5 + self.class_embedding = nn.Parameter(scale * torch.randn(width)) + self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)) + self.temporal_positional_embedding = nn.Parameter(torch.zeros(1, num_frames, 
width)) + self.ln_pre = LayerNorm(width) + + self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate, t_size=t_size, spatial_size=spatial_size) + + def forward(self, x, return_num=4, mode='video'): + if len(x.size()) == 5: + N, C, T, H, W = x.shape + x = x.permute(0, 2, 1, 3, 4).reshape(N * T, C, H, W) + + x = self.conv1(x) # shape = [*, width, grid, grid] + x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2] + x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width] + x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] + x = x + self.positional_embedding.to(x.dtype) + + cls_tokens = x[:N, :1, :] + x = x[:, 1:] + # add temporal position embedding for video + if mode == 'video': + x = rearrange(x, '(b t) n c -> (b n) t c', b=N, t=T) + x = x + self.temporal_positional_embedding + x = rearrange(x, '(b n) t c -> b (n t) c', b=N, t=T) + else: + pass + x = torch.cat((cls_tokens, x), dim=1) + + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) # NLD -> LND + features = self.transformer( + x, return_num=return_num, T=T, mode=mode + ) + + return features + + +def vit_2plus1d_dw_bias_b32(pretrained=True, num_frames=8, drop_path_rate=0., t_size=8): + model = VisionTransformer( + input_resolution=224, + patch_size=32, + width=768, + layers=12, + heads=12, + output_dim=512, + num_frames=num_frames, + drop_path_rate=drop_path_rate, + t_size=t_size, + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +def vit_2plus1d_dw_bias_b16(pretrained=True, num_frames=8, drop_path_rate=0., t_size=8): + model = VisionTransformer( + input_resolution=224, + patch_size=16, + width=768, + layers=12, + heads=12, + output_dim=512, + num_frames=num_frames, + drop_path_rate=drop_path_rate, + t_size=t_size, + spatial_size=14, + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +def vit_2plus1d_dw_bias_l14(pretrained=True, num_frames=8, drop_path_rate=0., t_size=8): + model = VisionTransformer( + input_resolution=224, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + num_frames=num_frames, + drop_path_rate=drop_path_rate, + t_size=t_size, + + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +def vit_2plus1d_dw_bias_l14_336(pretrained=True, num_frames=8, drop_path_rate=0., t_size=8): + model = VisionTransformer( + input_resolution=336, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + num_frames=num_frames, + drop_path_rate=drop_path_rate, + t_size=t_size, + + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu') + model.load_state_dict(state_dict, strict=False) + return model.eval() + + +if __name__ == '__main__': + import time + from fvcore.nn import FlopCountAnalysis + from fvcore.nn import flop_count_table + + model = vit_2plus1d_dw_bias_b32(pretrained=True) + + flops = FlopCountAnalysis(model, torch.rand(4, 3, 8, 224, 224)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + 
print(time.time()-s) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/clip_vit_fusion.py b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/clip_vit_fusion.py new file mode 100644 index 0000000..aa6932c --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/clip_vit_fusion.py @@ -0,0 +1,521 @@ +#!/usr/bin/env python +import os +from collections import OrderedDict + +from timm.models.layers import DropPath +import torch +from torch import nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint + +from .attention import MultiheadAttention +from ipdb import set_trace + + +MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model' +_MODELS = { + "ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"), + "ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"), + "ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"), + "ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"), +} + + +def conv_1x1x1(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (1, 1, 1), (1, 1, 1), (0, 0, 0), groups=groups) + +def conv_3x3x3(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (3, 3, 3), (1, 1, 1), (1, 1, 1), groups=groups) + +def conv_1x3x3(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (1, 3, 3), (1, 1, 1), (0, 1, 1), groups=groups) + +def bn_3d(dim): + return nn.BatchNorm3d(dim) + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(1.702 * x) + + +class Local_MHRA(nn.Module): + def __init__(self, d_model, dw_reduction=1.5, pos_kernel_size=3): + super().__init__() + + padding = pos_kernel_size // 2 + re_d_model = int(d_model // dw_reduction) + self.pos_embed = nn.Sequential( + nn.BatchNorm3d(d_model), + nn.Conv3d(d_model, re_d_model, kernel_size=1, stride=1, padding=0), + nn.Conv3d(re_d_model, re_d_model, kernel_size=(pos_kernel_size, 1, 1), stride=(1, 1, 1), padding=(padding, 0, 0), groups=re_d_model), + nn.Conv3d(re_d_model, d_model, kernel_size=1, stride=1, padding=0), + ) + + # init zero + print('Init zero for Conv in pos_emb') + nn.init.constant_(self.pos_embed[3].weight, 0) + nn.init.constant_(self.pos_embed[3].bias, 0) + + def forward(self, x): + return self.pos_embed(x) + + +class ResidualAttentionBlock(nn.Module): + def __init__( + self, d_model, n_head, attn_mask=None, drop_path=0.0, + dw_reduction=1.5, + ): + super().__init__() + + self.n_head = n_head + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + print(f'Drop path rate: {drop_path}') + self.lmhra1 = Local_MHRA(d_model, dw_reduction=dw_reduction) + self.lmhra2 = Local_MHRA(d_model, dw_reduction=dw_reduction) + + # spatial + self.attn = MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + def attention(self, x): + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def forward(self, x, T=8): + # x: 1+HW, NT, C + # Local MHRA + tmp_x = x[1:, :, :] + L, NT, C = tmp_x.shape + N = NT // T + H = W = int(L ** 0.5) + tmp_x = tmp_x.view(H, W, N, T, C).permute(2, 4, 3, 0, 1).contiguous() + tmp_x = tmp_x + self.drop_path(self.lmhra1(tmp_x)) + tmp_x = tmp_x.view(N, C, T, L).permute(3, 0, 2, 1).contiguous().view(L, NT, C) + x = torch.cat([x[:1, :, :], tmp_x], dim=0) + # MHSA + x = x + self.drop_path(self.attention(self.ln_1(x))) + # Local MHRA + tmp_x = x[1:, :, :] + tmp_x = tmp_x.view(H, W, N, T, C).permute(2, 4, 3, 0, 1).contiguous() + tmp_x = tmp_x + self.drop_path(self.lmhra2(tmp_x)) + tmp_x = tmp_x.view(N, C, T, L).permute(3, 0, 2, 1).contiguous().view(L, NT, C) + x = torch.cat([x[:1, :, :], tmp_x], dim=0) + # FFN + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class Extractor(nn.Module): + def __init__( + self, d_model, n_head, attn_mask=None, + mlp_factor=4.0, dropout=0.0, drop_path=0.0, + ): + super().__init__() + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + print(f'Drop path rate: {drop_path}') + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = nn.LayerNorm(d_model) + d_mlp = round(mlp_factor * d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_mlp)), + ("gelu", QuickGELU()), + ("dropout", nn.Dropout(dropout)), + ("c_proj", nn.Linear(d_mlp, d_model)) + ])) + self.ln_2 = nn.LayerNorm(d_model) + self.ln_3 = nn.LayerNorm(d_model) + self.attn_mask = attn_mask + + # zero init + nn.init.xavier_uniform_(self.attn.in_proj_weight) + nn.init.constant_(self.attn.out_proj.weight, 0.) + nn.init.constant_(self.attn.out_proj.bias, 0.) + nn.init.xavier_uniform_(self.mlp[0].weight) + nn.init.constant_(self.mlp[-1].weight, 0.) + nn.init.constant_(self.mlp[-1].bias, 0.) 
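+ # attention() below re-implements nn.MultiheadAttention by hand using the packed
+ # in_proj parameters: queries are projected from x (the temporal cls token) and
+ # keys/values from y (the backbone tokens); attn_mask is asserted to be None.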
+ + def attention(self, x: torch.Tensor, y: torch.Tensor): + #self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + # return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0] + assert self.attn_mask is None # not implemented + # manual forward to add position information + d_model = self.ln_1.weight.size(0) + q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model] + + k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model] + v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:] + Tx, Ty, N = q.size(0), k.size(0), q.size(1) + q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5)) + + aff = aff.softmax(dim=-1) + out = aff @ v + out = out.permute(2, 0, 1, 3).flatten(2) + out = self.attn.out_proj(out) + return out + + def forward(self, x: torch.Tensor, y: torch.Tensor): + x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y))) + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class Transformer(nn.Module): + def __init__( + self, width, layers, heads, attn_mask=None, backbone_drop_path_rate=0., + t_size=8, dw_reduction=2, + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, + ): + super().__init__() + self.T = t_size + self.return_list = return_list + # Backbone + b_dpr = [x.item() for x in torch.linspace(0, backbone_drop_path_rate, layers)] + self.resblocks = nn.ModuleList([ + ResidualAttentionBlock( + width, heads, attn_mask, + drop_path=b_dpr[i], + dw_reduction=dw_reduction, + ) for i in range(layers) + ]) + + # Extractor + assert n_layers == len(return_list) + self.temporal_cls_token = nn.Parameter(torch.zeros(1, 1, n_dim)) + self.dpe = nn.ModuleList([ + nn.Conv3d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim) + for i in range(n_layers) + ]) + for m in self.dpe: + nn.init.constant_(m.bias, 0.) 
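+ # Drop-path rates for the Extractor blocks ramp linearly from 0 to drop_path_rate,
+ # one value per decoder layer.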
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)] + self.dec = nn.ModuleList([ + Extractor( + n_dim, n_head, mlp_factor=mlp_factor, + dropout=mlp_dropout[i], drop_path=dpr[i], + ) for i in range(n_layers) + ]) + # # projection + # self.proj = nn.Sequential( + # nn.LayerNorm(n_dim), + # nn.Dropout(cls_dropout), + # nn.Linear(n_dim, num_classes), + # ) + self.balance = nn.Parameter(torch.zeros((n_dim))) + self.sigmoid = nn.Sigmoid() + + def forward(self, x, mode='video', return_all_feats=False): + if mode == 'video': + T_down = self.T + else: + T_down = 1 + L, NT, C = x.shape + N = NT // T_down + H = W = int((L - 1) ** 0.5) + assert H * W == L - 1 + cls_token = self.temporal_cls_token.repeat(1, N, 1) + + j = -1 + for i, resblock in enumerate(self.resblocks): + x = resblock(x, T_down) + if i in self.return_list: + j += 1 + tmp_x = x.clone() + tmp_x = tmp_x.view(L, N, T_down, C) + # dpe + _, tmp_feats = tmp_x[:1], tmp_x[1:] + tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T_down, H, W) + tmp_feats = self.dpe[j](tmp_feats).view(N, C, T_down, L - 1).permute(3, 0, 2, 1) + tmp_x[1:] = tmp_x[1:] + tmp_feats + # enhancer + tmp_x = tmp_x.permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C + cls_token = self.dec[j](cls_token, tmp_x) + + weight = self.sigmoid(self.balance) + residual = x.view(L, N, T_down, C)[0].mean(1) # L, N, T, C + # return self.proj((1 - weight) * cls_token[0, :, :] + weight * residual) + feats = (1 - weight) * cls_token[0, :, :] + weight * residual + if return_all_feats: + return feats, x.view(L, N, T_down, C) + + return feats + + +class VisionTransformer(nn.Module): + def __init__( + self, + # backbone + input_resolution, patch_size, width, layers, heads, output_dim, backbone_drop_path_rate=0., + t_size=8, kernel_size=3, dw_reduction=1.5, + # extractor + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, + + ): + super().__init__() + self.input_resolution = input_resolution + self.output_dim = output_dim + padding = (kernel_size - 1) // 2 + self.conv1 = nn.Conv3d(3, width, (kernel_size, patch_size, patch_size), (2, patch_size, patch_size), (padding, 0, 0), bias=False) + + scale = width ** -0.5 + self.class_embedding = nn.Parameter(scale * torch.randn(width)) + self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)) + self.ln_pre = LayerNorm(width) + + self.transformer = Transformer( + width, layers, heads, dw_reduction=dw_reduction, + backbone_drop_path_rate=backbone_drop_path_rate, t_size=t_size // 2, + return_list=return_list, n_layers=n_layers, n_dim=n_dim, n_head=n_head, + mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, num_classes=num_classes, + ) + + def forward(self, x, mode='video', return_all_feats=False): + # taken from https://github.com/facebookresearch/omnivore/blob/main/omnivore/models/swin_transformer_3d.py#L703 + if mode == 'image': # for image, stride 2 + # ! 
Use replicate here + x = F.pad(x, (0, 0, 0, 0, 0, 1), mode="replicate") + + x = self.conv1(x) # shape = [*, width, grid, grid] + N, C, T, H, W = x.shape + x = x.permute(0, 2, 3, 4, 1).reshape(N * T, H * W, C) + + x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] + x = x + self.positional_embedding.to(x.dtype) + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) # NLD -> LND + out = self.transformer(x, mode=mode, return_all_feats=return_all_feats) + return out + + +def inflate_weight(weight_2d, time_dim, center=True): + if center: + weight_3d = torch.zeros(*weight_2d.shape) + weight_3d = weight_3d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1) + middle_idx = time_dim // 2 + weight_3d[:, :, middle_idx, :, :] = weight_2d + else: + weight_3d = weight_2d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1) + weight_3d = weight_3d / time_dim + return weight_3d + + +def load_state_dict(model, state_dict): + state_dict_3d = model.state_dict() + for k in state_dict.keys(): + if state_dict[k].shape != state_dict_3d[k].shape: + if len(state_dict_3d[k].shape) <= 2: + print(f'Ignore: {k}') + continue + print(f'Inflate: {k}, {state_dict[k].shape} => {state_dict_3d[k].shape}') + time_dim = state_dict_3d[k].shape[2] + state_dict[k] = inflate_weight(state_dict[k], time_dim) + model.load_state_dict(state_dict, strict=False) + +def clip_load_state_dict(model, state_dict): + state_dict_3d = model.state_dict() + for k in state_dict.keys(): + # print(k, k in state_dict_3d, k in state_dict) + if k in state_dict and k in state_dict_3d and state_dict[k].shape != state_dict_3d[k].shape: + if len(state_dict_3d[k].shape) <= 2: + print(f'Ignore: {k}') + continue + print(f'Inflate: {k}, {state_dict[k].shape} => {state_dict_3d[k].shape}') + time_dim = state_dict_3d[k].shape[2] + state_dict[k] = inflate_weight(state_dict[k], time_dim) + model.load_state_dict(state_dict, strict=False) + +def vit_fusion_b32( + pretrained=True, + t_size=16, dw_reduction=1.5, backbone_drop_path_rate=0., + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, + ): + model = VisionTransformer( + input_resolution=224, + patch_size=32, + width=768, + layers=12, + heads=12, + output_dim=512, + t_size=t_size, + dw_reduction=dw_reduction, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu') + load_state_dict(model, state_dict) + return model.eval() + + +def vit_fusion_b16( + pretrained=True, + t_size=16, dw_reduction=1.5, backbone_drop_path_rate=0., + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, +): + model = VisionTransformer( + input_resolution=224, + patch_size=16, + width=768, + layers=12, + heads=12, + output_dim=512, + t_size=t_size, + dw_reduction=dw_reduction, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + 
n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu') + load_state_dict(model, state_dict) + return model.eval() + + +def vit_fusion_l14( + pretrained=True, + t_size=16, dw_reduction=1.5, backbone_drop_path_rate=0., + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, +): + model = VisionTransformer( + input_resolution=224, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + t_size=t_size, + dw_reduction=dw_reduction, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu') + load_state_dict(model, state_dict) + return model.eval() + + +def vit_fusion_l14_336( + pretrained=True, + t_size=16, dw_reduction=1.5, backbone_drop_path_rate=0., + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, +): + model = VisionTransformer( + input_resolution=336, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + t_size=t_size, + dw_reduction=dw_reduction, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + if pretrained: + print('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu') + load_state_dict(model, state_dict) + return model.eval() + + +if __name__ == '__main__': + import time + from fvcore.nn import FlopCountAnalysis + from fvcore.nn import flop_count_table + import numpy as np + + seed = 4217 + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + num_frames = 16 + + model = vit_fusion_b16( + pretrained=False, + t_size=num_frames, backbone_drop_path_rate=0.2, drop_path_rate=0.4, + dw_reduction=1.5, + ) + + flops = FlopCountAnalysis(model, torch.rand(1, 3, num_frames, 224, 224)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/clip_vit_only_global.py b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/clip_vit_only_global.py new file mode 100644 index 0000000..6408ef1 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/clip_vit_only_global.py @@ -0,0 +1,483 @@ +#!/usr/bin/env python +import os +from collections import OrderedDict + +from timm.models.layers import DropPath +import torch +from torch import nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint + +from 
.attention import MultiheadAttention + +import logging +logger = logging.getLogger(__name__) + +MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model' +_MODELS = { + "ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"), + "ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"), + "ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"), + "ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"), +} + + +def conv_1x1x1(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (1, 1, 1), (1, 1, 1), (0, 0, 0), groups=groups) + +def conv_3x3x3(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (3, 3, 3), (1, 1, 1), (1, 1, 1), groups=groups) + +def conv_1x3x3(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (1, 3, 3), (1, 1, 1), (0, 1, 1), groups=groups) + +def bn_3d(dim): + return nn.BatchNorm3d(dim) + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def __init__( + self, d_model, n_head, attn_mask=None, drop_path=0.0, + ): + super().__init__() + + self.n_head = n_head + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + logger.info(f'Drop path rate: {drop_path}') + + # spatial + self.attn = MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + def attention(self, x): + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def forward(self, x, T=8, use_checkpoint=False): + # x: 1+HW, NT, C + # MHSA + if use_checkpoint: + attn_out = checkpoint.checkpoint(self.attention, self.ln_1(x)) + x = x + self.drop_path(attn_out) + else: + x = x + self.drop_path(self.attention(self.ln_1(x))) + # FFN + if use_checkpoint: + mlp_out = checkpoint.checkpoint(self.mlp, self.ln_2(x)) + x = x + self.drop_path(mlp_out) + else: + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class Extractor(nn.Module): + def __init__( + self, d_model, n_head, attn_mask=None, + mlp_factor=4.0, dropout=0.0, drop_path=0.0, + ): + super().__init__() + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + logger.info(f'Drop path rate: {drop_path}') + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = nn.LayerNorm(d_model) + d_mlp = round(mlp_factor * d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_mlp)), + ("gelu", QuickGELU()), + ("dropout", nn.Dropout(dropout)), + ("c_proj", nn.Linear(d_mlp, d_model)) + ])) + self.ln_2 = nn.LayerNorm(d_model) + self.ln_3 = nn.LayerNorm(d_model) + self.attn_mask = attn_mask + + # zero init + nn.init.xavier_uniform_(self.attn.in_proj_weight) + nn.init.constant_(self.attn.out_proj.weight, 0.) + nn.init.constant_(self.attn.out_proj.bias, 0.) + nn.init.xavier_uniform_(self.mlp[0].weight) + nn.init.constant_(self.mlp[-1].weight, 0.) + nn.init.constant_(self.mlp[-1].bias, 0.) 
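+ # The zero init above makes each Extractor start as an identity mapping: both of its
+ # residual branches (cross-attention and MLP) output zeros at initialisation.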
+ + def attention(self, x: torch.Tensor, y: torch.Tensor): + #self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + # return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0] + assert self.attn_mask is None # not implemented + # manual forward to add position information + d_model = self.ln_1.weight.size(0) + q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model] + + k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model] + v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:] + Tx, Ty, N = q.size(0), k.size(0), q.size(1) + q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5)) + + aff = aff.softmax(dim=-1) + out = aff @ v + out = out.permute(2, 0, 1, 3).flatten(2) + out = self.attn.out_proj(out) + return out + + def forward(self, x: torch.Tensor, y: torch.Tensor): + x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y))) + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class Transformer(nn.Module): + def __init__( + self, width, layers, heads, attn_mask=None, backbone_drop_path_rate=0., + use_checkpoint=False, checkpoint_num=[0], t_size=8, + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, + ): + super().__init__() + self.T = t_size + self.return_list = return_list + # Backbone + b_dpr = [x.item() for x in torch.linspace(0, backbone_drop_path_rate, layers)] + self.resblocks = nn.ModuleList([ + ResidualAttentionBlock( + width, heads, attn_mask, + drop_path=b_dpr[i], + ) for i in range(layers) + ]) + # checkpoint + self.use_checkpoint = use_checkpoint + self.checkpoint_num = checkpoint_num + logger.info(f'Use checkpoint: {self.use_checkpoint}') + logger.info(f'Checkpoint number: {self.checkpoint_num}') + + # Extractor + assert n_layers == len(return_list) + self.temporal_cls_token = nn.Parameter(torch.zeros(1, 1, n_dim)) + self.dpe = nn.ModuleList([ + nn.Conv3d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim) + for i in range(n_layers) + ]) + for m in self.dpe: + nn.init.constant_(m.bias, 0.) 
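+ # One Extractor (cross-attention decoder) is created per entry in return_list; their
+ # drop-path rates increase linearly up to drop_path_rate.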
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)] + self.dec = nn.ModuleList([ + Extractor( + n_dim, n_head, mlp_factor=mlp_factor, + dropout=mlp_dropout[i], drop_path=dpr[i], + ) for i in range(n_layers) + ]) + # # projection + # self.proj = nn.Sequential( + # nn.LayerNorm(n_dim), + # nn.Dropout(cls_dropout), + # nn.Linear(n_dim, num_classes), + # ) + self.balance = nn.Parameter(torch.zeros((n_dim))) + self.sigmoid = nn.Sigmoid() + + def forward(self, x, mode='video', return_all_feats=False): + if mode == 'video': + T_down = self.T + else: + T_down = 1 + L, NT, C = x.shape + N = NT // T_down + H = W = int((L - 1) ** 0.5) + cls_token = self.temporal_cls_token.repeat(1, N, 1) + + j = -1 + for i, resblock in enumerate(self.resblocks): + if self.use_checkpoint and i < self.checkpoint_num[0]: + x = resblock(x, self.T, use_checkpoint=True) + else: + x = resblock(x, T_down) + if i in self.return_list: + j += 1 + tmp_x = x.clone() + tmp_x = tmp_x.view(L, N, T_down, C) + # dpe + _, tmp_feats = tmp_x[:1], tmp_x[1:] + tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T_down, H, W) + tmp_feats = self.dpe[j](tmp_feats).view(N, C, T_down, L - 1).permute(3, 0, 2, 1) + tmp_x[1:] = tmp_x[1:] + tmp_feats + # enhancer + tmp_x = tmp_x.permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C + cls_token = self.dec[j](cls_token, tmp_x) + + weight = self.sigmoid(self.balance) + residual = x.view(L, N, T_down, C)[0].mean(1) # L, N, T, C + # return self.proj((1 - weight) * cls_token[0, :, :] + weight * residual) + feats = (1 - weight) * cls_token[0, :, :] + weight * residual + if return_all_feats: + return feats, x.view(L, N, T_down, C) + + return feats + + +class VisionTransformer(nn.Module): + def __init__( + self, + # backbone + input_resolution, patch_size, width, layers, heads, output_dim, backbone_drop_path_rate=0., + use_checkpoint=False, checkpoint_num=[0], t_size=8, + # extractor + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, + + ): + super().__init__() + self.input_resolution = input_resolution + self.output_dim = output_dim + self.conv1 = nn.Conv3d(3, width, (1, patch_size, patch_size), (1, patch_size, patch_size), (0, 0, 0), bias=False) + + scale = width ** -0.5 + self.class_embedding = nn.Parameter(scale * torch.randn(width)) + self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)) + self.ln_pre = LayerNorm(width) + + self.transformer = Transformer( + width, layers, heads, + backbone_drop_path_rate=backbone_drop_path_rate, + use_checkpoint=use_checkpoint, checkpoint_num=checkpoint_num, t_size=t_size, + return_list=return_list, n_layers=n_layers, n_dim=n_dim, n_head=n_head, + mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, num_classes=num_classes, + ) + + def forward(self, x, mode='video', return_all_feats=False): + x = self.conv1(x) # shape = [*, width, grid, grid] + N, C, T, H, W = x.shape + x = x.permute(0, 2, 3, 4, 1).reshape(N * T, H * W, C) + + x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] + x = x + self.positional_embedding.to(x.dtype) + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) # NLD -> LND + out = self.transformer(x, mode=mode, 
return_all_feats=return_all_feats) + return out + + +def inflate_weight(weight_2d, time_dim, center=True): + if center: + weight_3d = torch.zeros(*weight_2d.shape) + weight_3d = weight_3d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1) + middle_idx = time_dim // 2 + weight_3d[:, :, middle_idx, :, :] = weight_2d + else: + weight_3d = weight_2d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1) + weight_3d = weight_3d / time_dim + return weight_3d + + +def load_state_dict(model, state_dict): + state_dict_3d = model.state_dict() + for k in state_dict.keys(): + if state_dict[k].shape != state_dict_3d[k].shape: + if len(state_dict_3d[k].shape) <= 2: + logger.info(f'Ignore: {k}') + continue + logger.info(f'Inflate: {k}, {state_dict[k].shape} => {state_dict_3d[k].shape}') + time_dim = state_dict_3d[k].shape[2] + state_dict[k] = inflate_weight(state_dict[k], time_dim) + model.load_state_dict(state_dict, strict=False) + + +def vit_only_global_b32( + pretrained=True, use_checkpoint=False, checkpoint_num=[0], + t_size=16, backbone_drop_path_rate=0., + return_list=[8, 9, 10, 11], + n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, + ): + model = VisionTransformer( + input_resolution=224, + patch_size=32, + width=768, + layers=12, + heads=12, + output_dim=512, + use_checkpoint=use_checkpoint, + checkpoint_num=checkpoint_num, + t_size=t_size, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + if pretrained: + logger.info('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu') + load_state_dict(model, state_dict) + return model.eval() + + +def vit_only_global_b16( + pretrained=True, use_checkpoint=False, checkpoint_num=[0], + t_size=16, backbone_drop_path_rate=0., + return_list=[8, 9, 10, 11], + n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, +): + model = VisionTransformer( + input_resolution=224, + patch_size=16, + width=768, + layers=12, + heads=12, + output_dim=512, + use_checkpoint=use_checkpoint, + checkpoint_num=checkpoint_num, + t_size=t_size, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + if pretrained: + logger.info('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu') + load_state_dict(model, state_dict) + return model.eval() + + +def vit_only_global_l14( + pretrained=True, use_checkpoint=False, checkpoint_num=[0], + t_size=16, backbone_drop_path_rate=0., + return_list=[20, 21, 22, 23], + n_layers=4, n_dim=1024, n_head=16, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, +): + model = VisionTransformer( + input_resolution=224, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + use_checkpoint=use_checkpoint, + checkpoint_num=checkpoint_num, + t_size=t_size, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + 
drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + if pretrained: + logger.info('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu') + load_state_dict(model, state_dict) + return model.eval() + + +def vit_only_global_l14_336( + pretrained=True, use_checkpoint=False, checkpoint_num=[0], + t_size=16, backbone_drop_path_rate=0., + return_list=[20, 21, 22, 23], + n_layers=4, n_dim=1024, n_head=16, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, +): + model = VisionTransformer( + input_resolution=336, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + use_checkpoint=use_checkpoint, + checkpoint_num=checkpoint_num, + t_size=t_size, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + if pretrained: + logger.info('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu') + load_state_dict(model, state_dict) + return model.eval() + + +if __name__ == '__main__': + import time + from fvcore.nn import FlopCountAnalysis + from fvcore.nn import flop_count_table + import numpy as np + + seed = 4217 + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + num_frames = 8 + + model = vit_only_global_l14( + pretrained=False, + t_size=num_frames, backbone_drop_path_rate=0.2, drop_path_rate=0.4, + use_checkpoint=True, checkpoint_num=[0], + ) + + flops = FlopCountAnalysis(model, torch.rand(1, 3, num_frames, 224, 224)) + s = time.time() + logger.info(flop_count_table(flops, max_depth=1)) + logger.info(time.time()-s) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/evl_module.py b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/evl_module.py new file mode 100644 index 0000000..a1f688e --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/evl_module.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python + +from collections import OrderedDict + +from timm.models.layers import DropPath +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor): + return x * torch.sigmoid(1.702 * x) + + +class ResidualDecoderBlock(nn.Module): + def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None, + mlp_factor: float = 4.0, dropout: float = 0.0, drop_path: float = 0.0): + super().__init__() + + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + print(f'Drop path rate: {drop_path}') + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = nn.LayerNorm(d_model) + d_mlp = round(mlp_factor * d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_mlp)), + ("gelu", QuickGELU()), + ("dropout", nn.Dropout(dropout)), + ("c_proj", nn.Linear(d_mlp, d_model)) + ])) + self.ln_2 = nn.LayerNorm(d_model) + self.ln_3 = nn.LayerNorm(d_model) + self.attn_mask = attn_mask + + nn.init.xavier_uniform_(self.attn.in_proj_weight) + nn.init.xavier_uniform_(self.attn.out_proj.weight) + nn.init.xavier_uniform_(self.mlp[0].weight) + nn.init.xavier_uniform_(self.mlp[-1].weight) + + def attention(self, x: torch.Tensor, y: torch.Tensor): + #self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + # return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0] + assert self.attn_mask is None # not implemented + # manual forward to add position information + d_model = self.ln_1.weight.size(0) + q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model] + + k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model] + v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:] + Tx, Ty, N = q.size(0), k.size(0), q.size(1) + q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5)) + + aff = aff.softmax(dim=-1) + out = aff @ v + out = out.permute(2, 0, 1, 3).flatten(2) + out = self.attn.out_proj(out) + return out + + def forward(self, x: torch.Tensor, y: torch.Tensor): + x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y))) + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class TransformerDecoder(nn.Module): + def __init__(self, n_layers=4, + n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, + use_t_conv=True, use_t_pos_embed=True, num_classes=400, + add_residual=False, + ): + super().__init__() + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)] + self.dec = nn.ModuleList([ + ResidualDecoderBlock(n_dim, n_head, mlp_factor=mlp_factor, dropout=mlp_dropout[i], drop_path=dpr[i]) + for i in range(n_layers) + ]) + self.proj = nn.Sequential( + nn.LayerNorm(n_dim), + nn.Dropout(cls_dropout), + nn.Linear(n_dim, num_classes), + ) + self.temporal_cls_token = nn.Parameter(torch.zeros(n_dim)) + self.add_residual = add_residual + print(f'Add residual {add_residual}') + + if use_t_conv: + self.tconv = nn.ModuleList([ + nn.Conv1d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim) + for i in range(n_layers) + ]) + for m in self.tconv: + nn.init.constant_(m.bias, 0.) + m.weight.data[...] 
= torch.Tensor([0, 1, 0]) + else: + self.tconv = None + + if use_t_pos_embed: + self.pemb_t = nn.Parameter(torch.zeros([n_layers, t_size, n_dim])) + else: + self.pemb_t = None + + self.t_size = t_size + + def forward(self, clip_feats_all): + # clip_feats_all = clip_feats_all[-len(self.dec):] + # only return n_layers features, save memory + clip_feats = [x for x in clip_feats_all] + + L, N, T, C = clip_feats[0].size() + x = self.temporal_cls_token.view(1, 1, -1).repeat(1, N, 1) + + for i in range(len(clip_feats)): + if self.tconv is not None: + L, N, T, C = clip_feats[i].shape + clip_feats[i] = clip_feats[i].permute(0, 1, 3, 2).flatten(0, 1) # L * N, C, T + clip_feats[i] = self.tconv[i](clip_feats[i]).permute(0, 2, 1).contiguous().view(L, N, T, C) + if self.pemb_t is not None: + clip_feats[i] = clip_feats[i] + self.pemb_t[i] + clip_feats[i] = clip_feats[i].permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C + + for i in range(len(self.dec)): + x = self.dec[i](x, clip_feats[i]) + + if self.add_residual: + residual = clip_feats_all[-1][0].mean(1) + return self.proj(x[0, :, :] + residual) + else: + return self.proj(x[0, :, :]) + + +if __name__ == '__main__': + model = TransformerDecoder() + + # construct a fake input to demonstrate input tensor shape + L, N, T, C = 197, 1, 8, 768 # num_image_tokens, video_batch_size, t_size, feature_dim + # we use intermediate feature maps from multiple blocks, so input features should be a list + input_features = [] + for i in range(4): # vit-b has 12 blocks + # every item in input_features contains features maps from a single block + # every item is a tuple containing 3 feature maps: + # (1) block output features (i.e. after mlp) with shape L, N, T, C + # (2) projected query features with shape L, N, T, C + # (3) projected key features with shape L, N, T, C + input_features.append( + torch.zeros([L, N, T, C])) + # some small optimizations: + # (1) We only decode from the last $n$ blocks so it's good as long as the last $n$ items of input_features is valid and all previous items can be filled with None to save memory. By default $n=4$. + # (2) projected query/key features are optional. If you are using an uncompatible image backbone without query/key (e.g. CNN), you can fill the position with None (i.e. the tuple should be (Tensor, None, None) and set use_image_attnmap=False when constructing the model. 
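+ # Note: TransformerDecoder.forward above takes one plain tensor of shape [L, N, T, C]
+ # per block (as constructed in this demo), not a tuple of three feature maps.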
+ + print(model(input_features).shape) # should be N, 400 diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/evl_module_uniformer_diff_conv_balance.py b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/evl_module_uniformer_diff_conv_balance.py new file mode 100644 index 0000000..09e6aa6 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/evl_utils/evl_module_uniformer_diff_conv_balance.py @@ -0,0 +1,519 @@ +#!/usr/bin/env python + +from collections import OrderedDict + +from timm.models.layers import trunc_normal_, DropPath +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor): + return x * torch.sigmoid(1.702 * x) + + +def conv_1x1x1(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (1, 1, 1), (1, 1, 1), (0, 0, 0), groups=groups) + +def conv_3x3x3(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (3, 3, 3), (1, 1, 1), (1, 1, 1), groups=groups) + +def conv_1x3x3(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (1, 3, 3), (1, 1, 1), (0, 1, 1), groups=groups) + +def bn_3d(dim): + return nn.BatchNorm3d(dim) + + +class STM(nn.Module): + def __init__(self, n_dim, reduction=4): + super(STM, self).__init__() + reduced_c = n_dim // reduction + self.reduce = nn.Sequential( + nn.Conv2d(n_dim, reduced_c, kernel_size=1, bias=False), + nn.BatchNorm2d(reduced_c) + ) + self.shift = nn.Conv2d(reduced_c, reduced_c, kernel_size=3, padding=1, groups=reduced_c, bias=False) + self.recover = nn.Sequential( + nn.Conv2d(reduced_c, n_dim, kernel_size=1, bias=False), + nn.BatchNorm2d(n_dim) + ) + self.pad = (0, 0, 0, 0, 0, 0, 0, 1) + + def forward(self, x): + # x: [L, N, T, C] + cls_token, x = x[:1], x[1:] + L, N, T, C = x.shape + H = W = int(L**0.5) + + fea = x.permute(1, 2, 3, 0).reshape(N*T, C, H, W) + bottleneck = self.reduce(fea) # NT, C//r, H, W + + # t feature + reshape_bottleneck = bottleneck.view((-1, T) + bottleneck.size()[1:]) # N, T, C//r, H, W + t_fea, __ = reshape_bottleneck.split([T-1, 1], dim=1) # N, T-1, C//r, H, W + + # apply transformation conv to t+1 feature + conv_bottleneck = self.shift(bottleneck) # NT, C//r, H, W + # reshape fea: N, T, C//r, H, W + reshape_conv_bottleneck = conv_bottleneck.view((-1, T) + conv_bottleneck.size()[1:]) + __, tPlusone_fea = reshape_conv_bottleneck.split([1, T-1], dim=1) # N, T-1, C//r, H, W + + # motion fea = t+1_fea - t_fea + # pad the last timestamp + diff_fea = tPlusone_fea - t_fea # N, T-1, C//r, H, W + # pad = (0,0,0,0,0,0,0,1) + diff_fea_pluszero = F.pad(diff_fea, self.pad, mode="constant", value=0) # N, T, C//r, H, W + diff_fea_pluszero = diff_fea_pluszero.view((-1,) + diff_fea_pluszero.size()[2:]) # NT, C//r, H, W + y = self.recover(diff_fea_pluszero) # NT, C, H, W + + # reshape + y = y.reshape(N, T, C, L).permute(3, 0, 1, 2) + y = torch.cat([cls_token, y], dim=0) + return y + + +class DSTM(nn.Module): + def __init__(self, n_dim, reduction=4): + super(DSTM, self).__init__() + reduced_c = n_dim // reduction + self.reduce = nn.Sequential( + nn.Conv2d(n_dim, reduced_c, kernel_size=1, bias=False), + nn.BatchNorm2d(reduced_c) + ) + # DW(T+1) - T + self.shift_pre = nn.Conv2d(reduced_c, reduced_c, kernel_size=3, padding=1, groups=reduced_c, bias=False) + self.recover_pre = nn.Sequential( + nn.Conv2d(reduced_c, n_dim, kernel_size=1, bias=False), + nn.BatchNorm2d(n_dim) + ) + self.pad_pre = (0, 0, 0, 0, 0, 0, 0, 1) + # DW(T-1) - T + self.shift_back = nn.Conv2d(reduced_c, reduced_c, kernel_size=3, padding=1, 
groups=reduced_c, bias=False) + self.recover_back = nn.Sequential( + nn.Conv2d(reduced_c, n_dim, kernel_size=1, bias=False), + nn.BatchNorm2d(n_dim) + ) + self.pad_back = (0, 0, 0, 0, 0, 0, 0, 1) + + def forward(self, x): + # x: [L, N, T, C] + cls_token, x = x[:1], x[1:] + L, N, T, C = x.shape + H = W = int(L**0.5) + + fea = x.permute(1, 2, 3, 0).reshape(N*T, C, H, W) + bottleneck = self.reduce(fea) # NT, C//r, H, W + + # t feature + reshape_bottleneck = bottleneck.view((-1, T) + bottleneck.size()[1:]) # N, T, C//r, H, W + pre_t_fea, __ = reshape_bottleneck.split([T-1, 1], dim=1) # N, T-1, C//r, H, W + back_t_fea, __ = reshape_bottleneck.split([1, T-1], dim=1) # N, T-1, C//r, H, W + # apply transformation conv to t+1/t-1 feature + pre_conv_bottleneck = self.shift_pre(bottleneck) # NT, C//r, H, W + back_conv_bottleneck = self.shift_back(bottleneck) # NT, C//r, H, W + # reshape fea: N, T, C//r, H, W + pre_reshape_conv_bottleneck = pre_conv_bottleneck.view((-1, T) + pre_conv_bottleneck.size()[1:]) + back_reshape_conv_bottleneck = back_conv_bottleneck.view((-1, T) + back_conv_bottleneck.size()[1:]) + __, tPlusone_fea = pre_reshape_conv_bottleneck.split([1, T-1], dim=1) # N, T-1, C//r, H, W + tMinusone_fea, _ = back_reshape_conv_bottleneck.split([T-1, 1], dim=1) # N, T-1, C//r, H, W + # pre_fea = t+1_fea - t_fea + # back_fea = t-1_fea - t_fea + pre_diff_fea = tPlusone_fea - pre_t_fea # N, T-1, C//r, H, W + back_diff_fea = tMinusone_fea - back_t_fea # N, T-1, C//r, H, W + # pad the last/first timestamp + pre_diff_fea_pluszero = F.pad(pre_diff_fea, self.pad_pre, mode="constant", value=0) # N, T, C//r, H, W + pre_diff_fea_pluszero = pre_diff_fea_pluszero.view((-1,) + pre_diff_fea_pluszero.size()[2:]) # NT, C//r, H, W + back_diff_fea_pluszero = F.pad(back_diff_fea, self.pad_back, mode="constant", value=0) # N, T, C//r, H, W + back_diff_fea_pluszero = back_diff_fea_pluszero.view((-1,) + back_diff_fea_pluszero.size()[2:]) # NT, C//r, H, W + # recover channel + pre_y = self.recover_pre(pre_diff_fea_pluszero) # NT, C, H, W + back_y = self.recover_back(back_diff_fea_pluszero) # NT, C, H, W + # reshape + y = (pre_y + back_y).reshape(N, T, C, L).permute(3, 0, 1, 2) + + # cat cls_token + y = torch.cat([cls_token, y], dim=0) + return y + + +class TDN(nn.Module): + def __init__(self, channel, n_segment=8, index=1, reduction=4): + super(TDN, self).__init__() + self.channel = channel + self.reduction = reduction + self.n_segment = n_segment + self.stride = 2**(index-1) + self.conv1 = nn.Conv2d(in_channels=self.channel, + out_channels=self.channel//self.reduction, + kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(num_features=self.channel//self.reduction) + self.conv2 = nn.Conv2d(in_channels=self.channel//self.reduction, + out_channels=self.channel//self.reduction, + kernel_size=3, padding=1, groups=self.channel//self.reduction, bias=False) + + self.avg_pool_forward2 = nn.AvgPool2d(kernel_size=2, stride=2) + self.avg_pool_forward4 = nn.AvgPool2d(kernel_size=4, stride=4) + + self.sigmoid_forward = nn.Sigmoid() + + self.avg_pool_backward2 = nn.AvgPool2d(kernel_size=2, stride=2)#nn.AdaptiveMaxPool2d(1) + self.avg_pool_backward4 = nn.AvgPool2d(kernel_size=4, stride=4) + + self.sigmoid_backward = nn.Sigmoid() + + self.pad1_forward = (0, 0, 0, 0, 0, 0, 0, 1) + self.pad1_backward = (0, 0, 0, 0, 0, 0, 1, 0) + + self.conv3 = nn.Conv2d(in_channels=self.channel//self.reduction, + out_channels=self.channel, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(num_features=self.channel) + + self.conv3_smallscale2 = 
nn.Conv2d(in_channels=self.channel//self.reduction, + out_channels=self.channel//self.reduction,padding=1, kernel_size=3, bias=False) + self.bn3_smallscale2 = nn.BatchNorm2d(num_features=self.channel//self.reduction) + + self.conv3_smallscale4 = nn.Conv2d(in_channels = self.channel//self.reduction, + out_channels=self.channel//self.reduction,padding=1, kernel_size=3, bias=False) + self.bn3_smallscale4 = nn.BatchNorm2d(num_features=self.channel//self.reduction) + + def spatial_pool(self, x): + nt, channel, height, width = x.size() + input_x = x + # [N, C, H * W] + input_x = input_x.view(nt, channel, height * width) + # [N, 1, C, H * W] + input_x = input_x.unsqueeze(1) + # [N, 1, H, W] + context_mask = self.conv_mask(x) + # [N, 1, H * W] + context_mask = context_mask.view(nt, 1, height * width) + # [N, 1, H * W] + context_mask = self.softmax(context_mask) + context_mask = context_mask.view(nt,1,height,width) + return context_mask + + def forward(self, x): + # x: [L, N, T, C] + cls_token, x = x[:1], x[1:] + L, N, T, C = x.shape + H = W = int(L**0.5) + fea = x.permute(1, 2, 3, 0).reshape(N*T, C, H, W) + + bottleneck = self.conv1(fea) # nt, c//r, h, w + bottleneck = self.bn1(bottleneck) # nt, c//r, h, w + reshape_bottleneck = bottleneck.view((-1, self.n_segment) + bottleneck.size()[1:]) # n, t, c//r, h, w + + t_fea_forward, _ = reshape_bottleneck.split([self.n_segment -1, 1], dim=1) # n, t-1, c//r, h, w + _, t_fea_backward = reshape_bottleneck.split([1, self.n_segment -1], dim=1) # n, t-1, c//r, h, w + + conv_bottleneck = self.conv2(bottleneck) # nt, c//r, h, w + reshape_conv_bottleneck = conv_bottleneck.view((-1, self.n_segment) + conv_bottleneck.size()[1:]) # n, t, c//r, h, w + _, tPlusone_fea_forward = reshape_conv_bottleneck.split([1, self.n_segment-1], dim=1) # n, t-1, c//r, h, w + tPlusone_fea_backward ,_ = reshape_conv_bottleneck.split([self.n_segment-1, 1], dim=1) # n, t-1, c//r, h, w + diff_fea_forward = tPlusone_fea_forward - t_fea_forward # n, t-1, c//r, h, w + diff_fea_backward = tPlusone_fea_backward - t_fea_backward# n, t-1, c//r, h, w + diff_fea_pluszero_forward = F.pad(diff_fea_forward, self.pad1_forward, mode="constant", value=0) # n, t, c//r, h, w + diff_fea_pluszero_forward = diff_fea_pluszero_forward.view((-1,) + diff_fea_pluszero_forward.size()[2:]) #nt, c//r, h, w + diff_fea_pluszero_backward = F.pad(diff_fea_backward, self.pad1_backward, mode="constant", value=0) # n, t, c//r, h, w + diff_fea_pluszero_backward = diff_fea_pluszero_backward.view((-1,) + diff_fea_pluszero_backward.size()[2:]) #nt, c//r, h, w + y_forward_smallscale2 = self.avg_pool_forward2(diff_fea_pluszero_forward) # nt, c//r, 1, 1 + y_backward_smallscale2 = self.avg_pool_backward2(diff_fea_pluszero_backward) # nt, c//r, 1, 1 + + y_forward_smallscale4 = diff_fea_pluszero_forward + y_backward_smallscale4 = diff_fea_pluszero_backward + y_forward_smallscale2 = self.bn3_smallscale2(self.conv3_smallscale2(y_forward_smallscale2)) + y_backward_smallscale2 = self.bn3_smallscale2(self.conv3_smallscale2(y_backward_smallscale2)) + + y_forward_smallscale4 = self.bn3_smallscale4(self.conv3_smallscale4(y_forward_smallscale4)) + y_backward_smallscale4 = self.bn3_smallscale4(self.conv3_smallscale4(y_backward_smallscale4)) + + y_forward_smallscale2 = F.interpolate(y_forward_smallscale2, diff_fea_pluszero_forward.size()[2:]) + y_backward_smallscale2 = F.interpolate(y_backward_smallscale2, diff_fea_pluszero_backward.size()[2:]) + + y_forward = self.bn3(self.conv3(1.0/3.0*diff_fea_pluszero_forward + 
1.0/3.0*y_forward_smallscale2 + 1.0/3.0*y_forward_smallscale4))# nt, c, 1, 1 + y_backward = self.bn3(self.conv3(1.0/3.0*diff_fea_pluszero_backward + 1.0/3.0*y_backward_smallscale2 + 1.0/3.0*y_backward_smallscale4)) # nt, c, 1, 1 + + y_forward = self.sigmoid_forward(y_forward) - 0.5 + y_backward = self.sigmoid_backward(y_backward) - 0.5 + + y = 0.5 * y_forward + 0.5 * y_backward + attn = fea * y + x = x + attn.reshape(N, T, C, L).permute(3, 0, 1, 2) + x = torch.cat([cls_token, x], dim=0) + return x + + +class CMlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = conv_1x1x1(in_features, hidden_features) + self.act = act_layer() + self.fc2 = conv_1x1x1(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class CBlock(nn.Module): + def __init__(self, dim, mlp_ratio=4., dropout=0., drop_path=0., uni_type='3d', add_ffn=True): + super().__init__() + self.norm1 = bn_3d(dim) + self.conv1 = conv_1x1x1(dim, dim, 1) + self.conv2 = conv_1x1x1(dim, dim, 1) + if uni_type == '3d': + print('Use 3d conv for local MHRA') + self.attn = conv_3x3x3(dim, dim, groups=dim) + else: + print('Use 2d conv for local MHRA') + self.attn = conv_1x3x3(dim, dim, groups=dim) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.add_ffn = add_ffn + if add_ffn: + print('Add FFN in local MHRA') + self.norm2 = bn_3d(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = CMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=nn.GELU, drop=dropout) + + print('Init zero') + nn.init.constant_(self.conv2.weight, 0.) + nn.init.constant_(self.conv2.bias, 0.) + if add_ffn: + nn.init.constant_(self.mlp.fc2.weight, 0.) + nn.init.constant_(self.mlp.fc2.bias, 0.) + + def forward(self, x): + x = x + self.drop_path(self.conv2(self.attn(self.conv1(self.norm1(x))))) + if self.add_ffn: + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class ResidualDecoderBlock(nn.Module): + def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None, + mlp_factor: float = 4.0, dropout: float = 0.0, drop_path: float = 0.0): + super().__init__() + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + print(f'Drop path rate: {drop_path}') + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = nn.LayerNorm(d_model) + d_mlp = round(mlp_factor * d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_mlp)), + ("gelu", QuickGELU()), + ("dropout", nn.Dropout(dropout)), + ("c_proj", nn.Linear(d_mlp, d_model)) + ])) + self.ln_2 = nn.LayerNorm(d_model) + self.ln_3 = nn.LayerNorm(d_model) + self.attn_mask = attn_mask + + nn.init.xavier_uniform_(self.attn.in_proj_weight) + # nn.init.xavier_uniform_(self.attn.out_proj.weight) + nn.init.constant_(self.attn.out_proj.weight, 0.) + nn.init.constant_(self.attn.out_proj.bias, 0.) + nn.init.xavier_uniform_(self.mlp[0].weight) + # nn.init.xavier_uniform_(self.mlp[-1].weight) + nn.init.constant_(self.mlp[-1].weight, 0.) + nn.init.constant_(self.mlp[-1].bias, 0.) 
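+ # Same manual cross-attention scheme as Extractor.attention in evl_utils/clip_vit_fusion.py
+ # and clip_vit_only_global.py (q from x, k/v from y; attn_mask must stay None).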
+ + def attention(self, x: torch.Tensor, y: torch.Tensor): + #self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + # return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0] + assert self.attn_mask is None # not implemented + # manual forward to add position information + d_model = self.ln_1.weight.size(0) + q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model] + + k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model] + v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:] + Tx, Ty, N = q.size(0), k.size(0), q.size(1) + q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5)) + + aff = aff.softmax(dim=-1) + out = aff @ v + out = out.permute(2, 0, 1, 3).flatten(2) + out = self.attn.out_proj(out) + return out + + def forward(self, x: torch.Tensor, y: torch.Tensor): + x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y))) + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class TransformerDecoder_uniformer_diff_conv_balance(nn.Module): + def __init__(self, n_layers=4, + uni_layer=4, uni_type='3d', add_ffn=True, t_conv_type='1d', pre_prompt=True, + n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14, + balance=0., + use_t_conv=True, after_me=True, before_me=False, me_type='dstm', me_reduction=4, + use_t_pos_embed=True, num_classes=400): + super().__init__() + + n_layers += uni_layer + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)] + + self.uni_layer = uni_layer + self.uni_dec = nn.ModuleList([ + CBlock(n_dim, mlp_ratio=mlp_factor, dropout=mlp_dropout[i], drop_path=dpr[i], uni_type=uni_type, add_ffn=add_ffn) + for i in range(uni_layer) + ]) + + self.dec = nn.ModuleList([ + ResidualDecoderBlock(n_dim, n_head, mlp_factor=mlp_factor, dropout=mlp_dropout[i], drop_path=dpr[i]) + for i in range(n_layers) + ]) + self.proj = nn.Sequential( + nn.LayerNorm(n_dim), + nn.Dropout(cls_dropout), + nn.Linear(n_dim, num_classes), + ) + + self.pre_prompt = pre_prompt + if pre_prompt: + print('Add pre prompt') + self.pre_temporal_cls_token = nn.Parameter(torch.zeros(n_dim)) + self.temporal_cls_token = nn.Parameter(torch.zeros(n_dim)) + + if use_t_conv: + self.t_conv_type = t_conv_type + if t_conv_type == '1d': + print('Use 1d t_conv for CPE') + self.tconv = nn.ModuleList([ + nn.Conv1d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim) + for i in range(n_layers) + ]) + for m in self.tconv: + nn.init.constant_(m.bias, 0.) + m.weight.data[...] = torch.Tensor([0, 1, 0]) + else: + print('Use 3d t_conv for CPE') + self.tconv = nn.ModuleList([ + nn.Conv3d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim) + for i in range(n_layers) + ]) + for m in self.tconv: + nn.init.constant_(m.bias, 0.) 
+ else: + self.tconv = None + + self.before_me = before_me + self.after_me = after_me + if before_me or after_me: + assert before_me != after_me + print(f'Use {me_type} attention, Before {before_me}, After {after_me}') + if me_type == 'stm': + me_op = STM + elif me_type == 'dstm': + me_op = DSTM + elif me_type == 'tdn': + me_op = TDN + self.me = nn.ModuleList([me_op(n_dim, reduction=me_reduction) for i in range(n_layers)]) + + if use_t_pos_embed: + self.pemb_t = nn.Parameter(torch.zeros([n_layers, t_size, n_dim])) + else: + self.pemb_t = None + + print(F'Balnce weight {balance}') + self.balance = nn.Parameter(torch.ones((n_dim)) * balance) + self.sigmoid = nn.Sigmoid() + + def forward(self, clip_feats_all, mode='video'): + # clip_feats_all = clip_feats_all[-len(self.dec):] + # only return n_layers features, save memory + clip_feats = [x for x in clip_feats_all] + if self.after_me: + origin_clip_feats = [x for x in clip_feats_all] + + L, N, T, C = clip_feats[0].size() + x = self.temporal_cls_token.view(1, 1, -1).repeat(1, N, 1) + + for i in range(len(clip_feats)): + if self.before_me: + # contain residual + clip_feats[i] = self.me[i](clip_feats[i]) + if self.tconv is not None: + L, N, T, C = clip_feats[i].shape + if self.t_conv_type == '1d': + clip_feats[i] = clip_feats[i].permute(0, 1, 3, 2).flatten(0, 1) # L * N, C, T + clip_feats[i] = self.tconv[i](clip_feats[i]).permute(0, 2, 1).contiguous().view(L, N, T, C) + else: + H = W = int((L - 1) ** 0.5) + _, tmp_feats = clip_feats[i][:1], clip_feats[i][1:] + tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T, H, W) + tmp_feats = self.tconv[i](tmp_feats).view(N, C, T, L - 1).permute(3, 0, 2, 1) + clip_feats[i][1:] = clip_feats[i][1:] + tmp_feats + if self.pemb_t is not None and mode == 'video': + clip_feats[i] = clip_feats[i] + self.pemb_t[i] + if self.after_me: + clip_feats[i] = clip_feats[i] + self.me[i](origin_clip_feats[i]) + if i < self.uni_layer: + # L, N, T, C + L, N, T, C = clip_feats[i].shape + H = W = int((L - 1) ** 0.5) + _, tmp_feats = clip_feats[i][:1], clip_feats[i][1:] + tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T, H, W) + tmp_feats = self.uni_dec[i](tmp_feats).view(N, C, T, L - 1).permute(3, 0, 2, 1) + clip_feats[i][1:] = clip_feats[i][1:] + tmp_feats + clip_feats[i] = clip_feats[i].permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C + + if self.pre_prompt: + pre_x = self.pre_temporal_cls_token.view(1, 1, -1).repeat(1, N, 1) + for i in range(len(self.dec)): + if i < self.uni_layer: + pre_x = self.dec[i](pre_x, clip_feats[i]) + elif i == self.uni_layer: + clip_feats[i] = torch.cat([pre_x, clip_feats[i]], dim=0) + x = self.dec[i](x, clip_feats[i]) + else: + x = self.dec[i](x, clip_feats[i]) + else: + for i in range(len(self.dec)): + x = self.dec[i](x, clip_feats[i]) + + # real residual + # L, N, T, C + residual = clip_feats_all[-1][0].mean(1) + weight = self.sigmoid(self.balance) + + return self.proj((1 - weight) * x[0, :, :] + weight * residual) + + +if __name__ == '__main__': + model = TransformerDecoder_uniformer_diff_conv_balance() + + # construct a fake input to demonstrate input tensor shape + L, N, T, C = 197, 1, 8, 768 # num_image_tokens, video_batch_size, t_size, feature_dim + # we use intermediate feature maps from multiple blocks, so input features should be a list + input_features = [] + for i in range(8): # vit-b has 12 blocks + # every item in input_features contains features maps from a single block + # every item is a tuple containing 3 feature maps: + # (1) block output features (i.e. 
after mlp) with shape L, N, T, C + # (2) projected query features with shape L, N, T, C + # (3) projected key features with shape L, N, T, C + input_features.append( + tuple(torch.zeros([L, N, T, C]) for _ in range(3))) + # some small optimizations: + # (1) We only decode from the last $n$ blocks so it's good as long as the last $n$ items of input_features is valid and all previous items can be filled with None to save memory. By default $n=4$. + # (2) projected query/key features are optional. If you are using an uncompatible image backbone without query/key (e.g. CNN), you can fill the position with None (i.e. the tuple should be (Tensor, None, None) and set use_image_attnmap=False when constructing the model. + + print(model) + print(model(input_features).shape) # should be N, 400 diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/model.py b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/model.py new file mode 100644 index 0000000..910303a --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/model.py @@ -0,0 +1,443 @@ +from collections import OrderedDict +from typing import Tuple, Union + +import numpy as np +import torch +import torch.nn.functional as F +from torch import nn + +from . import evl_utils +from .evl_utils import TransformerDecoder_uniformer_diff_conv_balance +from einops import rearrange +from ipdb import set_trace +from copy import deepcopy +# from .clip_decoders import CaptionDecoder + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x: torch.Tensor): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None): + super().__init__() + + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + def attention(self, x: torch.Tensor): + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def forward(self, x: torch.Tensor): + x = x + self.attention(self.ln_1(x)) + x = x + self.mlp(self.ln_2(x)) + return x + + +class Transformer(nn.Module): + def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None): + super().__init__() + self.width = width + self.layers = layers + self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)]) + + def forward(self, x: torch.Tensor): + # if self.use_checkpoint and self.checkpoint_num[1] > 0: + # segments = min(len(self.resblocks), self.checkpoint_num[1]) + # return checkpoint_sequential(self.resblocks, segments, x) + # else: + return self.resblocks(x) + +class CLIP(nn.Module): + def __init__(self, + embed_dim: int, + # vision + image_resolution: int, + vision_layers: Union[Tuple[int, int, int, int], int], + vision_width: int, + vision_patch_size: int, + # text + context_length: int, + vocab_size: int, + transformer_width: int, + transformer_heads: int, + transformer_layers: int, + # evl + n_layers=4, n_dim=768, n_head=12, 
mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14, + use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True, + backbone='vit_2plus1d_dw_bias_b16', + uni_layer=0, + uni_type='2d', + add_ffn=False, + t_conv_type='3d', + pre_prompt=False, + balance=0., + after_me=True, + before_me=False, + me_type='stm', + me_reduction=4, + init_zero=True, + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + use_capdecoder=False, + ): + super().__init__() + + # All assertions is for adhoc clip_kc and should be removed + # assert vision_layers == 12, vision_layers + assert image_resolution == 224, image_resolution + # assert vision_patch_size == 32, vision_patch_size + assert vision_width == n_dim, (vision_width, n_dim) + + self.vision_width = n_dim + + self.context_length = context_length + + vision_heads = vision_width // 64 + self.visual = evl_utils.__dict__[backbone]( + pretrained=False, t_size=t_size, mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, n_dim=n_dim, + n_head=n_head, return_list=return_list, drop_path_rate=drop_path_rate, backbone_drop_path_rate=drop_path_rate) + # self.evl = TransformerDecoder_uniformer_diff_conv_balance( + # n_layers=n_layers, n_dim=n_dim, n_head=n_head, + # mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, + # mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, + # use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed, + # uni_layer=uni_layer, uni_type=uni_type, add_ffn=add_ffn, t_conv_type=t_conv_type, + # pre_prompt=pre_prompt, balance=balance, + # after_me=after_me, before_me=before_me, + # me_type=me_type, me_reduction=me_reduction, + # init_zero=init_zero, + # ) + self.visual_ln_post = nn.LayerNorm(n_dim) + scale = n_dim ** -0.5 + self.visual_proj = nn.Parameter(scale * torch.randn(n_dim, embed_dim)) + self.return_qk = use_image_attnmap + self.return_num = n_layers + + self.transformer = Transformer( + width=transformer_width, + layers=transformer_layers, + heads=transformer_heads, + attn_mask=self.build_attention_mask(), + ) + + self.vocab_size = vocab_size + self.token_embedding = nn.Embedding(vocab_size, transformer_width) + self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width)) + self.ln_final = LayerNorm(transformer_width) + + self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim)) + self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) + + self.embed_dim = embed_dim + + # We seperate the mask embedding to load pretrained model + self.text_mask_embedding = nn.Parameter(torch.empty(1, 1, transformer_width)) + + # # To keep the num_embeddings unchanged, we add this to embedded text + # self.eot_token_embedding = nn.Parameter(torch.empty(1, transformer_width)) + + self.initialize_parameters() + + def initialize_parameters(self): + nn.init.normal_(self.token_embedding.weight, std=0.02) + nn.init.normal_(self.positional_embedding, std=0.01) + nn.init.normal_(self.text_mask_embedding, std=0.02) + # nn.init.constant_(self.eot_token_embedding, 0.0) + + proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5) + attn_std = self.transformer.width ** -0.5 + fc_std = (2 * self.transformer.width) ** -0.5 + for block in self.transformer.resblocks: + nn.init.normal_(block.attn.in_proj_weight, std=attn_std) + nn.init.normal_(block.attn.out_proj.weight, std=proj_std) + nn.init.normal_(block.mlp.c_fc.weight, std=fc_std) + nn.init.normal_(block.mlp.c_proj.weight, std=proj_std) + + if 
self.text_projection is not None: + nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5) + + nn.init.constant_(self.visual_ln_post.weight, 1.0) + nn.init.constant_(self.visual_ln_post.bias, 0.0) + + def build_attention_mask(self): + # lazily create causal attention mask, with full attention between the vision tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.empty(self.context_length, self.context_length) + mask.fill_(float("-inf")) + mask.triu_(1) # zero out the lower diagonal + return mask + + @property + def dtype(self): + return self.visual.conv1.weight.dtype + + def encode_video(self, video, return_all_feats=False, masked_indices=None, mode="video"): + # video: [N, C, T, H, W] + feats = self.visual(video, return_all_feats=return_all_feats, mode=mode) + if return_all_feats: + x, feats = feats + x = self.visual_ln_post(x) + if self.visual_proj is not None: + x = x @ self.visual_proj + + if return_all_feats: + return x, feats # [N, C], [L, N, T, C] + + return x + + def encode_text(self, text, masked_indices=None, return_all_feats=False): + # assert (text.max(dim=-1)[0] + 1 == self.token_embedding.num_embeddings).all(), \ + # "The last token of each sentence should be eot_token, check the input" + + x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model] + # x[torch.arange(x.shape[0]), text.argmax(dim=-1)] += self.eot_token_embedding + + if masked_indices is not None: + x[masked_indices] = self.text_mask_embedding + + x = x + self.positional_embedding.type(self.dtype) + x = x.permute(1, 0, 2) # NLD -> LND + x = self.transformer(x) + x = x.permute(1, 0, 2) # LND -> NLD + x = self.ln_final(x).type(self.dtype) + + # x.shape = [batch_size, n_ctx, transformer.width] + # take features from the eot embedding (eot_token is the highest number in each sequence) + feats = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] + if self.text_projection is not None: + feats = feats @ self.text_projection + + if return_all_feats: + return feats, x + + return feats + + def forward(self, video, text): + video_features = self.encode_video(video) + text_features = self.encode_text(text) + + # normalized features + video_features = video_features / video_features.norm(dim=1, keepdim=True) + text_features = text_features / text_features.norm(dim=1, keepdim=True) + + # cosine similarity as logits + logit_scale = self.logit_scale.exp() + logits_per_video= logit_scale * video_features @ text_features.t() + logits_per_text = logits_per_video.t() + + # shape = [global_batch_size, global_batch_size] + return logits_per_video, logits_per_text + + +def convert_weights(model: nn.Module): + """Convert applicable model parameters to fp16""" + + def _convert_weights_to_fp16(l): + if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): + l.weight.data = l.weight.data.half() + if l.bias is not None: + l.bias.data = l.bias.data.half() + + if isinstance(l, nn.MultiheadAttention): + for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]: + tensor = getattr(l, attr) + if tensor is not None: + tensor.data = tensor.data.half() + + for name in ["text_projection", "proj"]: + if hasattr(l, name) and not isinstance(l, TransformerDecoder_uniformer_diff_conv_balance): + attr = getattr(l, name) + if attr is not None: + attr.data = attr.data.half() + + model.apply(_convert_weights_to_fp16) + +def interpolate_temporal_pos_embed(pos_embed, T): + # [1, t, d] -> [1, d, t] + pos_embed = pos_embed.transpose(-2, -1) + # [1, d, t] -> 
[1, d, T] + pos_embed = F.interpolate(pos_embed, size=(T), mode='linear') + # [1, d, T] -> [1, T, d] + return pos_embed.transpose(-2, -1) + +def interploate_rpb(rpb, T): + t1 = T * 2 - 1 + rpb = rpb.transpose(0, 1).unsqueeze(0) + rpb = F.interpolate(rpb, size=(t1), mode='linear') + return rpb.squeeze(0).transpose(0, 1) + +def build_model( + state_dict: dict, + # evl + n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], cls_dropout=0.5, t_size=8, spatial_size=14, + use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True, no_pretrain=False, + init_zero=True, mergeclip=False, mergeweight=0.5, use_capdecoder=False, clip_state_dict=None, + ): + vit = "visual.proj" in state_dict or "visual.positional_embedding" in state_dict + + if "visual.proj" in state_dict: + state_dict["visual_proj"] = state_dict["visual.proj"] + state_dict["visual_ln_post.weight"] = state_dict["visual.ln_post.weight"] + state_dict["visual_ln_post.bias"] = state_dict["visual.ln_post.bias"] + del state_dict["visual.proj"], state_dict["visual.ln_post.weight"], state_dict["visual.ln_post.bias"] + # new_state_dict = OrderedDict() + # for k, v in state_dict.items(): + # if k.startswith("backbone."): + # k = k.replace("backbone.", "visual.") + + # new_state_dict[k] = v + + # state_dict = new_state_dict + if vit: + vision_width = state_dict["visual.conv1.weight"].shape[0] + vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")]) + vision_patch_size = state_dict["visual.conv1.weight"].shape[-1] + grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5) + image_resolution = vision_patch_size * grid_size + else: + counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]] + vision_layers = tuple(counts) + vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0] + output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5) + vision_patch_size = None + assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0] + image_resolution = output_width * 32 + + # embed_dim = 512 + # context_length = 77 + # vocab_size = 49408 + # transformer_width = 512 + # transformer_layers = 12 + embed_dim = state_dict["text_projection"].shape[1] + context_length = state_dict["positional_embedding"].shape[0] + vocab_size = state_dict["token_embedding.weight"].shape[0] + transformer_width = state_dict["ln_final.weight"].shape[0] + transformer_heads = transformer_width // 64 + transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks"))) + + ########### add this ############ + # for k, v in state_dict.items(): + # print(k, v.shape) + ################################################ + + vision_width = state_dict["visual_proj"].shape[0] + n_dim = vision_width + if vision_width == 768: + backbone = "vit_only_global_b16" + n_head = 12 + return_list = [8, 9, 10, 11] + # comment this as we always use vit-L/14 + # elif vision_width == 1024 and state_dict["input_resolution"] == 224: + elif vision_width == 1024: + backbone = "vit_only_global_l14" + n_head = 16 + return_list = [20, 21, 22, 23] + elif vision_width == 1024 and state_dict["input_resolution"] == 336: + backbone = "vit_only_global_l14_336" + n_head = 16 + return_list = [20, 21, 22, 23] + else: + raise NotImplementedError + + model = CLIP( + embed_dim, + image_resolution, 
vision_layers, vision_width, vision_patch_size, + context_length, vocab_size, transformer_width, transformer_heads, transformer_layers, + n_layers=n_layers, n_dim=n_dim, n_head=n_head, mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, spatial_size=spatial_size, + use_t_conv=use_t_conv, use_image_attnmap=use_image_attnmap, use_t_pos_embed=use_t_pos_embed, backbone=backbone, + init_zero=init_zero, return_list=return_list, + ) + + for key in ["input_resolution", "context_length", "vocab_size"]: + if key in state_dict: + del state_dict[key] + + # convert_weights(model) + temporal_key = 'visual.temporal_positional_embedding' + temporal_key2 = 'evl.pemb_t' + + if temporal_key in state_dict and t_size != state_dict[temporal_key].size(1): + state_dict[temporal_key] = interpolate_temporal_pos_embed(state_dict[temporal_key], t_size) + state_dict[temporal_key2] = interpolate_temporal_pos_embed(state_dict[temporal_key2], t_size) + for kk, vv in state_dict.items(): + if 'rpb_t' in kk: + size_old = state_dict[kk].shape + state_dict[kk] = interploate_rpb(vv, t_size) + size_new = state_dict[kk].shape + print('Interpolating' ,kk, size_old, '-->', size_new) + # set_trace() + + # print('$' * 100) + # for k, v in model.state_dict().items(): + # print(k, v.shape) + + + if mergeclip: + assert 0.0 <= mergeweight <= 1.0 + assert clip_state_dict is not None + clip_sd = {k: v.cpu() for k, v in clip_state_dict.items()} + for key in ["input_resolution", "context_length", "vocab_size"]: + if key in clip_sd: + del clip_sd[key] + + ## trick: use this func to transfer 2d clip weights to 3d, and mark back + evl_utils.clip_vit_fusion.clip_load_state_dict(model, clip_sd) + loaded_dict = model.state_dict() + clip_sd_new = {k: v.cpu() for k, v in loaded_dict.items() if k in clip_sd} + + # set_trace() + + new_sd = deepcopy(state_dict) + + for k in new_sd: + if k not in clip_sd_new: + continue + if any(x in k for x in clip_sd_new.keys()): + print('merging: ', k, '\t', clip_sd_new[k].shape, state_dict[k].shape) + new_sd[k] = clip_sd_new[k] * mergeweight + state_dict[k] * (1.0 - mergeweight) + + ############## only merge the clip text features, this is for ActivityNet ########### + # if 'visual' in k: + # new_sd[k] = clip_sd[k] + # else: + # new_sd[k] = clip_sd[k] * mergeweight + state_dict[k] * (1.0 - mergeweight) + ################################################################################ + + ############## only merge the clip visual features, this is for MSVD ########### + # if 'visual' in k: + # new_sd[k] = clip_sd[k] * mergeweight + state_dict[k] * (1.0 - mergeweight) + # else: + # new_sd[k] = clip_sd[k] + ################################################################################ + state_dict = new_sd + + # print('$' * 100) + # for k, v in state_dict.items(): + # print(k, v.shape) + + if not no_pretrain: + # msg = evl_utils.clip_vit_fusion.clip_load_state_dict(model, state_dict) + msg = model.load_state_dict(state_dict, strict=False) + print(msg) + return model.eval() diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/model_freeze.py b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/model_freeze.py new file mode 100644 index 0000000..b89aabb --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/model_freeze.py @@ -0,0 +1,142 @@ +import os +import time + +import torch +import torch.nn as nn +from fvcore.nn import FlopCountAnalysis +from fvcore.nn import flop_count_table + +import evl_utils +from evl_utils 
import TransformerDecoder + +PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model' + + +class EVL(nn.Module): + def __init__(self, + backbone='vit_l14_336', + n_layers=4, + n_dim=1024, + n_head=16, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=32, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400 + ): + super().__init__() + + # pre-trained from CLIP + self.backbone = evl_utils.__dict__[backbone](pretrained=False) + + self.evl = TransformerDecoder( + n_layers=n_layers, n_dim=n_dim, n_head=n_head, + mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, + use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed, + num_classes=num_classes, + ) + self.return_num = n_layers + + def forward(self, x): + features = self.backbone(x, return_num=self.return_num) + output = self.evl(features) + + return output + + +def cal_flops(model, frame=8, size=224): + flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) + + +def vit_l_sparse16(pretrained=True): + # 16x224x224 + # k400 1x1: 86.5 + model = EVL( + backbone='vit_l14', + n_layers=4, + n_dim=1024, + n_head=16, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=32, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400 + ) + if pretrained: + pretrained_path = os.path.join(PATH_PREFIX, 'vit_l_sparse16.pyth') + print(f'lodel model from: {pretrained_path}') + state_dict = torch.load(pretrained_path, map_location='cpu') + model.load_state_dict(state_dict) + return model + + +def vit_l_sparse32(pretrained=True): + # 32x224x224 + # k400 1x1: 87.0 + model = EVL( + backbone='vit_l14', + n_layers=4, + n_dim=1024, + n_head=16, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=32, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400 + ) + if pretrained: + pretrained_path = os.path.join(PATH_PREFIX, 'vit_l_sparse32.pyth') + print(f'lodel model from: {pretrained_path}') + state_dict = torch.load(pretrained_path, map_location='cpu') + model.load_state_dict(state_dict) + return model + + +def vit_l336_sparse32(pretrained=True): + # 32x336x336 + # k400 1x1: 87.4 + model = EVL( + backbone='vit_l14_336', + n_layers=4, + n_dim=1024, + n_head=16, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=32, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400 + ) + if pretrained: + pretrained_path = os.path.join(PATH_PREFIX, 'vit_l336_sparse32.pyth') + print(f'lodel model from: {pretrained_path}') + state_dict = torch.load(pretrained_path, map_location='cpu') + model.load_state_dict(state_dict) + return model + + +if __name__ == '__main__': + + model = vit_l_sparse16() + cal_flops(model, frame=16, size=224) + + # model = vit_l_sparse32() + # cal_flops(model, frame=32, size=224) + + # model = vit_l336_sparse32() + # cal_flops(model, frame=32, size=336) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/model_no_freeze.py b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/model_no_freeze.py new file mode 100644 index 0000000..657c677 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/model_no_freeze.py @@ -0,0 +1,116 @@ +import os +import time + +import torch +import torch.nn as nn 
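+# fvcore is only needed for the FLOP-counting helper cal_flops() defined below; the EVL
+# model itself does not depend on it.
+# PATH_PREFIX further down points to a shared /mnt/lustre checkpoint directory, so the
+# pretrained=True constructors expect those .pyth weights to be reachable at that path.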
+from fvcore.nn import FlopCountAnalysis +from fvcore.nn import flop_count_table + +import evl_utils +from evl_utils import TransformerDecoder + +PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model' + + +class EVL(nn.Module): + def __init__(self, + backbone='vit_l14_336', + n_layers=4, + n_dim=1024, + n_head=16, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=32, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400, + add_residual=False, + ): + super().__init__() + + # pre-trained from CLIP + self.backbone = evl_utils.__dict__[backbone](pretrained=False) + + self.evl = TransformerDecoder( + n_layers=n_layers, n_dim=n_dim, n_head=n_head, + mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, + use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed, + num_classes=num_classes, add_residual=add_residual + ) + self.return_num = n_layers + + def forward(self, x): + features = self.backbone(x, return_num=self.return_num) + output = self.evl(features) + + return output + + +def cal_flops(model, frame=8, size=224): + flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) + + +def vit_b_sparse8(pretrained=True): + # 8x224x224 + # k400 1x1: 82.4 + model = EVL( + backbone='vit_b16', + n_layers=4, + n_dim=768, + n_head=12, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=8, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400, + add_residual=True, + ) + if pretrained: + pretrained_path = os.path.join(PATH_PREFIX, 'vit_b_sparse8.pyth') + print(f'lodel model from: {pretrained_path}') + state_dict = torch.load(pretrained_path, map_location='cpu') + model.load_state_dict(state_dict) + return model + +def vit_2plus1d_b_sparse8(pretrained=True): + # 8x224x224 + # k400 1x1: 82.5 + model = EVL( + backbone='vit_2plus1d_b16', + n_layers=4, + n_dim=768, + n_head=12, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=8, + use_t_conv=True, + use_t_pos_embed=True, + num_classes=400, + add_residual=True, + ) + if pretrained: + pretrained_path = os.path.join(PATH_PREFIX, 'vit_2plus1d_b_sparse8.pyth') + print(f'lodel model from: {pretrained_path}') + state_dict = torch.load(pretrained_path, map_location='cpu') + model.load_state_dict(state_dict) + return model + + +if __name__ == '__main__': + + # model = vit_b_sparse8() + # cal_flops(model, frame=8, size=224) + + model = vit_2plus1d_b_sparse8() + cal_flops(model, frame=8, size=224) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/model_no_freeze_diff.py b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/model_no_freeze_diff.py new file mode 100644 index 0000000..cce17f0 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/model_no_freeze_diff.py @@ -0,0 +1,112 @@ +import os +import time + +import torch +import torch.nn as nn +from fvcore.nn import FlopCountAnalysis +from fvcore.nn import flop_count_table + +import evl_utils +from evl_utils import TransformerDecoder_uniformer_diff_conv_balance + +PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model' + + +class EVL(nn.Module): + def __init__(self, + backbone='vit_b16', + n_layers=12, + n_dim=1024, + n_head=16, + mlp_factor=4.0, + drop_path_rate=0., + 
mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + num_frames=8, + t_size=8, + use_t_conv=True, + use_t_pos_embed=True, + uni_layer=4, + uni_type='3d', + add_ffn=True, + t_conv_type='1d', + pre_prompt=True, + balance=0., + after_me=True, + before_me=False, + me_type='dstm', + me_reduction=4, + num_classes=400, + ): + super().__init__() + + # pre-trained from CLIP + self.backbone = evl_utils.__dict__[backbone](pretrained=False, num_frames=num_frames, t_size=t_size) + + self.evl = TransformerDecoder_uniformer_diff_conv_balance( + n_layers=n_layers, n_dim=n_dim, n_head=n_head, + mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, + use_t_conv=use_t_conv, use_t_pos_embed=use_t_pos_embed, + uni_layer=uni_layer, uni_type=uni_type, add_ffn=add_ffn, t_conv_type=t_conv_type, + pre_prompt=pre_prompt, balance=balance, + after_me=after_me, before_me=before_me, + me_type=me_type, me_reduction=me_reduction, + num_classes=num_classes + ) + self.return_num = n_layers + + def forward(self, x, mode='image'): + features = self.backbone(x, return_num=self.return_num, mode=mode) + output = self.evl(features, mode=mode) + + return output + + +def cal_flops(model, frame=8, size=224): + flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) + + +def vit_2plus1d_diff_b_sparse8(pretrained=True): + # 8x224x224 + # k400 1x1: 82.5 + model = EVL( + backbone='vit_2plus1d_dw_bias_b16', + n_layers=12, + n_dim=768, + n_head=12, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + num_frames=8, + t_size=8, + use_t_conv=True, + use_t_pos_embed=True, + uni_layer=0, + uni_type='2d', + add_ffn=False, + t_conv_type='3d', + pre_prompt=False, + balance=0., + after_me=True, + before_me=False, + me_type='stm', + me_reduction=4, + num_classes=400, + ) + # if pretrained: + # pretrained_path = os.path.join(PATH_PREFIX, 'vit_2plus1d_diff_b_sparse8.pyth') + # print(f'lodel model from: {pretrained_path}') + # state_dict = torch.load(pretrained_path, map_location='cpu') + # model.load_state_dict(state_dict) + return model + + +if __name__ == '__main__': + + model = vit_2plus1d_diff_b_sparse8() + cal_flops(model, frame=1, size=224) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/model_no_freeze_only_global.py b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/model_no_freeze_only_global.py new file mode 100644 index 0000000..735d120 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/model_no_freeze_only_global.py @@ -0,0 +1,109 @@ +import os +import time + +import torch +import torch.nn as nn +# from fvcore.nn import FlopCountAnalysis +# from fvcore.nn import flop_count_table + +from modules.clip_kc_new import evl_utils + +PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model' + + +class EVL(nn.Module): + def __init__(self, + backbone='vit_b16', + t_size=16, + backbone_drop_path_rate=0., + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + n_layers=12, + n_dim=768, + n_head=12, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + num_classes=174, + ): + super().__init__() + + # pre-trained from CLIP + self.backbone = evl_utils.__dict__[backbone]( + pretrained=False, + 
t_size=t_size, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + def forward(self, x, mode='video'): + output = self.backbone(x, mode=mode) + return output + + +def cal_flops(model, frame=8, size=224): + flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) + + +def vit_only_global_b_sparse8_k400(pretrained=True): + model = EVL( + backbone='vit_only_global_b16', + t_size=8, + backbone_drop_path_rate=0., + return_list=[8, 9, 10, 11], + n_layers=4, + n_dim=768, + n_head=12, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + num_classes=400, + ) + # if pretrained: + # pretrained_path = os.path.join(PATH_PREFIX, 'fuck.pyth') + # print(f'lodel model from: {pretrained_path}') + # state_dict = torch.load(pretrained_path, map_location='cpu') + # model.load_state_dict(state_dict) + return model + + +def vit_only_global_l_sparse8_k400(pretrained=True): + model = EVL( + backbone='vit_only_global_l14', + t_size=8, + backbone_drop_path_rate=0., + return_list=[20, 21, 22, 23], + n_layers=4, + n_dim=1024, + n_head=16, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + num_classes=400, + ) + # if pretrained: + # pretrained_path = os.path.join(PATH_PREFIX, 'fuck.pyth') + # print(f'lodel model from: {pretrained_path}') + # state_dict = torch.load(pretrained_path, map_location='cpu') + # model.load_state_dict(state_dict) + return model + + +if __name__ == '__main__': + + model = vit_only_global_l_sparse8_k400() + # cal_flops(model, frame=1, size=224) + cal_flops(model, frame=8, size=224) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/model_no_freeze_uniformer.py b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/model_no_freeze_uniformer.py new file mode 100644 index 0000000..8b13db4 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/model_no_freeze_uniformer.py @@ -0,0 +1,113 @@ +import os +import time + +import torch +import torch.nn as nn +from fvcore.nn import FlopCountAnalysis +from fvcore.nn import flop_count_table + +from . 
import evl_utils + +PATH_PREFIX = '/mnt/lustre/share_data/likunchang.vendor/code/EVL/clip_kc/model' + + +class EVL(nn.Module): + def __init__(self, + backbone='vit_b16', + t_size=16, + dw_reduction=1.5, + backbone_drop_path_rate=0., + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + n_layers=12, + n_dim=768, + n_head=12, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + num_classes=174, + ): + super().__init__() + + # pre-trained from CLIP + self.backbone = evl_utils.__dict__[backbone]( + pretrained=False, + t_size=t_size, + dw_reduction=dw_reduction, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + def forward(self, x, mode='video'): + output = self.backbone(x, mode=mode) + return output + + +def cal_flops(model, frame=8, size=224): + flops = FlopCountAnalysis(model, torch.rand(1, 3, frame, size, size)) + s = time.time() + print(flop_count_table(flops, max_depth=1)) + print(time.time()-s) + + +def vit_fusion_b_sparse16_k400(pretrained=True): + model = EVL( + backbone='vit_fusion_b16', + t_size=16, + dw_reduction=1.5, + backbone_drop_path_rate=0., + return_list=[8, 9, 10, 11], + n_layers=4, + n_dim=768, + n_head=12, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + num_classes=400, + ) + # if pretrained: + # pretrained_path = os.path.join(PATH_PREFIX, 'fuck.pyth') + # print(f'lodel model from: {pretrained_path}') + # state_dict = torch.load(pretrained_path, map_location='cpu') + # model.load_state_dict(state_dict) + return model + + +def vit_fusion_b_sparse16_sthsth(pretrained=True): + model = EVL( + backbone='vit_fusion_b16', + t_size=16, + dw_reduction=1.5, + backbone_drop_path_rate=0., + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + n_layers=12, + n_dim=768, + n_head=12, + mlp_factor=4.0, + drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + num_classes=174, + ) + # if pretrained: + # pretrained_path = os.path.join(PATH_PREFIX, 'fuck.pyth') + # print(f'lodel model from: {pretrained_path}') + # state_dict = torch.load(pretrained_path, map_location='cpu') + # model.load_state_dict(state_dict) + return model + + +if __name__ == '__main__': + + model = vit_fusion_b_sparse16_k400() + # cal_flops(model, frame=1, size=224) + cal_flops(model, frame=16, size=224) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/clip_kc_new/simple_tokenizer.py b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/simple_tokenizer.py new file mode 100644 index 0000000..0a66286 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/clip_kc_new/simple_tokenizer.py @@ -0,0 +1,132 @@ +import gzip +import html +import os +from functools import lru_cache + +import ftfy +import regex as re + + +@lru_cache() +def default_bpe(): + return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz") + + +@lru_cache() +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a corresponding list of unicode strings. + The reversible bpe codes work on unicode strings. + This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. 
+ When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. + This is a signficant percentage of your normal, say, 32K bpe vocab. + To avoid that, we want lookup tables between utf-8 bytes and unicode strings. + And avoids mapping to whitespace/control characters the bpe code barfs on. + """ + bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8+n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def get_pairs(word): + """Return set of symbol pairs in a word. + Word is represented as tuple of symbols (symbols being variable-length strings). + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +def basic_clean(text): + text = ftfy.fix_text(text) + text = html.unescape(html.unescape(text)) + return text.strip() + + +def whitespace_clean(text): + text = re.sub(r'\s+', ' ', text) + text = text.strip() + return text + + +class SimpleTokenizer(object): + def __init__(self, bpe_path: str = default_bpe()): + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + merges = gzip.open(bpe_path).read().decode("utf-8").split('\n') + merges = merges[1:49152-256-2+1] + merges = [tuple(merge.split()) for merge in merges] + vocab = list(bytes_to_unicode().values()) + vocab = vocab + [v+'' for v in vocab] + for merge in merges: + vocab.append(''.join(merge)) + vocab.extend(['<|startoftext|>', '<|endoftext|>']) + self.encoder = dict(zip(vocab, range(len(vocab)))) + self.decoder = {v: k for k, v in self.encoder.items()} + self.bpe_ranks = dict(zip(merges, range(len(merges)))) + self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'} + self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE) + + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token[:-1]) + ( token[-1] + '',) + pairs = get_pairs(word) + + if not pairs: + return token+'' + + while True: + bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf'))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + new_word.extend(word[i:j]) + i = j + except: + new_word.extend(word[i:]) + break + + if word[i] == first and i < len(word)-1 and word[i+1] == second: + new_word.append(first+second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = ' '.join(word) + self.cache[token] = word + return word + + def encode(self, text): + bpe_tokens = [] + text = whitespace_clean(basic_clean(text)).lower() + for token in re.findall(self.pat, text): + token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8')) + bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')) + return bpe_tokens + + def decode(self, tokens): + text = ''.join([self.decoder[token] for token in tokens]) + text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('', ' ') + return text diff --git a/Downstream/Video-Text-Retrieval/modules/cross-base/cross_config.json 
b/Downstream/Video-Text-Retrieval/modules/cross-base/cross_config.json new file mode 100644 index 0000000..2af047f --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/cross-base/cross_config.json @@ -0,0 +1,12 @@ +{ + "attention_probs_dropout_prob": 0.1, + "hidden_act": "gelu", + "hidden_dropout_prob": 0.1, + "hidden_size": 512, + "initializer_range": 0.02, + "intermediate_size": 2048, + "max_position_embeddings": 128, + "num_attention_heads": 8, + "num_hidden_layers": 4, + "vocab_size": 512 +} \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/file_utils.py b/Downstream/Video-Text-Retrieval/modules/file_utils.py new file mode 100644 index 0000000..43fa8ca --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/file_utils.py @@ -0,0 +1,239 @@ +""" +Utilities for working with the local dataset cache. +This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp +Copyright by the AllenNLP authors. +""" + +import os +import logging +import shutil +import tempfile +import json +from urllib.parse import urlparse +from pathlib import Path +from typing import Optional, Tuple, Union, IO, Callable, Set +from hashlib import sha256 +from functools import wraps + +from tqdm import tqdm + +import boto3 +from botocore.exceptions import ClientError +import requests + +logger = logging.getLogger(__name__) # pylint: disable=invalid-name + +PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', + Path.home() / '.pytorch_pretrained_bert')) + + +def url_to_filename(url: str, etag: str = None) -> str: + """ + Convert `url` into a hashed filename in a repeatable way. + If `etag` is specified, append its hash to the url's, delimited + by a period. + """ + url_bytes = url.encode('utf-8') + url_hash = sha256(url_bytes) + filename = url_hash.hexdigest() + + if etag: + etag_bytes = etag.encode('utf-8') + etag_hash = sha256(etag_bytes) + filename += '.' + etag_hash.hexdigest() + + return filename + + +def filename_to_url(filename: str, cache_dir: Union[str, Path] = None) -> Tuple[str, str]: + """ + Return the url and etag (which may be ``None``) stored for `filename`. + Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist. + """ + if cache_dir is None: + cache_dir = PYTORCH_PRETRAINED_BERT_CACHE + if isinstance(cache_dir, Path): + cache_dir = str(cache_dir) + + cache_path = os.path.join(cache_dir, filename) + if not os.path.exists(cache_path): + raise FileNotFoundError("file {} not found".format(cache_path)) + + meta_path = cache_path + '.json' + if not os.path.exists(meta_path): + raise FileNotFoundError("file {} not found".format(meta_path)) + + with open(meta_path) as meta_file: + metadata = json.load(meta_file) + url = metadata['url'] + etag = metadata['etag'] + + return url, etag + + +def cached_path(url_or_filename: Union[str, Path], cache_dir: Union[str, Path] = None) -> str: + """ + Given something that might be a URL (or might be a local path), + determine which. If it's a URL, download the file and cache it, and + return the path to the cached file. If it's already a local path, + make sure the file exists and then return the path. 
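+    S3 paths ("s3://bucket/key") are treated like URLs and cached the same way.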
+ """ + if cache_dir is None: + cache_dir = PYTORCH_PRETRAINED_BERT_CACHE + if isinstance(url_or_filename, Path): + url_or_filename = str(url_or_filename) + if isinstance(cache_dir, Path): + cache_dir = str(cache_dir) + + parsed = urlparse(url_or_filename) + + if parsed.scheme in ('http', 'https', 's3'): + # URL, so get it from the cache (downloading if necessary) + return get_from_cache(url_or_filename, cache_dir) + elif os.path.exists(url_or_filename): + # File, and it exists. + return url_or_filename + elif parsed.scheme == '': + # File, but it doesn't exist. + raise FileNotFoundError("file {} not found".format(url_or_filename)) + else: + # Something unknown + raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename)) + + +def split_s3_path(url: str) -> Tuple[str, str]: + """Split a full s3 path into the bucket name and path.""" + parsed = urlparse(url) + if not parsed.netloc or not parsed.path: + raise ValueError("bad s3 path {}".format(url)) + bucket_name = parsed.netloc + s3_path = parsed.path + # Remove '/' at beginning of path. + if s3_path.startswith("/"): + s3_path = s3_path[1:] + return bucket_name, s3_path + + +def s3_request(func: Callable): + """ + Wrapper function for s3 requests in order to create more helpful error + messages. + """ + + @wraps(func) + def wrapper(url: str, *args, **kwargs): + try: + return func(url, *args, **kwargs) + except ClientError as exc: + if int(exc.response["Error"]["Code"]) == 404: + raise FileNotFoundError("file {} not found".format(url)) + else: + raise + + return wrapper + + +@s3_request +def s3_etag(url: str) -> Optional[str]: + """Check ETag on S3 object.""" + s3_resource = boto3.resource("s3") + bucket_name, s3_path = split_s3_path(url) + s3_object = s3_resource.Object(bucket_name, s3_path) + return s3_object.e_tag + + +@s3_request +def s3_get(url: str, temp_file: IO) -> None: + """Pull a file directly from S3.""" + s3_resource = boto3.resource("s3") + bucket_name, s3_path = split_s3_path(url) + s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file) + + +def http_get(url: str, temp_file: IO) -> None: + req = requests.get(url, stream=True) + content_length = req.headers.get('Content-Length') + total = int(content_length) if content_length is not None else None + progress = tqdm(unit="B", total=total) + for chunk in req.iter_content(chunk_size=1024): + if chunk: # filter out keep-alive new chunks + progress.update(len(chunk)) + temp_file.write(chunk) + progress.close() + + +def get_from_cache(url: str, cache_dir: Union[str, Path] = None) -> str: + """ + Given a URL, look for the corresponding dataset in the local cache. + If it's not there, download it. Then return the path to the cached file. + """ + if cache_dir is None: + cache_dir = PYTORCH_PRETRAINED_BERT_CACHE + if isinstance(cache_dir, Path): + cache_dir = str(cache_dir) + + os.makedirs(cache_dir, exist_ok=True) + + # Get eTag to add to filename, if it exists. + if url.startswith("s3://"): + etag = s3_etag(url) + else: + response = requests.head(url, allow_redirects=True) + if response.status_code != 200: + raise IOError("HEAD request failed for url {} with status code {}" + .format(url, response.status_code)) + etag = response.headers.get("ETag") + + filename = url_to_filename(url, etag) + + # get cache path to put the file + cache_path = os.path.join(cache_dir, filename) + + if not os.path.exists(cache_path): + # Download to temporary file, then copy to cache dir once finished. 
+ # Otherwise you get corrupt cache entries if the download gets interrupted. + with tempfile.NamedTemporaryFile() as temp_file: + logger.info("%s not found in cache, downloading to %s", url, temp_file.name) + + # GET file object + if url.startswith("s3://"): + s3_get(url, temp_file) + else: + http_get(url, temp_file) + + # we are copying the file before closing it, so flush to avoid truncation + temp_file.flush() + # shutil.copyfileobj() starts at the current position, so go to the start + temp_file.seek(0) + + logger.info("copying %s to cache at %s", temp_file.name, cache_path) + with open(cache_path, 'wb') as cache_file: + shutil.copyfileobj(temp_file, cache_file) + + logger.info("creating metadata file for %s", cache_path) + meta = {'url': url, 'etag': etag} + meta_path = cache_path + '.json' + with open(meta_path, 'w') as meta_file: + json.dump(meta, meta_file) + + logger.info("removing temp file %s", temp_file.name) + + return cache_path + + +def read_set_from_file(filename: str) -> Set[str]: + ''' + Extract a de-duped collection (set) of text from a file. + Expected file format is one item per line. + ''' + collection = set() + with open(filename, 'r', encoding='utf-8') as file_: + for line in file_: + collection.add(line.rstrip()) + return collection + + +def get_file_extension(path: str, dot=True, lower: bool = True): + ext = os.path.splitext(path)[1] + ext = ext if dot else ext[1:] + return ext.lower() if lower else ext diff --git a/Downstream/Video-Text-Retrieval/modules/modeling.py b/Downstream/Video-Text-Retrieval/modules/modeling.py new file mode 100644 index 0000000..ed26cb4 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/modeling.py @@ -0,0 +1,729 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import logging + +import torch +from torch import nn +import torch.nn.functional as F + + +from modules.until_module import PreTrainedModel, AllGather, CrossEn +from modules.module_cross import CrossModel, CrossConfig, Transformer as TransformerClip + +from modules.module_clip import CLIP, convert_weights +from modules import clip_evl +from modules.clip_evl.model_no_freeze_only_global import vit_only_global_l_sparse8_k400, vit_only_global_b_sparse8_k400 +from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence + +logger = logging.getLogger(__name__) +allgather = AllGather.apply +from einops import rearrange + +class CLIP4ClipPreTrainedModel(PreTrainedModel, nn.Module): + """ An abstract class to handle weights initialization and + a simple interface for dowloading and loading pretrained models. 
+ """ + def __init__(self, cross_config, *inputs, **kwargs): + super(CLIP4ClipPreTrainedModel, self).__init__(cross_config) + self.cross_config = cross_config + self.clip = None + self.cross = None + + @classmethod + def from_pretrained(cls, cross_model_name, state_dict=None, cache_dir=None, type_vocab_size=2, *inputs, **kwargs): + + task_config = None + if "task_config" in kwargs.keys(): + task_config = kwargs["task_config"] + if not hasattr(task_config, "local_rank"): + task_config.__dict__["local_rank"] = 0 + elif task_config.local_rank == -1: + task_config.local_rank = 0 + + if state_dict is None: state_dict = {} + pretrained_clip_name = "ViT-B/32" + if hasattr(task_config, 'pretrained_clip_name'): + pretrained_clip_name = task_config.pretrained_clip_name + clip_state_dict = CLIP.get_config(pretrained_clip_name=pretrained_clip_name) + + ############# add ################ + clip_state_dict = clip_state_dict['state_dict'] if 'state_dict' in clip_state_dict else clip_state_dict + for key, val in clip_state_dict.items(): + if key not in state_dict: + state_dict[key] = val.clone() + new_key = key.replace('clip.', '') + if new_key not in state_dict: + state_dict[new_key] = val.clone() + ############## add #################### + + + for key, val in clip_state_dict.items(): + new_key = "clip." + key + if new_key not in state_dict: + state_dict[new_key] = val.clone() + + cross_config, _ = CrossConfig.get_config(cross_model_name, cache_dir, type_vocab_size, state_dict=None, task_config=task_config) + + model = cls(cross_config, clip_state_dict, *inputs, **kwargs) + + ## ===> Initialization trick [HARD CODE] + if model.linear_patch == "3d": + contain_conv2 = False + for key in state_dict.keys(): + if key.find("visual.conv2.weight") > -1: + contain_conv2 = True + break + if contain_conv2 is False and hasattr(model.clip.visual, "conv2"): + cp_weight = state_dict["clip.visual.conv1.weight"].clone() + kernel_size = model.clip.visual.conv2.weight.size(2) + conv2_size = model.clip.visual.conv2.weight.size() + conv2_size = list(conv2_size) + + left_conv2_size = conv2_size.copy() + right_conv2_size = conv2_size.copy() + left_conv2_size[2] = (kernel_size - 1) // 2 + right_conv2_size[2] = kernel_size - 1 - left_conv2_size[2] + + left_zeros, right_zeros = None, None + if left_conv2_size[2] > 0: + left_zeros = torch.zeros(*tuple(left_conv2_size), dtype=cp_weight.dtype, device=cp_weight.device) + if right_conv2_size[2] > 0: + right_zeros = torch.zeros(*tuple(right_conv2_size), dtype=cp_weight.dtype, device=cp_weight.device) + + cat_list = [] + if left_zeros != None: cat_list.append(left_zeros) + cat_list.append(cp_weight.unsqueeze(2)) + if right_zeros != None: cat_list.append(right_zeros) + cp_weight = torch.cat(cat_list, dim=2) + + state_dict["clip.visual.conv2.weight"] = cp_weight + + if model.sim_header == 'tightTransf': + contain_cross = False + for key in state_dict.keys(): + if key.find("cross.transformer") > -1: + contain_cross = True + break + if contain_cross is False: + for key, val in clip_state_dict.items(): + if key == "positional_embedding": + state_dict["cross.embeddings.position_embeddings.weight"] = val.clone() + continue + if key.find("transformer.resblocks") == 0: + num_layer = int(key.split(".")[2]) + + # cut from beginning + if num_layer < task_config.cross_num_hidden_layers: + state_dict["cross."+key] = val.clone() + continue + + if model.sim_header == "seqLSTM" or model.sim_header == "seqTransf": + contain_frame_position = False + for key in state_dict.keys(): + if 
key.find("frame_position_embeddings") > -1: + contain_frame_position = True + break + if contain_frame_position is False: + for key, val in clip_state_dict.items(): + if key == "positional_embedding": + state_dict["frame_position_embeddings.weight"] = val.clone() + continue + if model.sim_header == "seqTransf" and key.find("transformer.resblocks") == 0: + num_layer = int(key.split(".")[2]) + # cut from beginning + if num_layer < task_config.cross_num_hidden_layers: + state_dict[key.replace("transformer.", "transformerClip.")] = val.clone() + continue + ## <=== End of initialization trick + + + ############ evl model should not use this ############# + if state_dict is not None and task_config.clip_evl is False: + model = cls.init_preweight(model, state_dict, task_config=task_config) + return model + +def show_log(task_config, info): + if task_config is None or task_config.local_rank == 0: + logger.warning(info) + +def update_attr(target_name, target_config, target_attr_name, source_config, source_attr_name, default_value=None): + if hasattr(source_config, source_attr_name): + if default_value is None or getattr(source_config, source_attr_name) != default_value: + setattr(target_config, target_attr_name, getattr(source_config, source_attr_name)) + show_log(source_config, "Set {}.{}: {}.".format(target_name, + target_attr_name, getattr(target_config, target_attr_name))) + return target_config + +def check_attr(target_name, task_config): + return hasattr(task_config, target_name) and task_config.__dict__[target_name] + +class dual_softmax_loss(nn.Module): + def __init__(self,): + super(dual_softmax_loss, self).__init__() + + def forward(self, sim_matrix, temp=1000): + sim_matrix = sim_matrix * F.softmax(sim_matrix/temp, dim=0)*len(sim_matrix) #With an appropriate temperature parameter, the model achieves higher performance + logpt = F.log_softmax(sim_matrix, dim=-1) + logpt = torch.diag(logpt) + loss = -logpt + return loss + +class CLIP4Clip(CLIP4ClipPreTrainedModel): + def __init__(self, cross_config, clip_state_dict, task_config): + super(CLIP4Clip, self).__init__(cross_config) + self.task_config = task_config + self.ignore_video_index = -1 + + ### comment this for now + # assert self.task_config.max_words + self.task_config.max_frames <= cross_config.max_position_embeddings + + self._stage_one = True + self._stage_two = False + + show_log(task_config, "Stage-One:{}, Stage-Two:{}".format(self._stage_one, self._stage_two)) + + self.loose_type = False + if self._stage_one and check_attr('loose_type', self.task_config): + self.loose_type = True + show_log(task_config, "Test retrieval by loose type.") + + # CLIP Encoders: From OpenAI: CLIP [https://github.com/openai/CLIP] ===> + ##############add################### + if 'clip.visual.proj' in clip_state_dict: + new_dict = {} + for k, v in clip_state_dict.items(): + new_k = k.replace('clip.', '') + new_dict[new_k] = v.clone() + + clip_state_dict = new_dict + ##############add################### + + vit = "visual.proj" in clip_state_dict + assert vit + if vit: + vision_width = clip_state_dict["visual.conv1.weight"].shape[0] + vision_layers = len( + [k for k in clip_state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")]) + vision_patch_size = clip_state_dict["visual.conv1.weight"].shape[-1] + grid_size = round((clip_state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5) + image_resolution = vision_patch_size * grid_size + else: + counts: list = [len(set(k.split(".")[2] for k in clip_state_dict if 
k.startswith(f"visual.layer{b}"))) for b in + [1, 2, 3, 4]] + vision_layers = tuple(counts) + vision_width = clip_state_dict["visual.layer1.0.conv1.weight"].shape[0] + output_width = round((clip_state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5) + vision_patch_size = None + assert output_width ** 2 + 1 == clip_state_dict["visual.attnpool.positional_embedding"].shape[0] + image_resolution = output_width * 32 + + embed_dim = clip_state_dict["text_projection"].shape[1] + context_length = clip_state_dict["positional_embedding"].shape[0] + vocab_size = clip_state_dict["token_embedding.weight"].shape[0] + transformer_width = clip_state_dict["ln_final.weight"].shape[0] + transformer_heads = transformer_width // 64 + transformer_layers = len(set(k.split(".")[2] for k in clip_state_dict if k.startswith(f"transformer.resblocks"))) + + show_log(task_config, "\t embed_dim: {}".format(embed_dim)) + show_log(task_config, "\t image_resolution: {}".format(image_resolution)) + show_log(task_config, "\t vision_layers: {}".format(vision_layers)) + show_log(task_config, "\t vision_width: {}".format(vision_width)) + show_log(task_config, "\t vision_patch_size: {}".format(vision_patch_size)) + show_log(task_config, "\t context_length: {}".format(context_length)) + show_log(task_config, "\t vocab_size: {}".format(vocab_size)) + show_log(task_config, "\t transformer_width: {}".format(transformer_width)) + show_log(task_config, "\t transformer_heads: {}".format(transformer_heads)) + show_log(task_config, "\t transformer_layers: {}".format(transformer_layers)) + + self.linear_patch = '2d' + if hasattr(task_config, "linear_patch"): + self.linear_patch = task_config.linear_patch + show_log(task_config, "\t\t linear_patch: {}".format(self.linear_patch)) + + # use .float() to avoid overflow/underflow from fp16 weight. 
https://github.com/openai/CLIP/issues/40 + cut_top_layer = 0 + show_log(task_config, "\t cut_top_layer: {}".format(cut_top_layer)) + + #### paramters for DRL, we do not use them for now #### + self.interaction = task_config.interaction if hasattr(task_config, 'interaction') else 'no' + self.wti_arch = task_config.wti_arch if hasattr(task_config, 'wti_arch') else 0 + self.cdcr = task_config.cdcr if hasattr(task_config, 'cdcr') else 0 + + if hasattr(task_config, "clip_evl") and task_config.clip_evl == True: + self.clip, _ = clip_evl.load(task_config.pretrained_path, t_size=task_config.max_frames, mergeclip=task_config.mergeclip, mergeweight=task_config.mergeweight, clip_state_dict=clip_state_dict) + self.clip = self.clip.float() + self.clip_evl = True + + else: + self.clip_evl = False + self.clip = CLIP( + embed_dim, + image_resolution, vision_layers-cut_top_layer, vision_width, vision_patch_size, + context_length, vocab_size, transformer_width, transformer_heads, transformer_layers-cut_top_layer, + linear_patch=self.linear_patch + ).float() + + for key in ["input_resolution", "context_length", "vocab_size"]: + if key in clip_state_dict: + del clip_state_dict[key] + + ###################### Note this change ############################### + if not self.clip_evl: + convert_weights(self.clip) + ####################################################################### + # <=== End of CLIP Encoders + + + self.sim_header = 'meanP' + if hasattr(task_config, "sim_header"): + self.sim_header = task_config.sim_header + show_log(task_config, "\t sim_header: {}".format(self.sim_header)) + if self.sim_header == "tightTransf": assert self.loose_type is False + + cross_config.max_position_embeddings = context_length + if self.loose_type is False: + # Cross Encoder ===> + cross_config = update_attr("cross_config", cross_config, "num_hidden_layers", self.task_config, "cross_num_hidden_layers") + self.cross = CrossModel(cross_config) + # <=== End of Cross Encoder + self.similarity_dense = nn.Linear(cross_config.hidden_size, 1) + + if self.sim_header == "seqLSTM" or self.sim_header == "seqTransf": + self.frame_position_embeddings = nn.Embedding(cross_config.max_position_embeddings, cross_config.hidden_size) + if self.sim_header == "seqTransf": + self.transformerClip = TransformerClip(width=transformer_width, layers=self.task_config.cross_num_hidden_layers, + heads=transformer_heads, ) + if self.sim_header == "seqLSTM": + self.lstm_visual = nn.LSTM(input_size=cross_config.hidden_size, hidden_size=cross_config.hidden_size, + batch_first=True, bidirectional=False, num_layers=1) + + ######## add wti ######### + if self.cdcr: + self.cdcr_alpha1 = 0.16 + self.cdcr_alpha2 = 0 + self.cdcr_lambda = 0.001 + if self.interaction == 'wti' or self.interaction == 'ti': + if self.wti_arch == 1: + self.text_weight_fc = nn.Linear(transformer_width, 1) + self.video_weight_fc = nn.Linear(transformer_width, 1) + + elif self.wti_arch == 2: + self.text_weight_fc = nn.Sequential( + nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True), + nn.Linear(transformer_width, 1)) + self.video_weight_fc = nn.Sequential( + nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True), + nn.Linear(transformer_width, 1)) + elif self.wti_arch == 3: + self.text_weight_fc = nn.Sequential( + nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True), + nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True), + nn.Linear(transformer_width, 1)) + self.video_weight_fc = nn.Sequential( + 
nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True), + nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True), + nn.Linear(transformer_width, 1)) + + + self.loss_fct = CrossEn() + self.loss_dsl = dual_softmax_loss() + + ############## this has to be deactivated for clipevl ############## + if not self.clip_evl: + self.apply(self.init_weights) + + + def forward(self, input_ids, token_type_ids, attention_mask, video, video_mask=None): + input_ids = input_ids.view(-1, input_ids.shape[-1]) + token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1]) + attention_mask = attention_mask.view(-1, attention_mask.shape[-1]) + video_mask = video_mask.view(-1, video_mask.shape[-1]) + + # T x 3 x H x W + video = torch.as_tensor(video).float() + b, pair, bs, ts, channel, h, w = video.shape + video = video.view(b * pair * bs * ts, channel, h, w) + video_frame = bs * ts + + sequence_output, visual_output = self.get_sequence_visual_output(input_ids, token_type_ids, attention_mask, + video, video_mask, shaped=True, video_frame=video_frame) + if self.training: + loss = 0. + cdcr_loss = 0. + if self.wti_interaction != 'no' and self.cdcr: + sim_matrix, _, cdcr_loss = self.get_similarity_logits(sequence_output, visual_output, attention_mask, video_mask, + shaped=True, loose_type=self.loose_type) + else: + sim_matrix, *_tmp = self.get_similarity_logits(sequence_output, visual_output, attention_mask, video_mask, + shaped=True, loose_type=self.loose_type) + + sim_loss1 = self.loss_fct(sim_matrix) + sim_loss2 = self.loss_fct(sim_matrix.T) + sim_loss = (sim_loss1 + sim_loss2) / 2 + loss += sim_loss + + if self.cdcr: + loss += self.cdcr_lambda * cdcr_loss + return loss + else: + return None + + def get_sequence_output(self, input_ids, token_type_ids, attention_mask, shaped=False): + if shaped is False: + input_ids = input_ids.view(-1, input_ids.shape[-1]) + token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1]) + attention_mask = attention_mask.view(-1, attention_mask.shape[-1]) + + bs_pair = input_ids.size(0) + ### for naive + # sequence_hidden = self.clip.encode_text(input_ids).float() + ### for wti + if self.interaction == 'wti' or self.interaction == 'ti': + if self.clip_evl: + sequence_hidden = self.clip.encode_text(input_ids, return_all_feats=True)[1].float() + else: + sequence_hidden = self.clip.encode_text(input_ids, return_hidden=True)[1].float() + else: + sequence_hidden = self.clip.encode_text(input_ids).float() + + sequence_hidden = sequence_hidden.view(bs_pair, -1, sequence_hidden.size(-1)) + + return sequence_hidden + + def get_visual_output(self, video, video_mask, shaped=False, video_frame=-1): + if shaped is False: + video_mask = video_mask.view(-1, video_mask.shape[-1]) + video = torch.as_tensor(video).float() + b, pair, bs, ts, channel, h, w = video.shape + video = video.view(b * pair * bs * ts, channel, h, w) + video_frame = bs * ts + + bs_pair = video_mask.size(0) + if self.clip_evl: + if len(video.size()) == 4: + # [b, t, c, h, w] + video = video.view(bs_pair, -1, video.size(-3), video.size(-2), video.size(-1)) + video = video.permute(0, 2, 1, 3, 4).contiguous() + # [N, 1, d], [L, N, T, d] + evl_output, visual_output = self.clip.encode_video(video, return_all_feats=True) + # visual_output = visual_output.float() + visual_hidden = evl_output.float() + if self.interaction == 'wti': + # [L, N, T, d2] -> [N, T, d2] -> [N, T, d] + visual_hidden = self.clip.visual_ln_post(visual_output[0]) @ self.clip.visual_proj + else: + visual_hidden = 
self.clip.encode_image(video, video_frame=video_frame).float() + + visual_hidden = visual_hidden.view(bs_pair, -1, visual_hidden.size(-1)) + + return visual_hidden + + def get_sequence_visual_output(self, input_ids, token_type_ids, attention_mask, video, video_mask, shaped=False, video_frame=-1): + if shaped is False: + input_ids = input_ids.view(-1, input_ids.shape[-1]) + token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1]) + attention_mask = attention_mask.view(-1, attention_mask.shape[-1]) + video_mask = video_mask.view(-1, video_mask.shape[-1]) + + video = torch.as_tensor(video).float() + b, pair, bs, ts, channel, h, w = video.shape + video = video.view(b * pair * bs * ts, channel, h, w) + video_frame = bs * ts + + sequence_output = self.get_sequence_output(input_ids, token_type_ids, attention_mask, shaped=True) + visual_output = self.get_visual_output(video, video_mask, shaped=True, video_frame=video_frame) + return sequence_output, visual_output + + def _get_cross_output(self, sequence_output, visual_output, attention_mask, video_mask): + + concat_features = torch.cat((sequence_output, visual_output), dim=1) # concatnate tokens and frames + concat_mask = torch.cat((attention_mask, video_mask), dim=1) + text_type_ = torch.zeros_like(attention_mask) + video_type_ = torch.ones_like(video_mask) + concat_type = torch.cat((text_type_, video_type_), dim=1) + + cross_layers, pooled_output = self.cross(concat_features, concat_type, concat_mask, output_all_encoded_layers=True) + cross_output = cross_layers[-1] + + return cross_output, pooled_output, concat_mask + + def _mean_pooling_for_similarity_sequence(self, sequence_output, attention_mask): + attention_mask_un = attention_mask.to(dtype=torch.float).unsqueeze(-1) + attention_mask_un[:, 0, :] = 0. + sequence_output = sequence_output * attention_mask_un + text_out = torch.sum(sequence_output, dim=1) / torch.sum(attention_mask_un, dim=1, dtype=torch.float) + return text_out + + def _mean_pooling_for_similarity_visual(self, visual_output, video_mask,): + video_mask_un = video_mask.to(dtype=torch.float).unsqueeze(-1) + visual_output = visual_output * video_mask_un + video_mask_un_sum = torch.sum(video_mask_un, dim=1, dtype=torch.float) + video_mask_un_sum[video_mask_un_sum == 0.] = 1. 
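+            # Masked mean pooling over frames: multiplying by video_mask zeroes out padded frames, and the sum is
+            # divided by the number of valid frames (the guard above maps all-zero mask sums to 1 to avoid division
+            # by zero). E.g. for a clip with video_mask = [1, 1, 0], video_out is (frame_1 + frame_2) / 2 and the
+            # padded third frame is ignored.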
+ video_out = torch.sum(visual_output, dim=1) / video_mask_un_sum + return video_out + + def _mean_pooling_for_similarity(self, sequence_output, visual_output, attention_mask, video_mask,): + text_out = self._mean_pooling_for_similarity_sequence(sequence_output, attention_mask) + video_out = self._mean_pooling_for_similarity_visual(visual_output, video_mask) + + return text_out, video_out + + def _loose_similarity(self, sequence_output, visual_output, attention_mask, video_mask, sim_header="meanP"): + sequence_output, visual_output = sequence_output.contiguous(), visual_output.contiguous() + if sim_header == "meanP": + # Default: Parameter-free type + pass + elif sim_header == "seqLSTM": + # Sequential type: LSTM + visual_output_original = visual_output + visual_output = pack_padded_sequence(visual_output, torch.sum(video_mask, dim=-1).cpu(), + batch_first=True, enforce_sorted=False) + visual_output, _ = self.lstm_visual(visual_output) + if self.training: self.lstm_visual.flatten_parameters() + visual_output, _ = pad_packed_sequence(visual_output, batch_first=True) + visual_output = torch.cat((visual_output, visual_output_original[:, visual_output.size(1):, ...].contiguous()), dim=1) + visual_output = visual_output + visual_output_original + elif sim_header == "seqTransf": + # Sequential type: Transformer Encoder + visual_output_original = visual_output + seq_length = visual_output.size(1) + position_ids = torch.arange(seq_length, dtype=torch.long, device=visual_output.device) + position_ids = position_ids.unsqueeze(0).expand(visual_output.size(0), -1) + frame_position_embeddings = self.frame_position_embeddings(position_ids) + visual_output = visual_output + frame_position_embeddings + + extended_video_mask = (1.0 - video_mask.unsqueeze(1)) * -1000000.0 + extended_video_mask = extended_video_mask.expand(-1, video_mask.size(1), -1) + visual_output = visual_output.permute(1, 0, 2) # NLD -> LND + visual_output = self.transformerClip(visual_output, extended_video_mask) + visual_output = visual_output.permute(1, 0, 2) # LND -> NLD + visual_output = visual_output + visual_output_original + + if self.training: + visual_output = allgather(visual_output, self.task_config) + video_mask = allgather(video_mask, self.task_config) + sequence_output = allgather(sequence_output, self.task_config) + torch.distributed.barrier() + + + visual_output = self._mean_pooling_for_similarity_visual(visual_output, video_mask) + visual_output = visual_output / visual_output.norm(dim=-1, keepdim=True) + + sequence_output = sequence_output.squeeze(1) + sequence_output = sequence_output / sequence_output.norm(dim=-1, keepdim=True) + + logit_scale = self.clip.logit_scale.exp() + retrieve_logits = logit_scale * torch.matmul(sequence_output, visual_output.t()) + return retrieve_logits + + def _cross_similarity(self, sequence_output, visual_output, attention_mask, video_mask): + sequence_output, visual_output = sequence_output.contiguous(), visual_output.contiguous() + + b_text, s_text, h_text = sequence_output.size() + b_visual, s_visual, h_visual = visual_output.size() + + retrieve_logits_list = [] + + step_size = b_text # set smaller to reduce memory cost + split_size = [step_size] * (b_text // step_size) + release_size = b_text - sum(split_size) + if release_size > 0: + split_size += [release_size] + + # due to clip text branch retrun the last hidden + attention_mask = torch.ones(sequence_output.size(0), 1)\ + .to(device=attention_mask.device, dtype=attention_mask.dtype) + + sequence_output_splits = 
torch.split(sequence_output, split_size, dim=0) + attention_mask_splits = torch.split(attention_mask, split_size, dim=0) + for i in range(len(split_size)): + sequence_output_row = sequence_output_splits[i] + attention_mask_row = attention_mask_splits[i] + sequence_output_l = sequence_output_row.unsqueeze(1).repeat(1, b_visual, 1, 1) + sequence_output_l = sequence_output_l.view(-1, s_text, h_text) + attention_mask_l = attention_mask_row.unsqueeze(1).repeat(1, b_visual, 1) + attention_mask_l = attention_mask_l.view(-1, s_text) + + step_truth = sequence_output_row.size(0) + visual_output_r = visual_output.unsqueeze(0).repeat(step_truth, 1, 1, 1) + visual_output_r = visual_output_r.view(-1, s_visual, h_visual) + video_mask_r = video_mask.unsqueeze(0).repeat(step_truth, 1, 1) + video_mask_r = video_mask_r.view(-1, s_visual) + cross_output, pooled_output, concat_mask = \ + self._get_cross_output(sequence_output_l, visual_output_r, attention_mask_l, video_mask_r) + retrieve_logits_row = self.similarity_dense(pooled_output).squeeze(-1).view(step_truth, b_visual) + retrieve_logits_list.append(retrieve_logits_row) + + retrieve_logits = torch.cat(retrieve_logits_list, dim=0) + return retrieve_logits + + def get_similarity_logits(self, sequence_output, visual_output, attention_mask, video_mask, shaped=False, loose_type=False): + if shaped is False: + attention_mask = attention_mask.view(-1, attention_mask.shape[-1]) + video_mask = video_mask.view(-1, video_mask.shape[-1]) + + contrastive_direction = () + ### add wti ### + if self.interaction == 'wti' or self.interaction == 'ti': + if self.cdcr == 0: + retrieve_logits, _, _ = self.wti_interaction(sequence_output, visual_output, attention_mask, video_mask) + return retrieve_logits, contrastive_direction + else: + retrieve_logits, _, cdcr_loss = self.wti_interaction(sequence_output, visual_output, attention_mask, video_mask) + return retrieve_logits, contrastive_direction, cdcr_loss + ################ + if loose_type: + assert self.sim_header in ["meanP", "seqLSTM", "seqTransf"] + retrieve_logits = self._loose_similarity(sequence_output, visual_output, attention_mask, video_mask, sim_header=self.sim_header) + else: + assert self.sim_header in ["tightTransf"] + retrieve_logits = self._cross_similarity(sequence_output, visual_output, attention_mask, video_mask, ) + return retrieve_logits, contrastive_direction + + + + def wti_interaction(self, text_feat, video_feat, text_mask, video_mask): + text_feat, video_feat = text_feat.contiguous(), video_feat.contiguous() + if self.training and torch.cuda.is_available(): # batch merge here + text_feat = allgather(text_feat, self.task_config) + video_feat = allgather(video_feat, self.task_config) + text_mask = allgather(text_mask, self.task_config) + video_mask = allgather(video_mask, self.task_config) + torch.distributed.barrier() # force sync + if self.interaction == 'wti': + text_weight = self.text_weight_fc(text_feat).squeeze(2) # B x N_t x D -> B x N_t + text_weight.masked_fill_(torch.tensor((1 - text_mask), dtype=torch.bool), float("-inf")) + text_weight = torch.softmax(text_weight, dim=-1) # B x N_t + + video_weight = self.video_weight_fc(video_feat).squeeze(2) # B x N_v x D -> B x N_v + video_weight.masked_fill_(torch.tensor((1 - video_mask), dtype=torch.bool), float("-inf")) + video_weight = torch.softmax(video_weight, dim=-1) # B x N_v + + text_feat = text_feat / text_feat.norm(dim=-1, keepdim=True) + video_feat = video_feat / video_feat.norm(dim=-1, keepdim=True) + + retrieve_logits = 
torch.einsum('atd,bvd->abtv', [text_feat, video_feat]) + retrieve_logits = torch.einsum('abtv,at->abtv', [retrieve_logits, text_mask]) + retrieve_logits = torch.einsum('abtv,bv->abtv', [retrieve_logits, video_mask]) + text_sum = text_mask.sum(-1) + video_sum = video_mask.sum(-1) + + # max for video token + if self.interaction == 'ti': # token-wise interaction + t2v_logits, max_idx1 = retrieve_logits.max(dim=-1) # abtv -> abt + v2t_logits, max_idx2 = retrieve_logits.max(dim=-2) # abtv -> abv + t2v_logits = torch.sum(t2v_logits, dim=2) / (text_sum.unsqueeze(1)) + v2t_logits = torch.sum(v2t_logits, dim=2) / (video_sum.unsqueeze(0)) + retrieve_logits = (t2v_logits + v2t_logits) / 2.0 + + elif self.interaction == 'wti': # weighted token-wise interaction + t2v_logits, max_idx1 = retrieve_logits.max(dim=-1) # abtv -> abt + t2v_logits = torch.einsum('abt,at->ab', [t2v_logits, text_weight]) + + v2t_logits, max_idx2 = retrieve_logits.max(dim=-2) # abtv -> abv + v2t_logits = torch.einsum('abv,bv->ab', [v2t_logits, video_weight]) + retrieve_logits = (t2v_logits + v2t_logits) / 2.0 + + if self.training: + logit_scale = self.clip.logit_scale.exp() + retrieve_logits = logit_scale * retrieve_logits + + if self.cdcr == 1: + # simple random + _text_feat = text_feat[torch.arange(text_feat.shape[0]), + torch.randint_like(text_sum, 0, 10000) % text_sum, :] + _video_feat = video_feat[torch.arange(video_feat.shape[0]), + torch.randint_like(video_sum, 0, 10000) % video_sum, :] + z_a_norm = (_text_feat - _text_feat.mean(0)) / _text_feat.std(0) # NxN_sxD + z_b_norm = (_video_feat - _video_feat.mean(0)) / _video_feat.std(0) # NxN_txD + + # cross-correlation matrix + B, D = z_a_norm.shape + c = torch.einsum('ac,ad->cd', z_a_norm, z_b_norm) / B # DxD + # loss + on_diag = torch.diagonal(c).add_(-1).pow_(2).sum() + off_diag = c.flatten()[1:].view(D - 1, D + 1)[:, :-1].pow_(2).sum() + cdcr_loss = (on_diag * self.cdcr_alpha1 + off_diag * self.cdcr_alpha2) + return retrieve_logits, retrieve_logits.T, cdcr_loss + elif self.cdcr == 2: + # selecet max + max_idx1 = max_idx1[torch.arange(max_idx1.shape[0]), torch.arange(max_idx1.shape[1])] + max_idx2 = max_idx2[torch.arange(max_idx2.shape[0]), torch.arange(max_idx2.shape[1])] + + max_t_feat = text_feat[torch.arange(max_idx2.shape[0]).repeat_interleave(max_idx2.shape[1]), + max_idx2.flatten()] + max_v_feat = video_feat[torch.arange(max_idx1.shape[0]).repeat_interleave(max_idx1.shape[1]), + max_idx1.flatten()] + + t_feat = text_feat.reshape(-1, text_feat.shape[-1]) + t_mask = text_mask.flatten().type(torch.bool) + v_feat = video_feat.reshape(-1, text_feat.shape[-1]) + v_mask = video_mask.flatten().type(torch.bool) + t_feat = t_feat[t_mask] + v_feat = v_feat[v_mask] + max_t_feat = max_t_feat[v_mask] + max_v_feat = max_v_feat[t_mask] + + z_a_norm = (t_feat - t_feat.mean(0)) / t_feat.std(0) # (BxN_t)xD + z_b_norm = (max_v_feat - max_v_feat.mean(0)) / max_v_feat.std(0) # (BxN_t)xD + + x_a_norm = (v_feat - v_feat.mean(0)) / v_feat.std(0) # (BxN_v)xD + x_b_norm = (max_t_feat - max_t_feat.mean(0)) / max_t_feat.std(0) # (BxN_v)xD + + # cross-correlation matrix + N, D = z_a_norm.shape + c1 = torch.einsum('ac,ad->cd', z_a_norm, z_b_norm) / N # DxD + N, D = x_a_norm.shape + c2 = torch.einsum('ac,ad->cd', x_a_norm, x_b_norm) / N # DxD + c = (c1 + c2) / 2.0 + # loss + on_diag = torch.diagonal(c).add_(-1).pow_(2).sum() + off_diag = c.flatten()[1:].view(D - 1, D + 1)[:, :-1].pow_(2).sum() + cdcr_loss = (on_diag * self.cdcr_alpha1 + off_diag * self.cdcr_alpha2) + return retrieve_logits, 
retrieve_logits.T, cdcr_loss + elif self.cdcr == 3: + # selecet max + max_idx1 = max_idx1[torch.arange(max_idx1.shape[0]), torch.arange(max_idx1.shape[1])] + max_idx2 = max_idx2[torch.arange(max_idx2.shape[0]), torch.arange(max_idx2.shape[1])] + + max_t_feat = text_feat[torch.arange(max_idx2.shape[0]).repeat_interleave(max_idx2.shape[1]), + max_idx2.flatten()].squeeze(1) + max_v_feat = video_feat[torch.arange(max_idx1.shape[0]).repeat_interleave(max_idx1.shape[1]), + max_idx1.flatten()].squeeze(1) + + t_feat = text_feat.reshape(-1, text_feat.shape[-1]) + t_mask = text_mask.flatten().type(torch.bool) + v_feat = video_feat.reshape(-1, video_feat.shape[-1]) + v_mask = video_mask.flatten().type(torch.bool) + t_feat = t_feat[t_mask] + v_feat = v_feat[v_mask] + max_t_feat = max_t_feat[v_mask] + max_v_feat = max_v_feat[t_mask] + text_weight = text_weight.flatten()[t_mask] + video_weight = video_weight.flatten()[v_mask] + + z_a_norm = (t_feat - t_feat.mean(0)) / t_feat.std(0) # (BxN_t)xD + z_b_norm = (max_v_feat - max_v_feat.mean(0)) / max_v_feat.std(0) # (BxN_t)xD + + x_a_norm = (v_feat - v_feat.mean(0)) / v_feat.std(0) # (BxN_v)xD + x_b_norm = (max_t_feat - max_t_feat.mean(0)) / max_t_feat.std(0) # (BxN_v)xD + + # cross-correlation matrix + N, D = z_a_norm.shape + B = text_feat.shape[0] + c1 = torch.einsum("acd,a->cd", torch.einsum('ac,ad->acd', z_a_norm, z_b_norm), + text_weight) / B # DxD + c2 = torch.einsum("acd,a->cd", torch.einsum('ac,ad->acd', x_a_norm, x_b_norm), + video_weight) / B # DxD + c = (c1 + c2) / 2.0 + # loss + on_diag = torch.diagonal(c).add_(-1).pow_(2).sum() + off_diag = c.flatten()[1:].view(D - 1, D + 1)[:, :-1].pow_(2).sum() + cdcr_loss = (on_diag * self.cdcr_alpha1 + off_diag * self.cdcr_alpha2) + return retrieve_logits, retrieve_logits.T, cdcr_loss + else: + return retrieve_logits, retrieve_logits.T, 0.0 + else: + return retrieve_logits, retrieve_logits.T, 0.0 \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/modeling_backup.py b/Downstream/Video-Text-Retrieval/modules/modeling_backup.py new file mode 100644 index 0000000..67ee290 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/modeling_backup.py @@ -0,0 +1,771 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import logging + +import torch +from torch import nn +import torch.nn.functional as F + + +from modules.until_module import PreTrainedModel, AllGather, CrossEn +from modules.module_cross import CrossModel, CrossConfig, Transformer as TransformerClip + +from modules.module_clip import CLIP, convert_weights +from modules import clip_evl +from modules import clip_kc +from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence +from ipdb import set_trace + +logger = logging.getLogger(__name__) +allgather = AllGather.apply + +class CLIP4ClipPreTrainedModel(PreTrainedModel, nn.Module): + """ An abstract class to handle weights initialization and + a simple interface for dowloading and loading pretrained models. 
+ """ + def __init__(self, cross_config, *inputs, **kwargs): + super(CLIP4ClipPreTrainedModel, self).__init__(cross_config) + self.cross_config = cross_config + self.clip = None + self.cross = None + + @classmethod + def from_pretrained(cls, cross_model_name, state_dict=None, cache_dir=None, type_vocab_size=2, *inputs, **kwargs): + + task_config = None + if "task_config" in kwargs.keys(): + task_config = kwargs["task_config"] + if not hasattr(task_config, "local_rank"): + task_config.__dict__["local_rank"] = 0 + elif task_config.local_rank == -1: + task_config.local_rank = 0 + + if state_dict is None: state_dict = {} + pretrained_clip_name = "ViT-B/32" + if hasattr(task_config, 'pretrained_clip_name'): + pretrained_clip_name = task_config.pretrained_clip_name + clip_state_dict = CLIP.get_config(pretrained_clip_name=pretrained_clip_name) + + + # set_trace() + ############# add ################ + clip_state_dict = clip_state_dict['state_dict'] if 'state_dict' in clip_state_dict else clip_state_dict + for key, val in clip_state_dict.items(): + if key not in state_dict: + state_dict[key] = val.clone() + new_key = key.replace('clip.', '') + if new_key not in state_dict: + state_dict[new_key] = val.clone() + ############## add #################### + + + for key, val in clip_state_dict.items(): + new_key = "clip." + key + if new_key not in state_dict: + state_dict[new_key] = val.clone() + + cross_config, _ = CrossConfig.get_config(cross_model_name, cache_dir, type_vocab_size, state_dict=None, task_config=task_config) + + model = cls(cross_config, clip_state_dict, *inputs, **kwargs) + + ## ===> Initialization trick [HARD CODE] + if model.linear_patch == "3d": + contain_conv2 = False + for key in state_dict.keys(): + if key.find("visual.conv2.weight") > -1: + contain_conv2 = True + break + if contain_conv2 is False and hasattr(model.clip.visual, "conv2"): + cp_weight = state_dict["clip.visual.conv1.weight"].clone() + kernel_size = model.clip.visual.conv2.weight.size(2) + conv2_size = model.clip.visual.conv2.weight.size() + conv2_size = list(conv2_size) + + left_conv2_size = conv2_size.copy() + right_conv2_size = conv2_size.copy() + left_conv2_size[2] = (kernel_size - 1) // 2 + right_conv2_size[2] = kernel_size - 1 - left_conv2_size[2] + + left_zeros, right_zeros = None, None + if left_conv2_size[2] > 0: + left_zeros = torch.zeros(*tuple(left_conv2_size), dtype=cp_weight.dtype, device=cp_weight.device) + if right_conv2_size[2] > 0: + right_zeros = torch.zeros(*tuple(right_conv2_size), dtype=cp_weight.dtype, device=cp_weight.device) + + cat_list = [] + if left_zeros != None: cat_list.append(left_zeros) + cat_list.append(cp_weight.unsqueeze(2)) + if right_zeros != None: cat_list.append(right_zeros) + cp_weight = torch.cat(cat_list, dim=2) + + state_dict["clip.visual.conv2.weight"] = cp_weight + + if model.sim_header == 'tightTransf': + contain_cross = False + for key in state_dict.keys(): + if key.find("cross.transformer") > -1: + contain_cross = True + break + if contain_cross is False: + for key, val in clip_state_dict.items(): + if key == "positional_embedding": + state_dict["cross.embeddings.position_embeddings.weight"] = val.clone() + continue + if key.find("transformer.resblocks") == 0: + num_layer = int(key.split(".")[2]) + + # cut from beginning + if num_layer < task_config.cross_num_hidden_layers: + state_dict["cross."+key] = val.clone() + continue + + if model.sim_header == "seqLSTM" or model.sim_header == "seqTransf": + contain_frame_position = False + for key in state_dict.keys(): + if 
key.find("frame_position_embeddings") > -1: + contain_frame_position = True + break + if contain_frame_position is False: + for key, val in clip_state_dict.items(): + if key == "positional_embedding": + state_dict["frame_position_embeddings.weight"] = val.clone() + continue + if model.sim_header == "seqTransf" and key.find("transformer.resblocks") == 0: + num_layer = int(key.split(".")[2]) + # cut from beginning + if num_layer < task_config.cross_num_hidden_layers: + state_dict[key.replace("transformer.", "transformerClip.")] = val.clone() + continue + ## <=== End of initialization trick + + if state_dict is not None: + model = cls.init_preweight(model, state_dict, task_config=task_config) + + return model + +def show_log(task_config, info): + if task_config is None or task_config.local_rank == 0: + logger.warning(info) + +def update_attr(target_name, target_config, target_attr_name, source_config, source_attr_name, default_value=None): + if hasattr(source_config, source_attr_name): + if default_value is None or getattr(source_config, source_attr_name) != default_value: + setattr(target_config, target_attr_name, getattr(source_config, source_attr_name)) + show_log(source_config, "Set {}.{}: {}.".format(target_name, + target_attr_name, getattr(target_config, target_attr_name))) + return target_config + +def check_attr(target_name, task_config): + return hasattr(task_config, target_name) and task_config.__dict__[target_name] + +class dual_softmax_loss(nn.Module): + def __init__(self,): + super(dual_softmax_loss, self).__init__() + + def forward(self, sim_matrix, temp=1000): + sim_matrix = sim_matrix * F.softmax(sim_matrix/temp, dim=0)*len(sim_matrix) #With an appropriate temperature parameter, the model achieves higher performance + logpt = F.log_softmax(sim_matrix, dim=-1) + logpt = torch.diag(logpt) + loss = -logpt + return loss + +class CLIP4Clip(CLIP4ClipPreTrainedModel): + def __init__(self, cross_config, clip_state_dict, task_config): + super(CLIP4Clip, self).__init__(cross_config) + self.task_config = task_config + self.ignore_video_index = -1 + + assert self.task_config.max_words + self.task_config.max_frames <= cross_config.max_position_embeddings + + self._stage_one = True + self._stage_two = False + + show_log(task_config, "Stage-One:{}, Stage-Two:{}".format(self._stage_one, self._stage_two)) + + self.loose_type = False + if self._stage_one and check_attr('loose_type', self.task_config): + self.loose_type = True + show_log(task_config, "Test retrieval by loose type.") + + # CLIP Encoders: From OpenAI: CLIP [https://github.com/openai/CLIP] ===> + ##############add################### + if 'clip.visual.proj' in clip_state_dict: + new_dict = {} + for k, v in clip_state_dict.items(): + new_k = k.replace('clip.', '') + new_dict[new_k] = v.clone() + + clip_state_dict = new_dict + ##############add################### + + vit = "visual.proj" in clip_state_dict + assert vit + if vit: + vision_width = clip_state_dict["visual.conv1.weight"].shape[0] + vision_layers = len( + [k for k in clip_state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")]) + vision_patch_size = clip_state_dict["visual.conv1.weight"].shape[-1] + grid_size = round((clip_state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5) + image_resolution = vision_patch_size * grid_size + else: + counts: list = [len(set(k.split(".")[2] for k in clip_state_dict if k.startswith(f"visual.layer{b}"))) for b in + [1, 2, 3, 4]] + vision_layers = tuple(counts) + vision_width = 
clip_state_dict["visual.layer1.0.conv1.weight"].shape[0] + output_width = round((clip_state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5) + vision_patch_size = None + assert output_width ** 2 + 1 == clip_state_dict["visual.attnpool.positional_embedding"].shape[0] + image_resolution = output_width * 32 + + embed_dim = clip_state_dict["text_projection"].shape[1] + context_length = clip_state_dict["positional_embedding"].shape[0] + vocab_size = clip_state_dict["token_embedding.weight"].shape[0] + transformer_width = clip_state_dict["ln_final.weight"].shape[0] + transformer_heads = transformer_width // 64 + transformer_layers = len(set(k.split(".")[2] for k in clip_state_dict if k.startswith(f"transformer.resblocks"))) + + show_log(task_config, "\t embed_dim: {}".format(embed_dim)) + show_log(task_config, "\t image_resolution: {}".format(image_resolution)) + show_log(task_config, "\t vision_layers: {}".format(vision_layers)) + show_log(task_config, "\t vision_width: {}".format(vision_width)) + show_log(task_config, "\t vision_patch_size: {}".format(vision_patch_size)) + show_log(task_config, "\t context_length: {}".format(context_length)) + show_log(task_config, "\t vocab_size: {}".format(vocab_size)) + show_log(task_config, "\t transformer_width: {}".format(transformer_width)) + show_log(task_config, "\t transformer_heads: {}".format(transformer_heads)) + show_log(task_config, "\t transformer_layers: {}".format(transformer_layers)) + + self.linear_patch = '2d' + if hasattr(task_config, "linear_patch"): + self.linear_patch = task_config.linear_patch + show_log(task_config, "\t\t linear_patch: {}".format(self.linear_patch)) + + # use .float() to avoid overflow/underflow from fp16 weight. https://github.com/openai/CLIP/issues/40 + cut_top_layer = 0 + show_log(task_config, "\t cut_top_layer: {}".format(cut_top_layer)) + + #set_trace() + if hasattr(task_config, "clip_evl") and task_config.clip_evl == True: + ckpt_path = '/mnt/cache/share_data/liyizhuo.vendor/projects/all-in-one/outputs/outputs_k400/models/clip_evl_contrastive_mlm_webvid_howto_k400_lr1e5/last.ckpt' + # ckpt_path = '/mnt/cache/share_data/liyizhuo.vendor/projects/all-in-one/outputs/outputs_k400/models/clip_evl_contrastive_webvid_howto_k400_lr1e5/last.ckpt' + self.clip, _ = clip_evl.load(ckpt_path, t_size=8) + #set_trace() + self.clip = self.clip.float() + self.clip_evl = True + #set_trace() + elif hasattr(task_config, "clip_kc") and task_config.clip_kc == True: + # ckpt_path = '/mnt/lustre/share_data/liyizhuo/projects/all-in-one/outputs/outputs_msrvtt_choice/models/clip_kc_contrastive_mlm_webvid_howto_msrvtt_choice_lr5e6_bz256_cosine_step25k_sanity/epoch=0-step=359.ckpt' + ckpt_path = '/mnt/lustre/share_data/liyizhuo.vendor/projects/all-in-one/outputs/outputs_msrvtt_choice/models/clip_kc_contrastive_webvid_howto_clip_lm_01_msrvtt_choice_lr5e6_bz1024_cosine_step100k_freeze_text_ced00/epoch=13-step=47028.ckpt' + self.clip, _ = clip_kc.load(ckpt_path) + self.clip = self.clip.float() + self.clip_evl = True ### also use temporal + + else: + self.clip_evl = False + self.clip = CLIP( + embed_dim, + image_resolution, vision_layers-cut_top_layer, vision_width, vision_patch_size, + context_length, vocab_size, transformer_width, transformer_heads, transformer_layers-cut_top_layer, + linear_patch=self.linear_patch + ).float() + + for key in ["input_resolution", "context_length", "vocab_size"]: + if key in clip_state_dict: + del clip_state_dict[key] + + ###################### Note this change ############################### + 
convert_weights(self.clip) + ####################################################################### + # <=== End of CLIP Encoders + + self.sim_header = 'meanP' + if hasattr(task_config, "sim_header"): + self.sim_header = task_config.sim_header + show_log(task_config, "\t sim_header: {}".format(self.sim_header)) + if self.sim_header == "tightTransf": assert self.loose_type is False + + cross_config.max_position_embeddings = context_length + if self.loose_type is False: + # Cross Encoder ===> + cross_config = update_attr("cross_config", cross_config, "num_hidden_layers", self.task_config, "cross_num_hidden_layers") + self.cross = CrossModel(cross_config) + # <=== End of Cross Encoder + self.similarity_dense = nn.Linear(cross_config.hidden_size, 1) + + if self.sim_header == "seqLSTM" or self.sim_header == "seqTransf": + self.frame_position_embeddings = nn.Embedding(cross_config.max_position_embeddings, cross_config.hidden_size) + if self.sim_header == "seqTransf": + self.transformerClip = TransformerClip(width=transformer_width, layers=self.task_config.cross_num_hidden_layers, + heads=transformer_heads, ) + if self.sim_header == "seqLSTM": + self.lstm_visual = nn.LSTM(input_size=cross_config.hidden_size, hidden_size=cross_config.hidden_size, + batch_first=True, bidirectional=False, num_layers=1) + + ######## add wti ######### + self.config.interaction = 'wti' + #self.config.interaction = 'no' + self.config.wti_arch = 2 + self.config.cdcr = 0 + self.config.dsl = False + if self.config.cdcr: + self.config.cdcr_alpha1 = 0.16 + self.config.cdcr_alpha2 = 0 + self.config.cdcr_lambda = 0.001 + if self.config.interaction == 'wti' or self.config.interaction == 'ti': + if self.config.wti_arch == 1: + self.text_weight_fc = nn.Linear(transformer_width, 1) + self.video_weight_fc = nn.Linear(transformer_width, 1) + elif self.config.wti_arch == 2: + self.text_weight_fc = nn.Sequential( + nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True), + nn.Linear(transformer_width, 1)) + self.video_weight_fc = nn.Sequential( + nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True), + nn.Linear(transformer_width, 1)) + elif self.config.wti_arch == 3: + self.text_weight_fc = nn.Sequential( + nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True), + nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True), + nn.Linear(transformer_width, 1)) + self.video_weight_fc = nn.Sequential( + nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True), + nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True), + nn.Linear(transformer_width, 1)) + + + self.loss_fct = CrossEn() + self.loss_dsl = dual_softmax_loss() + + ############## this is important, has to be deactivated for clipevl ############## + if not self.clip_evl: + self.apply(self.init_weights) + + def forward(self, input_ids, token_type_ids, attention_mask, video, video_mask=None): + input_ids = input_ids.view(-1, input_ids.shape[-1]) + token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1]) + attention_mask = attention_mask.view(-1, attention_mask.shape[-1]) + video_mask = video_mask.view(-1, video_mask.shape[-1]) + + # T x 3 x H x W + # set_trace() + video = torch.as_tensor(video).float() + b, pair, bs, ts, channel, h, w = video.shape + video = video.view(b * pair * bs * ts, channel, h, w) + video_frame = bs * ts + + #set_trace() + + sequence_output, visual_output = self.get_sequence_visual_output(input_ids, token_type_ids, attention_mask, + video, video_mask, 
shaped=True, video_frame=video_frame) + #set_trace() + + if self.training: + loss = 0. + cdcr_loss = 0. + if self.wti_interaction != 'no' and self.config.cdcr: + sim_matrix, _, cdcr_loss = self.get_similarity_logits(sequence_output, visual_output, attention_mask, video_mask, + shaped=True, loose_type=self.loose_type) + else: + sim_matrix, *_tmp = self.get_similarity_logits(sequence_output, visual_output, attention_mask, video_mask, + shaped=True, loose_type=self.loose_type) + #sim_matrix = self.get_similarity_logits(sequence_output, visual_output, attention_mask, video_mask, + # shaped=True, loose_type=self.loose_type) + #set_trace() + + sim_loss1 = self.loss_fct(sim_matrix) + sim_loss2 = self.loss_fct(sim_matrix.T) + sim_loss = (sim_loss1 + sim_loss2) / 2 + loss += sim_loss + + if self.config.dsl: + dsl_loss1 = self.loss_dsl(sim_matrix).mean() + dsl_loss2 = self.loss_dsl(sim_matrix.T).mean() + loss += (dsl_loss1 + dsl_loss2) / 2 + + if self.config.cdcr: + loss += self.config.cdcr_lambda * cdcr_loss + return loss + else: + return None + + def get_sequence_output(self, input_ids, token_type_ids, attention_mask, shaped=False): + if shaped is False: + input_ids = input_ids.view(-1, input_ids.shape[-1]) + token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1]) + attention_mask = attention_mask.view(-1, attention_mask.shape[-1]) + + bs_pair = input_ids.size(0) + ### for naive + # sequence_hidden = self.clip.encode_text(input_ids).float() + ### for wti + if self.config.interaction == 'wti' or self.config.interaction == 'ti': + if self.clip_evl: + sequence_hidden = self.clip.encode_text(input_ids, return_all_feats=True)[1].float() + else: + sequence_hidden = self.clip.encode_text(input_ids, return_hidden=True)[1].float() + else: + #set_trace() + sequence_hidden = self.clip.encode_text(input_ids).float() + #set_trace() + sequence_hidden = sequence_hidden.view(bs_pair, -1, sequence_hidden.size(-1)) + + return sequence_hidden + + def get_visual_output(self, video, video_mask, shaped=False, video_frame=-1): + if shaped is False: + video_mask = video_mask.view(-1, video_mask.shape[-1]) + video = torch.as_tensor(video).float() + b, pair, bs, ts, channel, h, w = video.shape + video = video.view(b * pair * bs * ts, channel, h, w) + video_frame = bs * ts + + bs_pair = video_mask.size(0) + #set_trace() + if self.clip_evl: + visual_hidden = self.clip.encode_video(video).float() + else: + visual_hidden = self.clip.encode_image(video, video_frame=video_frame).float() + visual_hidden = visual_hidden.view(bs_pair, -1, visual_hidden.size(-1)) + #set_trace() + return visual_hidden + + def get_sequence_visual_output(self, input_ids, token_type_ids, attention_mask, video, video_mask, shaped=False, video_frame=-1): + if shaped is False: + input_ids = input_ids.view(-1, input_ids.shape[-1]) + token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1]) + attention_mask = attention_mask.view(-1, attention_mask.shape[-1]) + video_mask = video_mask.view(-1, video_mask.shape[-1]) + + video = torch.as_tensor(video).float() + b, pair, bs, ts, channel, h, w = video.shape + video = video.view(b * pair * bs * ts, channel, h, w) + video_frame = bs * ts + sequence_output = self.get_sequence_output(input_ids, token_type_ids, attention_mask, shaped=True) + visual_output = self.get_visual_output(video, video_mask, shaped=True, video_frame=video_frame) + + return sequence_output, visual_output + + def _get_cross_output(self, sequence_output, visual_output, attention_mask, video_mask): + + concat_features = 
torch.cat((sequence_output, visual_output), dim=1) # concatnate tokens and frames + concat_mask = torch.cat((attention_mask, video_mask), dim=1) + text_type_ = torch.zeros_like(attention_mask) + video_type_ = torch.ones_like(video_mask) + concat_type = torch.cat((text_type_, video_type_), dim=1) + + cross_layers, pooled_output = self.cross(concat_features, concat_type, concat_mask, output_all_encoded_layers=True) + cross_output = cross_layers[-1] + + return cross_output, pooled_output, concat_mask + + def _mean_pooling_for_similarity_sequence(self, sequence_output, attention_mask): + attention_mask_un = attention_mask.to(dtype=torch.float).unsqueeze(-1) + attention_mask_un[:, 0, :] = 0. + sequence_output = sequence_output * attention_mask_un + text_out = torch.sum(sequence_output, dim=1) / torch.sum(attention_mask_un, dim=1, dtype=torch.float) + return text_out + + def _mean_pooling_for_similarity_visual(self, visual_output, video_mask,): + video_mask_un = video_mask.to(dtype=torch.float).unsqueeze(-1) + visual_output = visual_output * video_mask_un + video_mask_un_sum = torch.sum(video_mask_un, dim=1, dtype=torch.float) + video_mask_un_sum[video_mask_un_sum == 0.] = 1. + video_out = torch.sum(visual_output, dim=1) / video_mask_un_sum + return video_out + + def _mean_pooling_for_similarity(self, sequence_output, visual_output, attention_mask, video_mask,): + text_out = self._mean_pooling_for_similarity_sequence(sequence_output, attention_mask) + video_out = self._mean_pooling_for_similarity_visual(visual_output, video_mask) + + return text_out, video_out + + def _loose_similarity(self, sequence_output, visual_output, attention_mask, video_mask, sim_header="meanP"): + sequence_output, visual_output = sequence_output.contiguous(), visual_output.contiguous() + + if sim_header == "meanP": + # Default: Parameter-free type + pass + elif sim_header == "seqLSTM": + # Sequential type: LSTM + visual_output_original = visual_output + visual_output = pack_padded_sequence(visual_output, torch.sum(video_mask, dim=-1).cpu(), + batch_first=True, enforce_sorted=False) + visual_output, _ = self.lstm_visual(visual_output) + if self.training: self.lstm_visual.flatten_parameters() + visual_output, _ = pad_packed_sequence(visual_output, batch_first=True) + visual_output = torch.cat((visual_output, visual_output_original[:, visual_output.size(1):, ...].contiguous()), dim=1) + visual_output = visual_output + visual_output_original + elif sim_header == "seqTransf": + # Sequential type: Transformer Encoder + visual_output_original = visual_output + seq_length = visual_output.size(1) + position_ids = torch.arange(seq_length, dtype=torch.long, device=visual_output.device) + position_ids = position_ids.unsqueeze(0).expand(visual_output.size(0), -1) + frame_position_embeddings = self.frame_position_embeddings(position_ids) + visual_output = visual_output + frame_position_embeddings + + extended_video_mask = (1.0 - video_mask.unsqueeze(1)) * -1000000.0 + extended_video_mask = extended_video_mask.expand(-1, video_mask.size(1), -1) + visual_output = visual_output.permute(1, 0, 2) # NLD -> LND + visual_output = self.transformerClip(visual_output, extended_video_mask) + visual_output = visual_output.permute(1, 0, 2) # LND -> NLD + visual_output = visual_output + visual_output_original + + if self.training: + visual_output = allgather(visual_output, self.task_config) + video_mask = allgather(video_mask, self.task_config) + sequence_output = allgather(sequence_output, self.task_config) + torch.distributed.barrier() + + 
visual_output = visual_output.squeeze(1) + visual_output = visual_output / visual_output.norm(dim=-1, keepdim=True) + #### we do not need to mean pooling over frames for evl and kc + #set_trace() + # if not self.clip_evl: + # visual_output = self._mean_pooling_for_similarity_visual(visual_output, video_mask) + # visual_output = visual_output / visual_output.norm(dim=-1, keepdim=True) + # else: + # visual_output = visual_output.squeeze(1) + #set_trace() + + sequence_output = sequence_output.squeeze(1) + sequence_output = sequence_output / sequence_output.norm(dim=-1, keepdim=True) + + logit_scale = self.clip.logit_scale.exp() + + # if self.config.dsl: + # # temp = 1 is best for zero-shot inference + # temp = 1000 + # retrieve_logits = torch.matmul(sequence_output, visual_output.t()) + # retrieve_logits = retrieve_logits * F.softmax(retrieve_logits/temp, dim=0) * len(retrieve_logits) + # # retrieve_logits = retrieve_logits * F.softmax(retrieve_logits/temp, dim=1) * len(retrieve_logits) + # # retrieve_logits = F.softmax(retrieve_logits/temp, dim=1) * F.softmax(retrieve_logits/temp, dim=0) * len(retrieve_logits) * len(retrieve_logits) + # retrieve_logits = logit_scale * retrieve_logits + # else: + # retrieve_logits = logit_scale * torch.matmul(sequence_output, visual_output.t()) + retrieve_logits = logit_scale * torch.matmul(sequence_output, visual_output.t()) + return retrieve_logits + + def _cross_similarity(self, sequence_output, visual_output, attention_mask, video_mask): + sequence_output, visual_output = sequence_output.contiguous(), visual_output.contiguous() + + b_text, s_text, h_text = sequence_output.size() + b_visual, s_visual, h_visual = visual_output.size() + + retrieve_logits_list = [] + + step_size = b_text # set smaller to reduce memory cost + split_size = [step_size] * (b_text // step_size) + release_size = b_text - sum(split_size) + if release_size > 0: + split_size += [release_size] + + # due to clip text branch retrun the last hidden + attention_mask = torch.ones(sequence_output.size(0), 1)\ + .to(device=attention_mask.device, dtype=attention_mask.dtype) + + sequence_output_splits = torch.split(sequence_output, split_size, dim=0) + attention_mask_splits = torch.split(attention_mask, split_size, dim=0) + for i in range(len(split_size)): + sequence_output_row = sequence_output_splits[i] + attention_mask_row = attention_mask_splits[i] + sequence_output_l = sequence_output_row.unsqueeze(1).repeat(1, b_visual, 1, 1) + sequence_output_l = sequence_output_l.view(-1, s_text, h_text) + attention_mask_l = attention_mask_row.unsqueeze(1).repeat(1, b_visual, 1) + attention_mask_l = attention_mask_l.view(-1, s_text) + + step_truth = sequence_output_row.size(0) + visual_output_r = visual_output.unsqueeze(0).repeat(step_truth, 1, 1, 1) + visual_output_r = visual_output_r.view(-1, s_visual, h_visual) + video_mask_r = video_mask.unsqueeze(0).repeat(step_truth, 1, 1) + video_mask_r = video_mask_r.view(-1, s_visual) + + cross_output, pooled_output, concat_mask = \ + self._get_cross_output(sequence_output_l, visual_output_r, attention_mask_l, video_mask_r) + retrieve_logits_row = self.similarity_dense(pooled_output).squeeze(-1).view(step_truth, b_visual) + + retrieve_logits_list.append(retrieve_logits_row) + + retrieve_logits = torch.cat(retrieve_logits_list, dim=0) + return retrieve_logits + + def get_similarity_logits(self, sequence_output, visual_output, attention_mask, video_mask, shaped=False, loose_type=False, use_dsl=True): + if shaped is False: + attention_mask = 
attention_mask.view(-1, attention_mask.shape[-1]) + video_mask = video_mask.view(-1, video_mask.shape[-1]) + + contrastive_direction = () + ### add wti ### + if self.config.interaction == 'wti' or self.config.interaction == 'ti': + if self.config.cdcr == 0: + retrieve_logits, _, _ = self.wti_interaction(sequence_output, visual_output, attention_mask, video_mask) + return retrieve_logits, contrastive_direction + # return retrieve_logits + else: + retrieve_logits, _, cdcr_loss = self.wti_interaction(sequence_output, visual_output, attention_mask, video_mask) + return retrieve_logits, contrastive_direction, cdcr_loss + #set_trace() + ################ + if loose_type: + assert self.sim_header in ["meanP", "seqLSTM", "seqTransf"] + retrieve_logits = self._loose_similarity(sequence_output, visual_output, attention_mask, video_mask, sim_header=self.sim_header, use_dsl=use_dsl) + else: + assert self.sim_header in ["tightTransf"] + retrieve_logits = self._cross_similarity(sequence_output, visual_output, attention_mask, video_mask, ) + #set_trace() + return retrieve_logits, contrastive_direction + # return retrieve_logits + + + + def wti_interaction(self, text_feat, video_feat, text_mask, video_mask): + #set_trace() + text_feat, video_feat = text_feat.contiguous(), video_feat.contiguous() + if self.training and torch.cuda.is_available(): # batch merge here + # text_feat = allgather(text_feat, self.config) + # video_feat = allgather(video_feat, self.config) + # text_mask = allgather(text_mask, self.config) + # video_mask = allgather(video_mask, self.config) + text_feat = allgather(text_feat, self.task_config) + video_feat = allgather(video_feat, self.task_config) + text_mask = allgather(text_mask, self.task_config) + video_mask = allgather(video_mask, self.task_config) + torch.distributed.barrier() # force sync + + if self.config.interaction == 'wti': + text_weight = self.text_weight_fc(text_feat).squeeze(2) # B x N_t x D -> B x N_t + text_weight.masked_fill_(torch.tensor((1 - text_mask), dtype=torch.bool), float("-inf")) + text_weight = torch.softmax(text_weight, dim=-1) # B x N_t + + video_weight = self.video_weight_fc(video_feat).squeeze(2) # B x N_v x D -> B x N_v + video_weight.masked_fill_(torch.tensor((1 - video_mask), dtype=torch.bool), float("-inf")) + video_weight = torch.softmax(video_weight, dim=-1) # B x N_v + + text_feat = text_feat / text_feat.norm(dim=-1, keepdim=True) + video_feat = video_feat / video_feat.norm(dim=-1, keepdim=True) + + retrieve_logits = torch.einsum('atd,bvd->abtv', [text_feat, video_feat]) + retrieve_logits = torch.einsum('abtv,at->abtv', [retrieve_logits, text_mask]) + retrieve_logits = torch.einsum('abtv,bv->abtv', [retrieve_logits, video_mask]) + text_sum = text_mask.sum(-1) + video_sum = video_mask.sum(-1) + + # max for video token + if self.config.interaction == 'ti': # token-wise interaction + t2v_logits, max_idx1 = retrieve_logits.max(dim=-1) # abtv -> abt + v2t_logits, max_idx2 = retrieve_logits.max(dim=-2) # abtv -> abv + t2v_logits = torch.sum(t2v_logits, dim=2) / (text_sum.unsqueeze(1)) + v2t_logits = torch.sum(v2t_logits, dim=2) / (video_sum.unsqueeze(0)) + retrieve_logits = (t2v_logits + v2t_logits) / 2.0 + + elif self.config.interaction == 'wti': # weighted token-wise interaction + t2v_logits, max_idx1 = retrieve_logits.max(dim=-1) # abtv -> abt + t2v_logits = torch.einsum('abt,at->ab', [t2v_logits, text_weight]) + + v2t_logits, max_idx2 = retrieve_logits.max(dim=-2) # abtv -> abv + v2t_logits = torch.einsum('abv,bv->ab', [v2t_logits, video_weight]) + 
retrieve_logits = (t2v_logits + v2t_logits) / 2.0 + + if self.training: + logit_scale = self.clip.logit_scale.exp() + retrieve_logits = logit_scale * retrieve_logits + + if self.config.cdcr == 1: + # simple random + _text_feat = text_feat[torch.arange(text_feat.shape[0]), + torch.randint_like(text_sum, 0, 10000) % text_sum, :] + _video_feat = video_feat[torch.arange(video_feat.shape[0]), + torch.randint_like(video_sum, 0, 10000) % video_sum, :] + z_a_norm = (_text_feat - _text_feat.mean(0)) / _text_feat.std(0) # NxN_sxD + z_b_norm = (_video_feat - _video_feat.mean(0)) / _video_feat.std(0) # NxN_txD + + # cross-correlation matrix + B, D = z_a_norm.shape + c = torch.einsum('ac,ad->cd', z_a_norm, z_b_norm) / B # DxD + # loss + on_diag = torch.diagonal(c).add_(-1).pow_(2).sum() + off_diag = c.flatten()[1:].view(D - 1, D + 1)[:, :-1].pow_(2).sum() + cdcr_loss = (on_diag * self.config.cdcr_alpha1 + off_diag * self.config.cdcr_alpha2) + return retrieve_logits, retrieve_logits.T, cdcr_loss + elif self.config.cdcr == 2: + # selecet max + max_idx1 = max_idx1[torch.arange(max_idx1.shape[0]), torch.arange(max_idx1.shape[1])] + max_idx2 = max_idx2[torch.arange(max_idx2.shape[0]), torch.arange(max_idx2.shape[1])] + + max_t_feat = text_feat[torch.arange(max_idx2.shape[0]).repeat_interleave(max_idx2.shape[1]), + max_idx2.flatten()] + max_v_feat = video_feat[torch.arange(max_idx1.shape[0]).repeat_interleave(max_idx1.shape[1]), + max_idx1.flatten()] + + t_feat = text_feat.reshape(-1, text_feat.shape[-1]) + t_mask = text_mask.flatten().type(torch.bool) + v_feat = video_feat.reshape(-1, text_feat.shape[-1]) + v_mask = video_mask.flatten().type(torch.bool) + t_feat = t_feat[t_mask] + v_feat = v_feat[v_mask] + max_t_feat = max_t_feat[v_mask] + max_v_feat = max_v_feat[t_mask] + + z_a_norm = (t_feat - t_feat.mean(0)) / t_feat.std(0) # (BxN_t)xD + z_b_norm = (max_v_feat - max_v_feat.mean(0)) / max_v_feat.std(0) # (BxN_t)xD + + x_a_norm = (v_feat - v_feat.mean(0)) / v_feat.std(0) # (BxN_v)xD + x_b_norm = (max_t_feat - max_t_feat.mean(0)) / max_t_feat.std(0) # (BxN_v)xD + + # cross-correlation matrix + N, D = z_a_norm.shape + c1 = torch.einsum('ac,ad->cd', z_a_norm, z_b_norm) / N # DxD + N, D = x_a_norm.shape + c2 = torch.einsum('ac,ad->cd', x_a_norm, x_b_norm) / N # DxD + c = (c1 + c2) / 2.0 + # loss + on_diag = torch.diagonal(c).add_(-1).pow_(2).sum() + off_diag = c.flatten()[1:].view(D - 1, D + 1)[:, :-1].pow_(2).sum() + cdcr_loss = (on_diag * self.config.cdcr_alpha1 + off_diag * self.config.cdcr_alpha2) + return retrieve_logits, retrieve_logits.T, cdcr_loss + elif self.config.cdcr == 3: + # selecet max + max_idx1 = max_idx1[torch.arange(max_idx1.shape[0]), torch.arange(max_idx1.shape[1])] + max_idx2 = max_idx2[torch.arange(max_idx2.shape[0]), torch.arange(max_idx2.shape[1])] + + max_t_feat = text_feat[torch.arange(max_idx2.shape[0]).repeat_interleave(max_idx2.shape[1]), + max_idx2.flatten()].squeeze(1) + max_v_feat = video_feat[torch.arange(max_idx1.shape[0]).repeat_interleave(max_idx1.shape[1]), + max_idx1.flatten()].squeeze(1) + + t_feat = text_feat.reshape(-1, text_feat.shape[-1]) + t_mask = text_mask.flatten().type(torch.bool) + v_feat = video_feat.reshape(-1, video_feat.shape[-1]) + v_mask = video_mask.flatten().type(torch.bool) + t_feat = t_feat[t_mask] + v_feat = v_feat[v_mask] + max_t_feat = max_t_feat[v_mask] + max_v_feat = max_v_feat[t_mask] + text_weight = text_weight.flatten()[t_mask] + video_weight = video_weight.flatten()[v_mask] + + z_a_norm = (t_feat - t_feat.mean(0)) / t_feat.std(0) # (BxN_t)xD 
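+                    # CDCR regularizer (variant 3): each feature set is standardized per dimension, then the
+                    # token-weighted DxD cross-correlation matrices c1/c2 below are pushed toward the identity --
+                    # on_diag drives the diagonal to 1 and off_diag drives the off-diagonal entries to 0 (scaled by
+                    # cdcr_alpha1 / cdcr_alpha2), a Barlow Twins-style decorrelation term that forward() adds to the
+                    # retrieval loss via self.config.cdcr_lambda.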
+ z_b_norm = (max_v_feat - max_v_feat.mean(0)) / max_v_feat.std(0) # (BxN_t)xD + + x_a_norm = (v_feat - v_feat.mean(0)) / v_feat.std(0) # (BxN_v)xD + x_b_norm = (max_t_feat - max_t_feat.mean(0)) / max_t_feat.std(0) # (BxN_v)xD + + # cross-correlation matrix + N, D = z_a_norm.shape + B = text_feat.shape[0] + c1 = torch.einsum("acd,a->cd", torch.einsum('ac,ad->acd', z_a_norm, z_b_norm), + text_weight) / B # DxD + c2 = torch.einsum("acd,a->cd", torch.einsum('ac,ad->acd', x_a_norm, x_b_norm), + video_weight) / B # DxD + c = (c1 + c2) / 2.0 + # loss + on_diag = torch.diagonal(c).add_(-1).pow_(2).sum() + off_diag = c.flatten()[1:].view(D - 1, D + 1)[:, :-1].pow_(2).sum() + cdcr_loss = (on_diag * self.config.cdcr_alpha1 + off_diag * self.config.cdcr_alpha2) + return retrieve_logits, retrieve_logits.T, cdcr_loss + else: + return retrieve_logits, retrieve_logits.T, 0.0 + else: + return retrieve_logits, retrieve_logits.T, 0.0 \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/modeling_raw.py b/Downstream/Video-Text-Retrieval/modules/modeling_raw.py new file mode 100644 index 0000000..7ebda33 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/modeling_raw.py @@ -0,0 +1,464 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import logging + +import torch +from torch import nn +import torch.nn.functional as F + + +from modules.until_module import PreTrainedModel, AllGather, CrossEn +from modules.module_cross import CrossModel, CrossConfig, Transformer as TransformerClip + +from modules.module_clip import CLIP, convert_weights +from modules import clip_evl +from modules import clip_kc +from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence +from ipdb import set_trace + +logger = logging.getLogger(__name__) +allgather = AllGather.apply + +class CLIP4ClipPreTrainedModel(PreTrainedModel, nn.Module): + """ An abstract class to handle weights initialization and + a simple interface for dowloading and loading pretrained models. + """ + def __init__(self, cross_config, *inputs, **kwargs): + super(CLIP4ClipPreTrainedModel, self).__init__(cross_config) + self.cross_config = cross_config + self.clip = None + self.cross = None + + @classmethod + def from_pretrained(cls, cross_model_name, state_dict=None, cache_dir=None, type_vocab_size=2, *inputs, **kwargs): + + task_config = None + if "task_config" in kwargs.keys(): + task_config = kwargs["task_config"] + if not hasattr(task_config, "local_rank"): + task_config.__dict__["local_rank"] = 0 + elif task_config.local_rank == -1: + task_config.local_rank = 0 + + if state_dict is None: state_dict = {} + pretrained_clip_name = "ViT-B/32" + if hasattr(task_config, 'pretrained_clip_name'): + pretrained_clip_name = task_config.pretrained_clip_name + clip_state_dict = CLIP.get_config(pretrained_clip_name=pretrained_clip_name) + for key, val in clip_state_dict.items(): + new_key = "clip." 
+ key + if new_key not in state_dict: + state_dict[new_key] = val.clone() + + cross_config, _ = CrossConfig.get_config(cross_model_name, cache_dir, type_vocab_size, state_dict=None, task_config=task_config) + + model = cls(cross_config, clip_state_dict, *inputs, **kwargs) + + ## ===> Initialization trick [HARD CODE] + if model.linear_patch == "3d": + contain_conv2 = False + for key in state_dict.keys(): + if key.find("visual.conv2.weight") > -1: + contain_conv2 = True + break + if contain_conv2 is False and hasattr(model.clip.visual, "conv2"): + cp_weight = state_dict["clip.visual.conv1.weight"].clone() + kernel_size = model.clip.visual.conv2.weight.size(2) + conv2_size = model.clip.visual.conv2.weight.size() + conv2_size = list(conv2_size) + + left_conv2_size = conv2_size.copy() + right_conv2_size = conv2_size.copy() + left_conv2_size[2] = (kernel_size - 1) // 2 + right_conv2_size[2] = kernel_size - 1 - left_conv2_size[2] + + left_zeros, right_zeros = None, None + if left_conv2_size[2] > 0: + left_zeros = torch.zeros(*tuple(left_conv2_size), dtype=cp_weight.dtype, device=cp_weight.device) + if right_conv2_size[2] > 0: + right_zeros = torch.zeros(*tuple(right_conv2_size), dtype=cp_weight.dtype, device=cp_weight.device) + + cat_list = [] + if left_zeros != None: cat_list.append(left_zeros) + cat_list.append(cp_weight.unsqueeze(2)) + if right_zeros != None: cat_list.append(right_zeros) + cp_weight = torch.cat(cat_list, dim=2) + + state_dict["clip.visual.conv2.weight"] = cp_weight + + if model.sim_header == 'tightTransf': + contain_cross = False + for key in state_dict.keys(): + if key.find("cross.transformer") > -1: + contain_cross = True + break + if contain_cross is False: + for key, val in clip_state_dict.items(): + if key == "positional_embedding": + state_dict["cross.embeddings.position_embeddings.weight"] = val.clone() + continue + if key.find("transformer.resblocks") == 0: + num_layer = int(key.split(".")[2]) + + # cut from beginning + if num_layer < task_config.cross_num_hidden_layers: + state_dict["cross."+key] = val.clone() + continue + + if model.sim_header == "seqLSTM" or model.sim_header == "seqTransf": + contain_frame_position = False + for key in state_dict.keys(): + if key.find("frame_position_embeddings") > -1: + contain_frame_position = True + break + if contain_frame_position is False: + for key, val in clip_state_dict.items(): + if key == "positional_embedding": + state_dict["frame_position_embeddings.weight"] = val.clone() + continue + if model.sim_header == "seqTransf" and key.find("transformer.resblocks") == 0: + num_layer = int(key.split(".")[2]) + # cut from beginning + if num_layer < task_config.cross_num_hidden_layers: + state_dict[key.replace("transformer.", "transformerClip.")] = val.clone() + continue + ## <=== End of initialization trick + + if state_dict is not None: + model = cls.init_preweight(model, state_dict, task_config=task_config) + + return model + +def show_log(task_config, info): + if task_config is None or task_config.local_rank == 0: + logger.warning(info) + +def update_attr(target_name, target_config, target_attr_name, source_config, source_attr_name, default_value=None): + if hasattr(source_config, source_attr_name): + if default_value is None or getattr(source_config, source_attr_name) != default_value: + setattr(target_config, target_attr_name, getattr(source_config, source_attr_name)) + show_log(source_config, "Set {}.{}: {}.".format(target_name, + target_attr_name, getattr(target_config, target_attr_name))) + return target_config + +def 
check_attr(target_name, task_config): + return hasattr(task_config, target_name) and task_config.__dict__[target_name] + +class CLIP4Clip(CLIP4ClipPreTrainedModel): + def __init__(self, cross_config, clip_state_dict, task_config): + super(CLIP4Clip, self).__init__(cross_config) + self.task_config = task_config + self.ignore_video_index = -1 + + assert self.task_config.max_words + self.task_config.max_frames <= cross_config.max_position_embeddings + + self._stage_one = True + self._stage_two = False + + show_log(task_config, "Stage-One:{}, Stage-Two:{}".format(self._stage_one, self._stage_two)) + + self.loose_type = False + if self._stage_one and check_attr('loose_type', self.task_config): + self.loose_type = True + show_log(task_config, "Test retrieval by loose type.") + + # CLIP Encoders: From OpenAI: CLIP [https://github.com/openai/CLIP] ===> + vit = "visual.proj" in clip_state_dict + assert vit + if vit: + vision_width = clip_state_dict["visual.conv1.weight"].shape[0] + vision_layers = len( + [k for k in clip_state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")]) + vision_patch_size = clip_state_dict["visual.conv1.weight"].shape[-1] + grid_size = round((clip_state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5) + image_resolution = vision_patch_size * grid_size + else: + counts: list = [len(set(k.split(".")[2] for k in clip_state_dict if k.startswith(f"visual.layer{b}"))) for b in + [1, 2, 3, 4]] + vision_layers = tuple(counts) + vision_width = clip_state_dict["visual.layer1.0.conv1.weight"].shape[0] + output_width = round((clip_state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5) + vision_patch_size = None + assert output_width ** 2 + 1 == clip_state_dict["visual.attnpool.positional_embedding"].shape[0] + image_resolution = output_width * 32 + + embed_dim = clip_state_dict["text_projection"].shape[1] + context_length = clip_state_dict["positional_embedding"].shape[0] + vocab_size = clip_state_dict["token_embedding.weight"].shape[0] + transformer_width = clip_state_dict["ln_final.weight"].shape[0] + transformer_heads = transformer_width // 64 + transformer_layers = len(set(k.split(".")[2] for k in clip_state_dict if k.startswith(f"transformer.resblocks"))) + + show_log(task_config, "\t embed_dim: {}".format(embed_dim)) + show_log(task_config, "\t image_resolution: {}".format(image_resolution)) + show_log(task_config, "\t vision_layers: {}".format(vision_layers)) + show_log(task_config, "\t vision_width: {}".format(vision_width)) + show_log(task_config, "\t vision_patch_size: {}".format(vision_patch_size)) + show_log(task_config, "\t context_length: {}".format(context_length)) + show_log(task_config, "\t vocab_size: {}".format(vocab_size)) + show_log(task_config, "\t transformer_width: {}".format(transformer_width)) + show_log(task_config, "\t transformer_heads: {}".format(transformer_heads)) + show_log(task_config, "\t transformer_layers: {}".format(transformer_layers)) + + self.linear_patch = '2d' + if hasattr(task_config, "linear_patch"): + self.linear_patch = task_config.linear_patch + show_log(task_config, "\t\t linear_patch: {}".format(self.linear_patch)) + + # use .float() to avoid overflow/underflow from fp16 weight. 
https://github.com/openai/CLIP/issues/40 + cut_top_layer = 0 + show_log(task_config, "\t cut_top_layer: {}".format(cut_top_layer)) + self.clip = CLIP( + embed_dim, + image_resolution, vision_layers-cut_top_layer, vision_width, vision_patch_size, + context_length, vocab_size, transformer_width, transformer_heads, transformer_layers-cut_top_layer, + linear_patch=self.linear_patch + ).float() + + for key in ["input_resolution", "context_length", "vocab_size"]: + if key in clip_state_dict: + del clip_state_dict[key] + + convert_weights(self.clip) + # <=== End of CLIP Encoders + + self.sim_header = 'meanP' + if hasattr(task_config, "sim_header"): + self.sim_header = task_config.sim_header + show_log(task_config, "\t sim_header: {}".format(self.sim_header)) + if self.sim_header == "tightTransf": assert self.loose_type is False + + cross_config.max_position_embeddings = context_length + if self.loose_type is False: + # Cross Encoder ===> + cross_config = update_attr("cross_config", cross_config, "num_hidden_layers", self.task_config, "cross_num_hidden_layers") + self.cross = CrossModel(cross_config) + # <=== End of Cross Encoder + self.similarity_dense = nn.Linear(cross_config.hidden_size, 1) + + if self.sim_header == "seqLSTM" or self.sim_header == "seqTransf": + self.frame_position_embeddings = nn.Embedding(cross_config.max_position_embeddings, cross_config.hidden_size) + if self.sim_header == "seqTransf": + self.transformerClip = TransformerClip(width=transformer_width, layers=self.task_config.cross_num_hidden_layers, + heads=transformer_heads, ) + if self.sim_header == "seqLSTM": + self.lstm_visual = nn.LSTM(input_size=cross_config.hidden_size, hidden_size=cross_config.hidden_size, + batch_first=True, bidirectional=False, num_layers=1) + + self.loss_fct = CrossEn() + + self.apply(self.init_weights) + + def forward(self, input_ids, token_type_ids, attention_mask, video, video_mask=None): + input_ids = input_ids.view(-1, input_ids.shape[-1]) + token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1]) + attention_mask = attention_mask.view(-1, attention_mask.shape[-1]) + video_mask = video_mask.view(-1, video_mask.shape[-1]) + + # T x 3 x H x W + video = torch.as_tensor(video).float() + b, pair, bs, ts, channel, h, w = video.shape + video = video.view(b * pair * bs * ts, channel, h, w) + video_frame = bs * ts + + sequence_output, visual_output = self.get_sequence_visual_output(input_ids, token_type_ids, attention_mask, + video, video_mask, shaped=True, video_frame=video_frame) + + if self.training: + loss = 0. 
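The training branch that follows is a standard bidirectional retrieval objective: the same similarity matrix is scored text-to-video and video-to-text and the two losses are averaged. Assuming `CrossEn` from `until_module` behaves as row-wise cross-entropy with the diagonal as the positive pair (its usual definition in CLIP4Clip-style code), it is equivalent to this sketch (hypothetical helper, not part of the repository):

```python
import torch
import torch.nn.functional as F

def symmetric_retrieval_loss(sim_matrix: torch.Tensor) -> torch.Tensor:
    """sim_matrix: (B, B) scaled text-video logits; the i-th text matches the i-th video."""
    labels = torch.arange(sim_matrix.size(0), device=sim_matrix.device)
    loss_t2v = F.cross_entropy(sim_matrix, labels)      # text -> video direction
    loss_v2t = F.cross_entropy(sim_matrix.t(), labels)  # video -> text direction
    return (loss_t2v + loss_v2t) / 2
```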
+ sim_matrix, *_tmp = self.get_similarity_logits(sequence_output, visual_output, attention_mask, video_mask, + shaped=True, loose_type=self.loose_type) + sim_loss1 = self.loss_fct(sim_matrix) + sim_loss2 = self.loss_fct(sim_matrix.T) + sim_loss = (sim_loss1 + sim_loss2) / 2 + loss += sim_loss + + return loss + else: + return None + + def get_sequence_output(self, input_ids, token_type_ids, attention_mask, shaped=False): + if shaped is False: + input_ids = input_ids.view(-1, input_ids.shape[-1]) + token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1]) + attention_mask = attention_mask.view(-1, attention_mask.shape[-1]) + + bs_pair = input_ids.size(0) + sequence_hidden = self.clip.encode_text(input_ids).float() + sequence_hidden = sequence_hidden.view(bs_pair, -1, sequence_hidden.size(-1)) + + return sequence_hidden + + def get_visual_output(self, video, video_mask, shaped=False, video_frame=-1): + if shaped is False: + video_mask = video_mask.view(-1, video_mask.shape[-1]) + video = torch.as_tensor(video).float() + b, pair, bs, ts, channel, h, w = video.shape + video = video.view(b * pair * bs * ts, channel, h, w) + video_frame = bs * ts + + bs_pair = video_mask.size(0) + visual_hidden = self.clip.encode_image(video, video_frame=video_frame).float() + visual_hidden = visual_hidden.view(bs_pair, -1, visual_hidden.size(-1)) + + return visual_hidden + + def get_sequence_visual_output(self, input_ids, token_type_ids, attention_mask, video, video_mask, shaped=False, video_frame=-1): + if shaped is False: + input_ids = input_ids.view(-1, input_ids.shape[-1]) + token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1]) + attention_mask = attention_mask.view(-1, attention_mask.shape[-1]) + video_mask = video_mask.view(-1, video_mask.shape[-1]) + + video = torch.as_tensor(video).float() + b, pair, bs, ts, channel, h, w = video.shape + video = video.view(b * pair * bs * ts, channel, h, w) + video_frame = bs * ts + + sequence_output = self.get_sequence_output(input_ids, token_type_ids, attention_mask, shaped=True) + visual_output = self.get_visual_output(video, video_mask, shaped=True, video_frame=video_frame) + + return sequence_output, visual_output + + def _get_cross_output(self, sequence_output, visual_output, attention_mask, video_mask): + + concat_features = torch.cat((sequence_output, visual_output), dim=1) # concatnate tokens and frames + concat_mask = torch.cat((attention_mask, video_mask), dim=1) + text_type_ = torch.zeros_like(attention_mask) + video_type_ = torch.ones_like(video_mask) + concat_type = torch.cat((text_type_, video_type_), dim=1) + + cross_layers, pooled_output = self.cross(concat_features, concat_type, concat_mask, output_all_encoded_layers=True) + cross_output = cross_layers[-1] + + return cross_output, pooled_output, concat_mask + + def _mean_pooling_for_similarity_sequence(self, sequence_output, attention_mask): + attention_mask_un = attention_mask.to(dtype=torch.float).unsqueeze(-1) + attention_mask_un[:, 0, :] = 0. + sequence_output = sequence_output * attention_mask_un + text_out = torch.sum(sequence_output, dim=1) / torch.sum(attention_mask_un, dim=1, dtype=torch.float) + return text_out + + def _mean_pooling_for_similarity_visual(self, visual_output, video_mask,): + video_mask_un = video_mask.to(dtype=torch.float).unsqueeze(-1) + visual_output = visual_output * video_mask_un + video_mask_un_sum = torch.sum(video_mask_un, dim=1, dtype=torch.float) + video_mask_un_sum[video_mask_un_sum == 0.] = 1. 
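Both pooling helpers follow the same pattern: zero out padded positions, sum over the sequence, and divide by the number of valid positions, with a guard so that an all-zero mask never divides by zero. A condensed sketch of that pattern (hypothetical helper name, illustrative only):

```python
import torch

def masked_mean(features: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    """features: (B, L, D) token/frame features; mask: (B, L), 1 for valid, 0 for padding."""
    mask = mask.to(features.dtype).unsqueeze(-1)   # (B, L, 1)
    summed = (features * mask).sum(dim=1)          # padding contributes nothing to the sum
    counts = mask.sum(dim=1).clamp(min=1.0)        # guard against empty rows
    return summed / counts                         # (B, D)
```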
+ video_out = torch.sum(visual_output, dim=1) / video_mask_un_sum + return video_out + + def _mean_pooling_for_similarity(self, sequence_output, visual_output, attention_mask, video_mask,): + text_out = self._mean_pooling_for_similarity_sequence(sequence_output, attention_mask) + video_out = self._mean_pooling_for_similarity_visual(visual_output, video_mask) + + return text_out, video_out + + def _loose_similarity(self, sequence_output, visual_output, attention_mask, video_mask, sim_header="meanP"): + sequence_output, visual_output = sequence_output.contiguous(), visual_output.contiguous() + + if sim_header == "meanP": + # Default: Parameter-free type + pass + elif sim_header == "seqLSTM": + # Sequential type: LSTM + visual_output_original = visual_output + visual_output = pack_padded_sequence(visual_output, torch.sum(video_mask, dim=-1).cpu(), + batch_first=True, enforce_sorted=False) + visual_output, _ = self.lstm_visual(visual_output) + if self.training: self.lstm_visual.flatten_parameters() + visual_output, _ = pad_packed_sequence(visual_output, batch_first=True) + visual_output = torch.cat((visual_output, visual_output_original[:, visual_output.size(1):, ...].contiguous()), dim=1) + visual_output = visual_output + visual_output_original + elif sim_header == "seqTransf": + # Sequential type: Transformer Encoder + visual_output_original = visual_output + seq_length = visual_output.size(1) + position_ids = torch.arange(seq_length, dtype=torch.long, device=visual_output.device) + position_ids = position_ids.unsqueeze(0).expand(visual_output.size(0), -1) + frame_position_embeddings = self.frame_position_embeddings(position_ids) + visual_output = visual_output + frame_position_embeddings + + extended_video_mask = (1.0 - video_mask.unsqueeze(1)) * -1000000.0 + extended_video_mask = extended_video_mask.expand(-1, video_mask.size(1), -1) + visual_output = visual_output.permute(1, 0, 2) # NLD -> LND + visual_output = self.transformerClip(visual_output, extended_video_mask) + visual_output = visual_output.permute(1, 0, 2) # LND -> NLD + visual_output = visual_output + visual_output_original + + if self.training: + visual_output = allgather(visual_output, self.task_config) + video_mask = allgather(video_mask, self.task_config) + sequence_output = allgather(sequence_output, self.task_config) + torch.distributed.barrier() + + visual_output = visual_output / visual_output.norm(dim=-1, keepdim=True) + visual_output = self._mean_pooling_for_similarity_visual(visual_output, video_mask) + visual_output = visual_output / visual_output.norm(dim=-1, keepdim=True) + + sequence_output = sequence_output.squeeze(1) + sequence_output = sequence_output / sequence_output.norm(dim=-1, keepdim=True) + + logit_scale = self.clip.logit_scale.exp() + retrieve_logits = logit_scale * torch.matmul(sequence_output, visual_output.t()) + return retrieve_logits + + def _cross_similarity(self, sequence_output, visual_output, attention_mask, video_mask): + sequence_output, visual_output = sequence_output.contiguous(), visual_output.contiguous() + + b_text, s_text, h_text = sequence_output.size() + b_visual, s_visual, h_visual = visual_output.size() + + retrieve_logits_list = [] + + step_size = b_text # set smaller to reduce memory cost + split_size = [step_size] * (b_text // step_size) + release_size = b_text - sum(split_size) + if release_size > 0: + split_size += [release_size] + + # due to clip text branch retrun the last hidden + attention_mask = torch.ones(sequence_output.size(0), 1)\ + .to(device=attention_mask.device, 
dtype=attention_mask.dtype) + + sequence_output_splits = torch.split(sequence_output, split_size, dim=0) + attention_mask_splits = torch.split(attention_mask, split_size, dim=0) + for i in range(len(split_size)): + sequence_output_row = sequence_output_splits[i] + attention_mask_row = attention_mask_splits[i] + sequence_output_l = sequence_output_row.unsqueeze(1).repeat(1, b_visual, 1, 1) + sequence_output_l = sequence_output_l.view(-1, s_text, h_text) + attention_mask_l = attention_mask_row.unsqueeze(1).repeat(1, b_visual, 1) + attention_mask_l = attention_mask_l.view(-1, s_text) + + step_truth = sequence_output_row.size(0) + visual_output_r = visual_output.unsqueeze(0).repeat(step_truth, 1, 1, 1) + visual_output_r = visual_output_r.view(-1, s_visual, h_visual) + video_mask_r = video_mask.unsqueeze(0).repeat(step_truth, 1, 1) + video_mask_r = video_mask_r.view(-1, s_visual) + + cross_output, pooled_output, concat_mask = \ + self._get_cross_output(sequence_output_l, visual_output_r, attention_mask_l, video_mask_r) + retrieve_logits_row = self.similarity_dense(pooled_output).squeeze(-1).view(step_truth, b_visual) + + retrieve_logits_list.append(retrieve_logits_row) + + retrieve_logits = torch.cat(retrieve_logits_list, dim=0) + return retrieve_logits + + def get_similarity_logits(self, sequence_output, visual_output, attention_mask, video_mask, shaped=False, loose_type=False): + if shaped is False: + attention_mask = attention_mask.view(-1, attention_mask.shape[-1]) + video_mask = video_mask.view(-1, video_mask.shape[-1]) + + contrastive_direction = () + if loose_type: + assert self.sim_header in ["meanP", "seqLSTM", "seqTransf"] + retrieve_logits = self._loose_similarity(sequence_output, visual_output, attention_mask, video_mask, sim_header=self.sim_header) + else: + assert self.sim_header in ["tightTransf"] + retrieve_logits = self._cross_similarity(sequence_output, visual_output, attention_mask, video_mask, ) + + return retrieve_logits, contrastive_direction \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/module_clip.py b/Downstream/Video-Text-Retrieval/modules/module_clip.py new file mode 100644 index 0000000..dd2d108 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/module_clip.py @@ -0,0 +1,562 @@ +""" +Adapted from: https://github.com/openai/CLIP/blob/main/clip/clip.py +""" +from collections import OrderedDict +from typing import Tuple, Union + +import hashlib +import os +import urllib +import warnings +from tqdm import tqdm + +import torch +import torch.nn.functional as F +from torch import nn + + +_MODELS = { + "RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt", + "RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt", + "RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt", + "RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt", + "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", + "ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt", + "ViT-L/14": 
"https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt", +} +_PT_NAME = { + "RN50": "RN50.pt", + "RN101": "RN101.pt", + "RN50x4": "RN50x4.pt", + "RN50x16": "RN50x16.pt", + "ViT-B/32": "ViT-B-32.pt", + "ViT-B/16": "ViT-B-16.pt", + "ViT-L/14": "ViT-L-14.pt", +} + +from ipdb import set_trace + +def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")): + os.makedirs(root, exist_ok=True) + filename = os.path.basename(url) + + expected_sha256 = url.split("/")[-2] + download_target = os.path.join(root, filename) + + if os.path.exists(download_target) and not os.path.isfile(download_target): + raise RuntimeError(f"{download_target} exists and is not a regular file") + + if os.path.isfile(download_target): + if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256: + return download_target + else: + warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file") + + with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: + with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop: + while True: + buffer = source.read(8192) + if not buffer: + break + + output.write(buffer) + loop.update(len(buffer)) + + if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256: + raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match") + + return download_target + +def available_models(): + """Returns the names of available CLIP models""" + return list(_MODELS.keys()) + +# ============================= + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1): + super().__init__() + + # all conv layers have stride 1. 
an avgpool is performed after the second convolution when stride > 1 + self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + + self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + + self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity() + + self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * self.expansion) + + self.relu = nn.ReLU(inplace=True) + self.downsample = None + self.stride = stride + + if stride > 1 or inplanes != planes * Bottleneck.expansion: + # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1 + self.downsample = nn.Sequential(OrderedDict([ + ("-1", nn.AvgPool2d(stride)), + ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)), + ("1", nn.BatchNorm2d(planes * self.expansion)) + ])) + + def forward(self, x: torch.Tensor): + identity = x + + out = self.relu(self.bn1(self.conv1(x))) + out = self.relu(self.bn2(self.conv2(out))) + out = self.avgpool(out) + out = self.bn3(self.conv3(out)) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + return out + + +class AttentionPool2d(nn.Module): + def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None): + super().__init__() + self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5) + self.k_proj = nn.Linear(embed_dim, embed_dim) + self.q_proj = nn.Linear(embed_dim, embed_dim) + self.v_proj = nn.Linear(embed_dim, embed_dim) + self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim) + self.num_heads = num_heads + + def forward(self, x): + x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC + x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC + x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC + x, _ = F.multi_head_attention_forward( + query=x, key=x, value=x, + embed_dim_to_check=x.shape[-1], + num_heads=self.num_heads, + q_proj_weight=self.q_proj.weight, + k_proj_weight=self.k_proj.weight, + v_proj_weight=self.v_proj.weight, + in_proj_weight=None, + in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]), + bias_k=None, + bias_v=None, + add_zero_attn=False, + dropout_p=0, + out_proj_weight=self.c_proj.weight, + out_proj_bias=self.c_proj.bias, + use_separate_proj_weight=True, + training=self.training, + need_weights=False + ) + + return x[0] + + +class ModifiedResNet(nn.Module): + """ + A ResNet class that is similar to torchvision's but contains the following changes: + - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool. 
+ - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1 + - The final pooling layer is a QKV attention instead of an average pool + """ + + def __init__(self, layers, output_dim, heads, input_resolution=224, width=64): + super().__init__() + self.output_dim = output_dim + self.input_resolution = input_resolution + + # the 3-layer stem + self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(width // 2) + self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(width // 2) + self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False) + self.bn3 = nn.BatchNorm2d(width) + self.avgpool = nn.AvgPool2d(2) + self.relu = nn.ReLU(inplace=True) + + # residual layers + self._inplanes = width # this is a *mutable* variable used during construction + self.layer1 = self._make_layer(width, layers[0]) + self.layer2 = self._make_layer(width * 2, layers[1], stride=2) + self.layer3 = self._make_layer(width * 4, layers[2], stride=2) + self.layer4 = self._make_layer(width * 8, layers[3], stride=2) + + embed_dim = width * 32 # the ResNet feature dimension + self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim) + + def _make_layer(self, planes, blocks, stride=1): + layers = [Bottleneck(self._inplanes, planes, stride)] + + self._inplanes = planes * Bottleneck.expansion + for _ in range(1, blocks): + layers.append(Bottleneck(self._inplanes, planes)) + + return nn.Sequential(*layers) + + def forward(self, x): + def stem(x): + for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]: + x = self.relu(bn(conv(x))) + x = self.avgpool(x) + return x + + x = x.type(self.conv1.weight.dtype) + x = stem(x) + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + x = self.attnpool(x) + + return x + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x: torch.Tensor): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def __init__(self, d_model: int, n_head: int, attn_mask=None): + super().__init__() + + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + def attention(self, x: torch.Tensor): + attn_mask_ = self.attn_mask + if self.attn_mask is not None and hasattr(self.attn_mask, '__call__'): + attn_mask_ = self.attn_mask(x.size(0)) # LND + + attn_mask_ = attn_mask_.to(dtype=x.dtype, device=x.device) if attn_mask_ is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask_)[0] + + def forward(self, x_tuple:tuple): + x, video_frame = x_tuple + x = x + self.attention(self.ln_1(x)) + x = x + self.mlp(self.ln_2(x)) + return (x, video_frame) + + +class Transformer(nn.Module): + def __init__(self, width: int, layers: int, heads: int, attn_mask = None): + super().__init__() + self.width = width + self.layers = layers + self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in 
range(layers)]) + + def forward(self, x: torch.Tensor, video_frame=-1): + return self.resblocks((x, video_frame))[0] + + +class VisualTransformer(nn.Module): + def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int, + linear_patch: str = '2d',): + super().__init__() + self.input_resolution = input_resolution + self.output_dim = output_dim + + self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False) + + scale = width ** -0.5 + self.class_embedding = nn.Parameter(scale * torch.randn(width)) + self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)) + self.ln_pre = LayerNorm(width) + + self.transformer = Transformer(width, layers, heads) + + self.ln_post = LayerNorm(width) + self.proj = nn.Parameter(scale * torch.randn(width, output_dim)) + + # For 3D + assert linear_patch in ['2d', '3d'] + self.linear_patch = linear_patch + if self.linear_patch == '3d': + self.conv2 = nn.Conv3d(in_channels=3, out_channels=width, kernel_size=(3, patch_size, patch_size), + stride=(1, patch_size, patch_size), padding=(1, 0, 0), bias=False) + + def forward(self, x: torch.Tensor, video_frame=-1): + + if self.linear_patch == '3d': + assert video_frame != -1 + x_3d = x.reshape(-1, video_frame, x.shape[-3], x.shape[-2], x.shape[-1]) + x_3d = x_3d.permute(0, 2, 1, 3, 4) + x_3d = self.conv2(x_3d) # shape = [*, width, frame, grid, grid] + x_3d = x_3d.permute(0, 2, 1, 3, 4) # shape = [*, frame, width, grid, grid] + x = x_3d.reshape(-1, x_3d.shape[-3], x_3d.shape[-2], x_3d.shape[-1]).contiguous() # shape = [*, width, grid, grid] + else: + x = self.conv1(x) # shape = [*, width, grid, grid] + + x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2] + x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width] + x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] + x = x + self.positional_embedding.to(x.dtype) + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) # NLD -> LND + x = self.transformer(x, video_frame=video_frame) + x = x.permute(1, 0, 2) # LND -> NLD + + # Move the three lines below to `encode_image` for entire hidden sequence + # x = self.ln_post(x[:, 0, :]) + # if self.proj is not None: + # x = x @ self.proj + + return x + + +class CLIP(nn.Module): + def __init__(self, + embed_dim: int, + # vision + image_resolution: int, + vision_layers: Union[Tuple[int, int, int, int], int], + vision_width: int, + vision_patch_size: int, + # text + context_length: int, + vocab_size: int, + transformer_width: int, + transformer_heads: int, + transformer_layers: int, + # vision linear of patch + linear_patch: str = '2d', + ): + super().__init__() + + self.context_length = context_length + + if isinstance(vision_layers, (tuple, list)): + vision_heads = vision_width * 32 // 64 + self.visual = ModifiedResNet( + layers=vision_layers, + output_dim=embed_dim, + heads=vision_heads, + input_resolution=image_resolution, + width=vision_width + ) + else: + vision_heads = vision_width // 64 + self.visual = VisualTransformer( + input_resolution=image_resolution, + patch_size=vision_patch_size, + width=vision_width, + layers=vision_layers, + heads=vision_heads, + output_dim=embed_dim, + linear_patch=linear_patch + ) + + self.transformer = Transformer( + width=transformer_width, + layers=transformer_layers, + heads=transformer_heads, + 
attn_mask=self.build_attention_mask + ) + + self.vocab_size = vocab_size + self.token_embedding = nn.Embedding(vocab_size, transformer_width) + self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width)) + self.ln_final = LayerNorm(transformer_width) + + self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim)) + self.logit_scale = nn.Parameter(torch.ones([])) + + self.initialize_parameters() + + def initialize_parameters(self): + nn.init.normal_(self.token_embedding.weight, std=0.02) + nn.init.normal_(self.positional_embedding, std=0.01) + + if isinstance(self.visual, ModifiedResNet): + if self.visual.attnpool is not None: + std = self.visual.attnpool.c_proj.in_features ** -0.5 + nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std) + nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std) + nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std) + nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std) + + for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]: + for name, param in resnet_block.named_parameters(): + if name.endswith("bn3.weight"): + nn.init.zeros_(param) + + proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5) + attn_std = self.transformer.width ** -0.5 + fc_std = (2 * self.transformer.width) ** -0.5 + for block in self.transformer.resblocks: + nn.init.normal_(block.attn.in_proj_weight, std=attn_std) + nn.init.normal_(block.attn.out_proj.weight, std=proj_std) + nn.init.normal_(block.mlp.c_fc.weight, std=fc_std) + nn.init.normal_(block.mlp.c_proj.weight, std=proj_std) + + if self.text_projection is not None: + nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5) + + @staticmethod + def get_config(pretrained_clip_name="ViT-B/32"): + model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "ViT-B-32.pt") + if pretrained_clip_name in _MODELS and pretrained_clip_name in _PT_NAME: + model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), _PT_NAME[pretrained_clip_name]) + + if pretrained_clip_name in ["ViT-B/32", "ViT-B/16"] and os.path.exists(model_path): + pass + else: + if pretrained_clip_name in _MODELS: + model_path = _download(_MODELS[pretrained_clip_name]) + elif os.path.isfile(pretrained_clip_name): + model_path = pretrained_clip_name + else: + raise RuntimeError(f"Model {pretrained_clip_name} not found; available models = {available_models()}") + + try: + # loading JIT archive + model = torch.jit.load(model_path, map_location="cpu").eval() + state_dict = model.state_dict() + except RuntimeError: + state_dict = torch.load(model_path, map_location="cpu") + + return state_dict + + def build_attention_mask(self, context_length): + # lazily create causal attention mask, with full attention between the vision tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.zeros(context_length, context_length) + mask.fill_(float("-inf")) + mask.triu_(1) # zero out the lower diagonal + return mask + + @property + def dtype(self): + return self.visual.conv1.weight.dtype + + def encode_image(self, image, return_hidden=False, video_frame=-1): + hidden = self.visual(image.type(self.dtype), video_frame=video_frame) + hidden = self.visual.ln_post(hidden) @ self.visual.proj + + x = hidden[:, 0, :] + + if return_hidden: + return x, hidden + + return x + + def encode_text(self, text, return_hidden=False): + x = self.token_embedding(text).type(self.dtype) # [batch_size, 
n_ctx, d_model] + pos_emd = self.positional_embedding[:x.size(1), :].type(self.dtype) + x = x + pos_emd + x = x.permute(1, 0, 2) # NLD -> LND + x = self.transformer(x) + x = x.permute(1, 0, 2) # LND -> NLD + + hidden = self.ln_final(x).type(self.dtype) @ self.text_projection + + # x.shape = [batch_size, n_ctx, transformer.width] + # take features from the eot embedding (eot_token is the highest number in each sequence) + x = hidden[torch.arange(hidden.shape[0]), text.argmax(dim=-1)] + + if return_hidden: + return x, hidden + + return x + + def forward(self, image, text): + image_features = self.encode_image(image) + text_features = self.encode_text(text) + + # normalized features + image_features = image_features / image_features.norm(dim=-1, keepdim=True) + text_features = text_features / text_features.norm(dim=-1, keepdim=True) + + # cosine similarity as logits + logit_scale = self.logit_scale.exp() + logits_per_image = logit_scale * image_features @ text_features.t() + logits_per_text = logit_scale * text_features @ image_features.t() + + # shape = [global_batch_size, global_batch_size] + return logits_per_image, logits_per_text + + +def convert_weights(model: nn.Module): + """Convert applicable model parameters to fp16""" + + def _convert_weights_to_fp16(l): + if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.Linear)): + l.weight.data = l.weight.data.half() + if l.bias is not None: + l.bias.data = l.bias.data.half() + + if isinstance(l, nn.MultiheadAttention): + for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]: + tensor = getattr(l, attr) + if tensor is not None: + tensor.data = tensor.data.half() + + for name in ["text_projection", "proj"]: + if hasattr(l, name): + attr = getattr(l, name) + if attr is not None: + attr.data = attr.data.half() + + model.apply(_convert_weights_to_fp16) + + +def build_model(state_dict: dict): + vit = "visual.proj" in state_dict + + if vit: + vision_width = state_dict["visual.conv1.weight"].shape[0] + vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")]) + vision_patch_size = state_dict["visual.conv1.weight"].shape[-1] + grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5) + image_resolution = vision_patch_size * grid_size + else: + counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]] + vision_layers = tuple(counts) + vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0] + output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5) + vision_patch_size = None + assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0] + image_resolution = output_width * 32 + + embed_dim = state_dict["text_projection"].shape[1] + context_length = state_dict["positional_embedding"].shape[0] + vocab_size = state_dict["token_embedding.weight"].shape[0] + transformer_width = state_dict["ln_final.weight"].shape[0] + transformer_heads = transformer_width // 64 + transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks"))) + + model = CLIP( + embed_dim, + image_resolution, vision_layers, vision_width, vision_patch_size, + context_length, vocab_size, transformer_width, transformer_heads, transformer_layers + ) + + for key in ["input_resolution", "context_length", "vocab_size"]: + if key in state_dict: + del state_dict[key] + + 
convert_weights(model) + model.load_state_dict(state_dict) + return model.eval() diff --git a/Downstream/Video-Text-Retrieval/modules/module_cross.py b/Downstream/Video-Text-Retrieval/modules/module_cross.py new file mode 100644 index 0000000..b2cc6a2 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/module_cross.py @@ -0,0 +1,223 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import copy +import json +import math +import logging +import tarfile +import tempfile +import shutil + +import torch +from torch import nn +import torch.nn.functional as F +from .file_utils import cached_path +from .until_config import PretrainedConfig +from .until_module import PreTrainedModel, LayerNorm, ACT2FN +from collections import OrderedDict + +logger = logging.getLogger(__name__) + +PRETRAINED_MODEL_ARCHIVE_MAP = {} +CONFIG_NAME = 'cross_config.json' +WEIGHTS_NAME = 'cross_pytorch_model.bin' + + +class CrossConfig(PretrainedConfig): + """Configuration class to store the configuration of a `CrossModel`. + """ + pretrained_model_archive_map = PRETRAINED_MODEL_ARCHIVE_MAP + config_name = CONFIG_NAME + weights_name = WEIGHTS_NAME + def __init__(self, + vocab_size_or_config_json_file, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=2, + initializer_range=0.02): + """Constructs CrossConfig. + + Args: + vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `CrossModel`. + hidden_size: Size of the encoder layers and the pooler layer. + num_hidden_layers: Number of hidden layers in the Transformer encoder. + num_attention_heads: Number of attention heads for each attention layer in + the Transformer encoder. + intermediate_size: The size of the "intermediate" (i.e., feed-forward) + layer in the Transformer encoder. + hidden_act: The non-linear activation function (function or string) in the + encoder and pooler. If string, "gelu", "relu" and "swish" are supported. + hidden_dropout_prob: The dropout probabilitiy for all fully connected + layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob: The dropout ratio for the attention + probabilities. + max_position_embeddings: The maximum sequence length that this model might + ever be used with. Typically set this to something large just in case + (e.g., 512 or 1024 or 2048). + type_vocab_size: The vocabulary size of the `token_type_ids` passed into + `CrossModel`. + initializer_range: The sttdev of the truncated_normal_initializer for + initializing all weight matrices. 
+ """ + if isinstance(vocab_size_or_config_json_file, str): + with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader: + json_config = json.loads(reader.read()) + for key, value in json_config.items(): + self.__dict__[key] = value + elif isinstance(vocab_size_or_config_json_file, int): + self.vocab_size = vocab_size_or_config_json_file + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.initializer_range = initializer_range + else: + raise ValueError("First argument must be either a vocabulary size (int)" + "or the path to a pretrained model config file (str)") + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor): + return x * torch.sigmoid(1.702 * x) + +class ResidualAttentionBlock(nn.Module): + def __init__(self, d_model: int, n_head: int): + super().__init__() + + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.n_head = n_head + + def attention(self, x: torch.Tensor, attn_mask: torch.Tensor): + attn_mask_ = attn_mask.repeat_interleave(self.n_head, dim=0) + return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask_)[0] + + def forward(self, para_tuple: tuple): + # x: torch.Tensor, attn_mask: torch.Tensor + # print(para_tuple) + x, attn_mask = para_tuple + x = x + self.attention(self.ln_1(x), attn_mask) + x = x + self.mlp(self.ln_2(x)) + return (x, attn_mask) + +class Transformer(nn.Module): + def __init__(self, width: int, layers: int, heads: int): + super().__init__() + self.width = width + self.layers = layers + self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads) for _ in range(layers)]) + + def forward(self, x: torch.Tensor, attn_mask: torch.Tensor): + return self.resblocks((x, attn_mask))[0] + +class CrossEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type embeddings. 
+ """ + def __init__(self, config): + super(CrossEmbeddings, self).__init__() + + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + # self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) + # self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, concat_embeddings, concat_type=None): + + batch_size, seq_length = concat_embeddings.size(0), concat_embeddings.size(1) + # if concat_type is None: + # concat_type = torch.zeros(batch_size, concat_type).to(concat_embeddings.device) + + position_ids = torch.arange(seq_length, dtype=torch.long, device=concat_embeddings.device) + position_ids = position_ids.unsqueeze(0).expand(concat_embeddings.size(0), -1) + + # token_type_embeddings = self.token_type_embeddings(concat_type) + position_embeddings = self.position_embeddings(position_ids) + + embeddings = concat_embeddings + position_embeddings # + token_type_embeddings + # embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + +class CrossPooler(nn.Module): + def __init__(self, config): + super(CrossPooler, self).__init__() + self.ln_pool = LayerNorm(config.hidden_size) + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = QuickGELU() + + def forward(self, hidden_states, hidden_mask): + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + hidden_states = self.ln_pool(hidden_states) + pooled_output = hidden_states[:, 0] + pooled_output = self.dense(pooled_output) + pooled_output = self.activation(pooled_output) + return pooled_output + +class CrossModel(PreTrainedModel): + + def initialize_parameters(self): + proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5) + attn_std = self.transformer.width ** -0.5 + fc_std = (2 * self.transformer.width) ** -0.5 + for block in self.transformer.resblocks: + nn.init.normal_(block.attn.in_proj_weight, std=attn_std) + nn.init.normal_(block.attn.out_proj.weight, std=proj_std) + nn.init.normal_(block.mlp.c_fc.weight, std=fc_std) + nn.init.normal_(block.mlp.c_proj.weight, std=proj_std) + + def __init__(self, config): + super(CrossModel, self).__init__(config) + + self.embeddings = CrossEmbeddings(config) + + transformer_width = config.hidden_size + transformer_layers = config.num_hidden_layers + transformer_heads = config.num_attention_heads + self.transformer = Transformer(width=transformer_width, layers=transformer_layers, heads=transformer_heads,) + self.pooler = CrossPooler(config) + self.apply(self.init_weights) + + def build_attention_mask(self, attention_mask): + extended_attention_mask = attention_mask.unsqueeze(1) + extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -1000000.0 + extended_attention_mask = extended_attention_mask.expand(-1, attention_mask.size(1), -1) + return extended_attention_mask + + def forward(self, concat_input, concat_type=None, attention_mask=None, output_all_encoded_layers=True): + + if attention_mask is None: + attention_mask = torch.ones(concat_input.size(0), concat_input.size(1)) + if concat_type is None: + concat_type = torch.zeros_like(attention_mask) + + extended_attention_mask = self.build_attention_mask(attention_mask) + + embedding_output = self.embeddings(concat_input, concat_type) + embedding_output = 
embedding_output.permute(1, 0, 2) # NLD -> LND + embedding_output = self.transformer(embedding_output, extended_attention_mask) + embedding_output = embedding_output.permute(1, 0, 2) # LND -> NLD + + pooled_output = self.pooler(embedding_output, hidden_mask=attention_mask) + + return embedding_output, pooled_output diff --git a/Downstream/Video-Text-Retrieval/modules/optimization.py b/Downstream/Video-Text-Retrieval/modules/optimization.py new file mode 100644 index 0000000..f040b83 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/optimization.py @@ -0,0 +1,168 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch optimization for BERT model.""" + +import math +import torch +from torch.optim import Optimizer +from torch.optim.optimizer import required +from torch.nn.utils import clip_grad_norm_ +import logging + +logger = logging.getLogger(__name__) + +def warmup_cosine(x, warmup=0.002): + if x < warmup: + return x/warmup + return 0.5 * (1.0 + math.cos(math.pi * x)) + +def warmup_constant(x, warmup=0.002): + """ Linearly increases learning rate over `warmup`*`t_total` (as provided to BertAdam) training steps. + Learning rate is 1. afterwards. """ + if x < warmup: + return x/warmup + return 1.0 + +def warmup_linear(x, warmup=0.002): + """ Specifies a triangular learning rate schedule where peak is reached at `warmup`*`t_total`-th (as provided to BertAdam) training step. + After `t_total`-th training step, learning rate is zero. """ + if x < warmup: + return x/warmup + return max((x-1.)/(warmup-1.), 0) + +SCHEDULES = { + 'warmup_cosine': warmup_cosine, + 'warmup_constant': warmup_constant, + 'warmup_linear': warmup_linear, +} + + +class BertAdam(Optimizer): + """Implements BERT version of Adam algorithm with weight decay fix. + Params: + lr: learning rate + warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1 + t_total: total number of training steps for the learning + rate schedule, -1 means constant learning rate. Default: -1 + schedule: schedule to use for the warmup (see above). Default: 'warmup_linear' + b1: Adams b1. Default: 0.9 + b2: Adams b2. Default: 0.999 + e: Adams epsilon. Default: 1e-6 + weight_decay: Weight decay. Default: 0.01 + max_grad_norm: Maximum norm for the gradients (-1 means no clipping). 
Default: 1.0 + """ + def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear', + b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, + max_grad_norm=1.0): + if lr is not required and lr < 0.0: + raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr)) + if schedule not in SCHEDULES: + raise ValueError("Invalid schedule parameter: {}".format(schedule)) + if not 0.0 <= warmup < 1.0 and not warmup == -1: + raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup)) + if not 0.0 <= b1 < 1.0: + raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1)) + if not 0.0 <= b2 < 1.0: + raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2)) + if not e >= 0.0: + raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e)) + defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total, + b1=b1, b2=b2, e=e, weight_decay=weight_decay, + max_grad_norm=max_grad_norm) + super(BertAdam, self).__init__(params, defaults) + + def get_lr(self): + lr = [] + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + state = self.state[p] + if len(state) == 0: + return [0] + if group['t_total'] != -1: + schedule_fct = SCHEDULES[group['schedule']] + lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup']) + else: + lr_scheduled = group['lr'] + lr.append(lr_scheduled) + return lr + + def step(self, closure=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.data + if grad.is_sparse: + raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['next_m'] = torch.zeros_like(p.data) + # Exponential moving average of squared gradient values + state['next_v'] = torch.zeros_like(p.data) + + next_m, next_v = state['next_m'], state['next_v'] + beta1, beta2 = group['b1'], group['b2'] + + # Add grad clipping + if group['max_grad_norm'] > 0: + clip_grad_norm_(p, group['max_grad_norm']) + + # Decay the first and second moment running average coefficient + # In-place operations to update the averages at the same time + # next_m.mul_(beta1).add_(1 - beta1, grad) --> pytorch 1.7 + next_m.mul_(beta1).add_(grad, alpha=1 - beta1) + # next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad) --> pytorch 1.7 + next_v.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + update = next_m / (next_v.sqrt() + group['e']) + + # Just adding the square of the weights to the loss function is *not* + # the correct way of using L2 regularization/weight decay with Adam, + # since that will interact with the m and v parameters in strange ways. + # + # Instead we want to decay the weights in a manner that doesn't interact + # with the m/v parameters. This is equivalent to adding the square + # of the weights to the loss with plain (non-momentum) SGD. 
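The comment above is the key design point of this optimizer: weight decay is applied directly to the Adam update (decoupled, AdamW-style) rather than folded into the gradient, and the learning rate is then scaled by the warmup schedule; note that, unlike standard Adam, no bias correction is applied. A scalar sketch of one step, purely illustrative (the helper and its `lr_scale` argument, which stands for `schedule_fct(step / t_total, warmup)`, are not part of this patch):

```python
import math

def bert_adam_update(p, grad, m, v, *, lr, b1=0.9, b2=0.999, e=1e-6,
                     weight_decay=0.01, lr_scale=1.0):
    """Schematic single-parameter BertAdam step (scalars; no bias correction, by design)."""
    m = b1 * m + (1 - b1) * grad                         # first moment (next_m)
    v = b2 * v + (1 - b2) * grad * grad                  # second moment (next_v)
    update = m / (math.sqrt(v) + e) + weight_decay * p   # decoupled (AdamW-style) weight decay
    return p - lr * lr_scale * update, m, v
```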
+ if group['weight_decay'] > 0.0: + update += group['weight_decay'] * p.data + + if group['t_total'] != -1: + schedule_fct = SCHEDULES[group['schedule']] + progress = state['step']/group['t_total'] + lr_scheduled = group['lr'] * schedule_fct(progress, group['warmup']) + else: + lr_scheduled = group['lr'] + + update_with_lr = lr_scheduled * update + p.data.add_(-update_with_lr) + + state['step'] += 1 + + return loss \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/tokenization_clip.py b/Downstream/Video-Text-Retrieval/modules/tokenization_clip.py new file mode 100644 index 0000000..3fbb56d --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/tokenization_clip.py @@ -0,0 +1,145 @@ +import gzip +import html +import os +from functools import lru_cache + +import ftfy +import regex as re + + +@lru_cache() +def default_bpe(): + return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz") + + +@lru_cache() +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a corresponding list of unicode strings. + The reversible bpe codes work on unicode strings. + This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. + When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. + This is a signficant percentage of your normal, say, 32K bpe vocab. + To avoid that, we want lookup tables between utf-8 bytes and unicode strings. + And avoids mapping to whitespace/control characters the bpe code barfs on. + """ + bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8+n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def get_pairs(word): + """Return set of symbol pairs in a word. + Word is represented as tuple of symbols (symbols being variable-length strings). 
+ """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +def basic_clean(text): + text = ftfy.fix_text(text) + text = html.unescape(html.unescape(text)) + return text.strip() + + +def whitespace_clean(text): + text = re.sub(r'\s+', ' ', text) + text = text.strip() + return text + + +class SimpleTokenizer(object): + def __init__(self, bpe_path: str = default_bpe()): + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + merges = gzip.open(bpe_path).read().decode("utf-8").split('\n') + merges = merges[1:49152-256-2+1] + merges = [tuple(merge.split()) for merge in merges] + vocab = list(bytes_to_unicode().values()) + vocab = vocab + [v+'' for v in vocab] + for merge in merges: + vocab.append(''.join(merge)) + vocab.extend(['<|startoftext|>', '<|endoftext|>']) + self.encoder = dict(zip(vocab, range(len(vocab)))) + self.decoder = {v: k for k, v in self.encoder.items()} + self.bpe_ranks = dict(zip(merges, range(len(merges)))) + self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'} + self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE) + + self.vocab = self.encoder + + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token[:-1]) + ( token[-1] + '',) + pairs = get_pairs(word) + + if not pairs: + return token+'' + + while True: + bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf'))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + new_word.extend(word[i:j]) + i = j + except: + new_word.extend(word[i:]) + break + + if word[i] == first and i < len(word)-1 and word[i+1] == second: + new_word.append(first+second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = ' '.join(word) + self.cache[token] = word + return word + + def encode(self, text): + bpe_tokens = [] + text = whitespace_clean(basic_clean(text)).lower() + for token in re.findall(self.pat, text): + token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8')) + bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')) + return bpe_tokens + + def decode(self, tokens): + text = ''.join([self.decoder[token] for token in tokens]) + text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('', ' ') + return text + + def tokenize(self, text): + tokens = [] + text = whitespace_clean(basic_clean(text)).lower() + for token in re.findall(self.pat, text): + token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8')) + tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' ')) + return tokens + + def convert_tokens_to_ids(self, tokens): + return [self.encoder[bpe_token] for bpe_token in tokens] \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/until_config.py b/Downstream/Video-Text-Retrieval/modules/until_config.py new file mode 100644 index 0000000..596c157 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/until_config.py @@ -0,0 +1,126 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. 
All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch BERT model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import copy +import json +import logging +import tarfile +import tempfile +import shutil +import torch +from .file_utils import cached_path + +logger = logging.getLogger(__name__) + +class PretrainedConfig(object): + + pretrained_model_archive_map = {} + config_name = "" + weights_name = "" + + @classmethod + def get_config(cls, pretrained_model_name, cache_dir, type_vocab_size, state_dict, task_config=None): + archive_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), pretrained_model_name) + if os.path.exists(archive_file) is False: + if pretrained_model_name in cls.pretrained_model_archive_map: + archive_file = cls.pretrained_model_archive_map[pretrained_model_name] + else: + archive_file = pretrained_model_name + + # redirect to the cache, if necessary + try: + resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir) + except FileNotFoundError: + if task_config is None or task_config.local_rank == 0: + logger.error( + "Model name '{}' was not found in model name list. " + "We assumed '{}' was a path or url but couldn't find any file " + "associated to this path or url.".format( + pretrained_model_name, + archive_file)) + return None + if resolved_archive_file == archive_file: + if task_config is None or task_config.local_rank == 0: + logger.info("loading archive file {}".format(archive_file)) + else: + if task_config is None or task_config.local_rank == 0: + logger.info("loading archive file {} from cache at {}".format( + archive_file, resolved_archive_file)) + tempdir = None + if os.path.isdir(resolved_archive_file): + serialization_dir = resolved_archive_file + else: + # Extract archive to temp dir + tempdir = tempfile.mkdtemp() + if task_config is None or task_config.local_rank == 0: + logger.info("extracting archive file {} to temp dir {}".format( + resolved_archive_file, tempdir)) + with tarfile.open(resolved_archive_file, 'r:gz') as archive: + archive.extractall(tempdir) + serialization_dir = tempdir + # Load config + config_file = os.path.join(serialization_dir, cls.config_name) + config = cls.from_json_file(config_file) + config.type_vocab_size = type_vocab_size + if task_config is None or task_config.local_rank == 0: + logger.info("Model config {}".format(config)) + + if state_dict is None: + weights_path = os.path.join(serialization_dir, cls.weights_name) + if os.path.exists(weights_path): + state_dict = torch.load(weights_path, map_location='cpu') + else: + if task_config is None or task_config.local_rank == 0: + logger.info("Weight doesn't exsits. 
{}".format(weights_path)) + + if tempdir: + # Clean up temp dir + shutil.rmtree(tempdir) + + return config, state_dict + + @classmethod + def from_dict(cls, json_object): + """Constructs a `BertConfig` from a Python dictionary of parameters.""" + config = cls(vocab_size_or_config_json_file=-1) + for key, value in json_object.items(): + config.__dict__[key] = value + return config + + @classmethod + def from_json_file(cls, json_file): + """Constructs a `BertConfig` from a json file of parameters.""" + with open(json_file, "r", encoding='utf-8') as reader: + text = reader.read() + return cls.from_dict(json.loads(text)) + + def __repr__(self): + return str(self.to_json_string()) + + def to_dict(self): + """Serializes this instance to a Python dictionary.""" + output = copy.deepcopy(self.__dict__) + return output + + def to_json_string(self): + """Serializes this instance to a JSON string.""" + return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/modules/until_module.py b/Downstream/Video-Text-Retrieval/modules/until_module.py new file mode 100644 index 0000000..5ae873a --- /dev/null +++ b/Downstream/Video-Text-Retrieval/modules/until_module.py @@ -0,0 +1,269 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch BERT model.""" + +import logging +import numpy as np +import torch +from torch import nn +import torch.nn.functional as F +import math +from modules.until_config import PretrainedConfig + +logger = logging.getLogger(__name__) + +def gelu(x): + """Implementation of the gelu activation function. + For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): + 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) + """ + return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) + +def swish(x): + return x * torch.sigmoid(x) + +ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish} + +class LayerNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-12): + """Construct a layernorm module in the TF style (epsilon inside the square root). + """ + super(LayerNorm, self).__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.bias = nn.Parameter(torch.zeros(hidden_size)) + self.variance_epsilon = eps + + def forward(self, x): + u = x.mean(-1, keepdim=True) + s = (x - u).pow(2).mean(-1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.variance_epsilon) + return self.weight * x + self.bias + +class PreTrainedModel(nn.Module): + """ An abstract class to handle weights initialization and + a simple interface for dowloading and loading pretrained models. 
+ """ + def __init__(self, config, *inputs, **kwargs): + super(PreTrainedModel, self).__init__() + if not isinstance(config, PretrainedConfig): + raise ValueError( + "Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. " + "To create a model from a Google pretrained model use " + "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format( + self.__class__.__name__, self.__class__.__name__ + )) + self.config = config + + def init_weights(self, module): + """ Initialize the weights. + """ + if isinstance(module, (nn.Linear, nn.Embedding)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + elif isinstance(module, LayerNorm): + if 'beta' in dir(module) and 'gamma' in dir(module): + module.beta.data.zero_() + module.gamma.data.fill_(1.0) + else: + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + def resize_token_embeddings(self, new_num_tokens=None): + raise NotImplementedError + + @classmethod + def init_preweight(cls, model, state_dict, prefix=None, task_config=None): + old_keys = [] + new_keys = [] + for key in state_dict.keys(): + new_key = None + if 'gamma' in key: + new_key = key.replace('gamma', 'weight') + if 'beta' in key: + new_key = key.replace('beta', 'bias') + if new_key: + old_keys.append(key) + new_keys.append(new_key) + for old_key, new_key in zip(old_keys, new_keys): + state_dict[new_key] = state_dict.pop(old_key) + + if prefix is not None: + old_keys = [] + new_keys = [] + for key in state_dict.keys(): + old_keys.append(key) + new_keys.append(prefix + key) + for old_key, new_key in zip(old_keys, new_keys): + state_dict[new_key] = state_dict.pop(old_key) + + missing_keys = [] + unexpected_keys = [] + error_msgs = [] + # copy state_dict so _load_from_state_dict can modify it + metadata = getattr(state_dict, '_metadata', None) + state_dict = state_dict.copy() + if metadata is not None: + state_dict._metadata = metadata + + def load(module, prefix=''): + local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) + module._load_from_state_dict( + state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) + for name, child in module._modules.items(): + if child is not None: + load(child, prefix + name + '.') + + load(model, prefix='') + + if prefix is None and (task_config is None or task_config.local_rank == 0): + logger.info("-" * 20) + if len(missing_keys) > 0: + logger.info("Weights of {} not initialized from pretrained model: {}" + .format(model.__class__.__name__, "\n " + "\n ".join(missing_keys))) + if len(unexpected_keys) > 0: + logger.info("Weights from pretrained model not used in {}: {}" + .format(model.__class__.__name__, "\n " + "\n ".join(unexpected_keys))) + if len(error_msgs) > 0: + logger.error("Weights from pretrained model cause errors in {}: {}" + .format(model.__class__.__name__, "\n " + "\n ".join(error_msgs))) + + return model + + @property + def dtype(self): + """ + :obj:`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype). 
+ """ + try: + return next(self.parameters()).dtype + except StopIteration: + # For nn.DataParallel compatibility in PyTorch 1.5 + def find_tensor_attributes(module: nn.Module): + tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] + return tuples + + gen = self._named_members(get_members_fn=find_tensor_attributes) + first_tuple = next(gen) + return first_tuple[1].dtype + + @classmethod + def from_pretrained(cls, config, state_dict=None, *inputs, **kwargs): + """ + Instantiate a PreTrainedModel from a pre-trained model file or a pytorch state dict. + Download and cache the pre-trained model file if needed. + """ + # Instantiate model. + model = cls(config, *inputs, **kwargs) + if state_dict is None: + return model + model = cls.init_preweight(model, state_dict) + + return model + +################################## +###### LOSS FUNCTION ############# +################################## +class CrossEn(nn.Module): + def __init__(self,): + super(CrossEn, self).__init__() + + def forward(self, sim_matrix): + logpt = F.log_softmax(sim_matrix, dim=-1) + logpt = torch.diag(logpt) + nce_loss = -logpt + sim_loss = nce_loss.mean() + return sim_loss + +class MILNCELoss(nn.Module): + def __init__(self, batch_size=1, n_pair=1,): + super(MILNCELoss, self).__init__() + self.batch_size = batch_size + self.n_pair = n_pair + torch_v = float(".".join(torch.__version__.split(".")[:2])) + self.bool_dtype = torch.bool if torch_v >= 1.3 else torch.uint8 + + def forward(self, sim_matrix): + mm_mask = np.eye(self.batch_size) + mm_mask = np.kron(mm_mask, np.ones((self.n_pair, self.n_pair))) + mm_mask = torch.tensor(mm_mask).float().to(sim_matrix.device) + + from_text_matrix = sim_matrix + mm_mask * -1e12 + from_video_matrix = sim_matrix.transpose(1, 0) + + new_sim_matrix = torch.cat([from_video_matrix, from_text_matrix], dim=-1) + logpt = F.log_softmax(new_sim_matrix, dim=-1) + + mm_mask_logpt = torch.cat([mm_mask, torch.zeros_like(mm_mask)], dim=-1) + masked_logpt = logpt + (torch.ones_like(mm_mask_logpt) - mm_mask_logpt) * -1e12 + + new_logpt = -torch.logsumexp(masked_logpt, dim=-1) + + logpt_choice = torch.zeros_like(new_logpt) + mark_ind = torch.arange(self.batch_size).to(sim_matrix.device) * self.n_pair + (self.n_pair//2) + logpt_choice[mark_ind] = 1 + sim_loss = new_logpt.masked_select(logpt_choice.to(dtype=self.bool_dtype)).mean() + return sim_loss + +class MaxMarginRankingLoss(nn.Module): + def __init__(self, + margin=1.0, + negative_weighting=False, + batch_size=1, + n_pair=1, + hard_negative_rate=0.5, + ): + super(MaxMarginRankingLoss, self).__init__() + self.margin = margin + self.n_pair = n_pair + self.batch_size = batch_size + easy_negative_rate = 1 - hard_negative_rate + self.easy_negative_rate = easy_negative_rate + self.negative_weighting = negative_weighting + if n_pair > 1 and batch_size > 1: + alpha = easy_negative_rate / ((batch_size - 1) * (1 - easy_negative_rate)) + mm_mask = (1 - alpha) * np.eye(self.batch_size) + alpha + mm_mask = np.kron(mm_mask, np.ones((n_pair, n_pair))) + mm_mask = torch.tensor(mm_mask) * (batch_size * (1 - easy_negative_rate)) + self.mm_mask = mm_mask.float() + + def forward(self, x): + d = torch.diag(x) + max_margin = F.relu(self.margin + x - d.view(-1, 1)) + \ + F.relu(self.margin + x - d.view(1, -1)) + if self.negative_weighting and self.n_pair > 1 and self.batch_size > 1: + max_margin = max_margin * self.mm_mask.to(max_margin.device) + return max_margin.mean() + +class AllGather(torch.autograd.Function): + """An autograd function that 
performs allgather on a tensor.""" + + @staticmethod + def forward(ctx, tensor, args): + output = [torch.empty_like(tensor) for _ in range(args.world_size)] + torch.distributed.all_gather(output, tensor) + ctx.rank = args.rank + ctx.batch_size = tensor.shape[0] + return torch.cat(output, dim=0) + + @staticmethod + def backward(ctx, grad_output): + return ( + grad_output[ctx.batch_size * ctx.rank : ctx.batch_size * (ctx.rank + 1)], + None, + ) diff --git a/Downstream/Video-Text-Retrieval/preprocess/compress_video.py b/Downstream/Video-Text-Retrieval/preprocess/compress_video.py new file mode 100644 index 0000000..23f1c20 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/preprocess/compress_video.py @@ -0,0 +1,85 @@ +""" +Used to compress video in: https://github.com/ArrowLuo/CLIP4Clip +Author: ArrowLuo +""" +import os +import argparse +import ffmpeg +import subprocess +import time +import multiprocessing +from multiprocessing import Pool +import shutil +try: + from psutil import cpu_count +except: + from multiprocessing import cpu_count +# multiprocessing.freeze_support() + +def compress(paras): + input_video_path, output_video_path = paras + try: + command = ['ffmpeg', + '-y', # (optional) overwrite output file if it exists + '-i', input_video_path, + '-filter:v', + 'scale=\'if(gt(a,1),trunc(oh*a/2)*2,224)\':\'if(gt(a,1),224,trunc(ow*a/2)*2)\'', # scale to 224 + '-map', '0:v', + '-r', '3', # frames per second + output_video_path, + ] + ffmpeg = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = ffmpeg.communicate() + retcode = ffmpeg.poll() + # print something above for debug + except Exception as e: + raise e + +def prepare_input_output_pairs(input_root, output_root): + input_video_path_list = [] + output_video_path_list = [] + for root, dirs, files in os.walk(input_root): + for file_name in files: + input_video_path = os.path.join(root, file_name) + output_video_path = os.path.join(output_root, file_name) + if os.path.exists(output_video_path) and os.path.getsize(output_video_path) > 0: + pass + else: + input_video_path_list.append(input_video_path) + output_video_path_list.append(output_video_path) + return input_video_path_list, output_video_path_list + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='Compress video for speed-up') + parser.add_argument('--input_root', type=str, help='input root') + parser.add_argument('--output_root', type=str, help='output root') + args = parser.parse_args() + + input_root = args.input_root + output_root = args.output_root + + assert input_root != output_root + + if not os.path.exists(output_root): + os.makedirs(output_root, exist_ok=True) + + input_video_path_list, output_video_path_list = prepare_input_output_pairs(input_root, output_root) + + print("Total video need to process: {}".format(len(input_video_path_list))) + num_works = cpu_count() + print("Begin with {}-core logical processor.".format(num_works)) + + pool = Pool(num_works) + data_dict_list = pool.map(compress, + [(input_video_path, output_video_path) for + input_video_path, output_video_path in + zip(input_video_path_list, output_video_path_list)]) + pool.close() + pool.join() + + print("Compress finished, wait for checking files...") + for input_video_path, output_video_path in zip(input_video_path_list, output_video_path_list): + if os.path.exists(input_video_path): + if os.path.exists(output_video_path) is False or os.path.getsize(output_video_path) < 1.: + shutil.copyfile(input_video_path, 
output_video_path) + print("Copy and replace file: {}".format(output_video_path)) \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/util.py b/Downstream/Video-Text-Retrieval/util.py new file mode 100644 index 0000000..6b11cd4 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/util.py @@ -0,0 +1,73 @@ +import torch +import torch.nn as nn +import threading +from torch._utils import ExceptionWrapper +import logging + +def get_a_var(obj): + if isinstance(obj, torch.Tensor): + return obj + + if isinstance(obj, list) or isinstance(obj, tuple): + for result in map(get_a_var, obj): + if isinstance(result, torch.Tensor): + return result + if isinstance(obj, dict): + for result in map(get_a_var, obj.items()): + if isinstance(result, torch.Tensor): + return result + return None + +def parallel_apply(fct, model, inputs, device_ids): + modules = nn.parallel.replicate(model, device_ids) + assert len(modules) == len(inputs) + lock = threading.Lock() + results = {} + grad_enabled = torch.is_grad_enabled() + + def _worker(i, module, input): + torch.set_grad_enabled(grad_enabled) + device = get_a_var(input).get_device() + try: + with torch.cuda.device(device): + # this also avoids accidental slicing of `input` if it is a Tensor + if not isinstance(input, (list, tuple)): + input = (input,) + output = fct(module, *input) + with lock: + results[i] = output + except Exception: + with lock: + results[i] = ExceptionWrapper(where="in replica {} on device {}".format(i, device)) + + if len(modules) > 1: + threads = [threading.Thread(target=_worker, args=(i, module, input)) + for i, (module, input) in enumerate(zip(modules, inputs))] + + for thread in threads: + thread.start() + for thread in threads: + thread.join() + else: + _worker(0, modules[0], inputs[0]) + + outputs = [] + for i in range(len(inputs)): + output = results[i] + if isinstance(output, ExceptionWrapper): + output.reraise() + outputs.append(output) + return outputs + +def get_logger(filename=None): + logger = logging.getLogger('logger') + logger.setLevel(logging.DEBUG) + logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', + datefmt='%m/%d/%Y %H:%M:%S', + level=logging.INFO) + if filename is not None: + handler = logging.FileHandler(filename) + handler.setLevel(logging.DEBUG) + handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s')) + logging.getLogger().addHandler(handler) + return logger \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/zeroshot_scripts/eval_activitynet.sh b/Downstream/Video-Text-Retrieval/zeroshot_scripts/eval_activitynet.sh new file mode 100644 index 0000000..5e51240 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/zeroshot_scripts/eval_activitynet.sh @@ -0,0 +1,29 @@ +# srun -p video --gres=gpu:1 --cpus-per-task=16 --quotatype=spot -N1 --job-name=zs_activity \ +python -u -m main_task_retrieval \ + --do_eval \ + --num_thread_reader=8 \ + --n_display=50 \ + --epochs=5 \ + --lr=1e-3 \ + --coef_lr=5e-3 \ + --batch_size=8 \ + --batch_size_val=8 \ + --data_path="./data/ActivityNet/" \ + --features_path="" \ + --datatype="activity" \ + --max_words=77 \ + --max_frames=16 \ + --feature_framerate=1 \ + --pretrained_clip_name="ViT-L/14" \ + --slice_framepos=2 \ + --loose_type \ + --linear_patch=2d \ + --interaction=no \ + --sim_header=meanP \ + --freeze_layer_num=0 \ + --expand_msrvtt_sentences \ + --clip_evl \ + --output_dir="" \ + --pretrained_path="" \ + --zeroshot \ + --mergeclip=True \ \ No newline at end of file diff --git 
a/Downstream/Video-Text-Retrieval/zeroshot_scripts/eval_didemo.sh b/Downstream/Video-Text-Retrieval/zeroshot_scripts/eval_didemo.sh new file mode 100644 index 0000000..b6fb0aa --- /dev/null +++ b/Downstream/Video-Text-Retrieval/zeroshot_scripts/eval_didemo.sh @@ -0,0 +1,27 @@ +# srun -p video --gres=gpu:1 --cpus-per-task=16 --quotatype=spot -N1 --job-name=zs_didemo \ +python -u -m main_task_retrieval \ + --do_eval \ + --num_thread_reader=8 \ + --n_display=50 \ + --epochs=5 \ + --lr=1e-3 \ + --coef_lr=5e-3 \ + --batch_size=8 \ + --batch_size_val=32 \ + --data_path="./data/DiDeMo/" \ + --features_path="" \ + --datatype="didemo" \ + --max_words=77 \ + --max_frames=32 \ + --feature_framerate=1 \ + --pretrained_clip_name="ViT-L/14" \ + --slice_framepos=2 \ + --loose_type \ + --linear_patch=2d \ + --interaction=no \ + --sim_header=meanP \ + --freeze_layer_num=0 \ + --expand_msrvtt_sentences \ + --clip_evl \ + --output_dir="" \ + --pretrained_path="" \ diff --git a/Downstream/Video-Text-Retrieval/zeroshot_scripts/eval_lsmdc.sh b/Downstream/Video-Text-Retrieval/zeroshot_scripts/eval_lsmdc.sh new file mode 100644 index 0000000..61045e2 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/zeroshot_scripts/eval_lsmdc.sh @@ -0,0 +1,28 @@ +# srun -p video --gres=gpu:1 --cpus-per-task=16 --quotatype=spot -N1 --job-name=zs_lsmdc \ +python -u -m main_task_retrieval \ + --do_eval \ + --num_thread_reader=8 \ + --n_display=50 \ + --epochs=5 \ + --lr=1e-3 \ + --coef_lr=5e-3 \ + --batch_size=8 \ + --batch_size_val=32 \ + --data_path="./data/LSMDC/" \ + --features_path="" \ + --datatype="lsmdc" \ + --max_words=77 \ + --max_frames=6 \ + --feature_framerate=1 \ + --pretrained_clip_name="ViT-L/14" \ + --slice_framepos=2 \ + --loose_type \ + --linear_patch=2d \ + --interaction=no \ + --sim_header=meanP \ + --freeze_layer_num=0 \ + --expand_msrvtt_sentences \ + --clip_evl \ + --output_dir="" \ + --pretrained_path="" \ + --mergeclip=true \ diff --git a/Downstream/Video-Text-Retrieval/zeroshot_scripts/eval_msrvtt.sh b/Downstream/Video-Text-Retrieval/zeroshot_scripts/eval_msrvtt.sh new file mode 100644 index 0000000..efa3687 --- /dev/null +++ b/Downstream/Video-Text-Retrieval/zeroshot_scripts/eval_msrvtt.sh @@ -0,0 +1,28 @@ +# srun -p video -N1 -n1 --gres=gpu:1 --cpus-per-task=16 --quotatype=auto --job-name=zs_msrvtt \ +python -u -m main_task_retrieval \ + --do_eval \ + --num_thread_reader=4 \ + --epochs=5 \ + --batch_size=8 \ + --n_display=50 \ + --train_csv="${DATA_PATH}/MSRVTT_train.9k.csv" \ + --val_csv="${DATA_PATH}/MSRVTT_JSFUSION_test.csv" \ + --data_path="${DATA_PATH}/MSRVTT_data.json" \ + --lr=1e-4 \ + --max_words=77 \ + --max_frames=8 \ + --batch_size_val=16 \ + --datatype="msrvtt" \ + --expand_msrvtt_sentences \ + --feature_framerate=1 \ + --coef_lr=1e-3 \ + --freeze_layer_num=-1 \ + --slice_framepos=2 \ + --linear_patch=2d \ + --sim_header=meanP \ + --loose_type \ + --pretrained_clip_name="ViT-L/14" \ + --clip_evl \ + --pretrained_path="" \ + --output_dir="" \ + --mergeclip=True \ \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/zeroshot_scripts/eval_msvd.sh b/Downstream/Video-Text-Retrieval/zeroshot_scripts/eval_msvd.sh new file mode 100644 index 0000000..711620d --- /dev/null +++ b/Downstream/Video-Text-Retrieval/zeroshot_scripts/eval_msvd.sh @@ -0,0 +1,29 @@ +# srun -p video --gres=gpu:1 --cpus-per-task=16 --quotatype=spot -N1 --job-name=zs_msvd \ +python -u -m main_task_retrieval \ + --do_eval \ + --num_thread_reader=8 \ + --n_display=50 \ + --epochs=5 \ + --lr=1e-3 \ + 
--coef_lr=5e-3 \ + --batch_size=8 \ + --batch_size_val=32 \ + --features_path="" \ + --data_path="" \ + --datatype="msvd" \ + --max_words=77 \ + --max_frames=12 \ + --feature_framerate=1 \ + --pretrained_clip_name="ViT-L/14" \ + --slice_framepos=2 \ + --loose_type \ + --linear_patch=2d \ + --interaction=no \ + --sim_header=meanP \ + --freeze_layer_num=0 \ + --expand_msrvtt_sentences \ + --clip_evl \ + --output_dir="" \ + --pretrained_path="" \ + --mergeclip=true \ + --mergeweight=0.25 \ \ No newline at end of file diff --git a/Downstream/Video-Text-Retrieval/zeroshot_scripts/eval_vatex.sh b/Downstream/Video-Text-Retrieval/zeroshot_scripts/eval_vatex.sh new file mode 100644 index 0000000..640a11b --- /dev/null +++ b/Downstream/Video-Text-Retrieval/zeroshot_scripts/eval_vatex.sh @@ -0,0 +1,29 @@ +# srun -p video --gres=gpu:1 --cpus-per-task=16 --quotatype=spot -N1 --job-name=zs_vatex \ +python -u -m main_task_retrieval \ + --do_eval \ + --num_thread_reader=8 \ + --n_display=50 \ + --epochs=5 \ + --lr=1e-4 \ + --coef_lr=1e-3 \ + --batch_size=128 \ + --batch_size_val=128 \ + --features_path="" \ + --data_path="./data/VATEX/" \ + --datatype="vatex" \ + --max_words=77 \ + --max_frames=8 \ + --feature_framerate=1 \ + --pretrained_clip_name="ViT-L/14" \ + --slice_framepos=2 \ + --loose_type \ + --linear_patch=2d \ + --interaction=no \ + --sim_header=meanP \ + --freeze_layer_num=0 \ + --expand_msrvtt_sentences \ + --output_dir="" \ + --pretrained_path="" \ + --clip_evl \ + --mergeclip=true \ + --mergeweight=0.5 \ diff --git a/Downstream/Visual-Language-Navigation/.gitignore b/Downstream/Visual-Language-Navigation/.gitignore new file mode 100644 index 0000000..c8a1332 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/.gitignore @@ -0,0 +1,143 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class +**/*.pyc + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+
+.github
+.pylintrc
+.pre-commit-config.yaml
+.readthedocs.yml
+
+# custom
+/data
+.vscode
+.idea
+*.pkl
+*.pkl.json
+*.log.json
+benchlist.txt
+work_dirs/
+
+# Pytorch
+*.pth
+
+# Profile
+*.prof
+
+# added by wentao
+*.avi
+temp/
+*.npz
+*.pkl
+experiments/*/results*
+experiments/*/logs
+*.o
+*backup*
+tools/data/mit/log*
+tools/data/*log*
+.github/workflows
+demo/hmdb51*
+demo/ucf101*
+demo/*.avi
+demo/*.mp4
\ No newline at end of file
diff --git a/Downstream/Visual-Language-Navigation/README.md b/Downstream/Visual-Language-Navigation/README.md
new file mode 100755
index 0000000..7d79718
--- /dev/null
+++ b/Downstream/Visual-Language-Navigation/README.md
@@ -0,0 +1,18 @@
+# VLN-CE Downstream
+This is an official implementation of the visual-language navigation task in [InternVideo](https://arxiv.org/abs/2212.03191).
+
+## Running the code
+
+We currently provide evaluation of our pretrained model.
+
+### Conda environment preparation
+1. Please follow https://github.com/jacobkrantz/VLN-CE to install Habitat Simulator and Habitat-lab. We use Python 3.6 in our experiments.
+2. Follow https://github.com/openai/CLIP to install CLIP.
+
+### Data/Model preparation
+1. Follow https://github.com/jacobkrantz/VLN-CE to download the Matterport3D environment to `data/scene_datasets`. Data should have the form `data/scene_datasets/mp3d/{scene}/{scene}.glb`.
+2. Download the preprocessed VLN-CE dataset from https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/vln/dataset.zip to `data/datasets`. Data should have the form `data/datasets/R2R_VLNCE_v1-2_preprocessed_BERTidx/{split}` and `data/datasets/R2R_VLNCE_v1-2_preprocessed/{split}`.
+3. Download pretrained models from https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/vln/pretrained.zip to `pretrained`. It should contain the following folders: `pretrained/pretrained_models`, `pretrained/VideoMAE`, `pretrained/wp_pred`, `pretrained/ddppo-models`, `pretrained/Prevalent`.
+
+### Running the code
+Simply run `bash eval_base.bash` or `bash eval_large.bash` to start evaluating the agent.
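Before running the evaluation scripts that follow, it can help to confirm the layout described above. The snippet below is an editorial helper, not part of the release; the paths simply mirror the ones listed in this README.

```python
# Editorial helper (not part of the release): verify the data/model layout
# described in the README before launching the eval scripts.
import glob
import os

expected_dirs = [
    "data/scene_datasets/mp3d",
    "data/datasets/R2R_VLNCE_v1-2_preprocessed_BERTidx",
    "data/datasets/R2R_VLNCE_v1-2_preprocessed",
    "pretrained/pretrained_models",
    "pretrained/VideoMAE",
    "pretrained/wp_pred",
    "pretrained/ddppo-models",
    "pretrained/Prevalent",
]

missing = [d for d in expected_dirs if not os.path.isdir(d)]
if missing:
    print("Missing directories:\n  " + "\n  ".join(missing))
else:
    scenes = glob.glob("data/scene_datasets/mp3d/*/*.glb")
    print(f"Layout looks complete ({len(scenes)} Matterport3D scenes found).")
```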
\ No newline at end of file diff --git a/Downstream/Visual-Language-Navigation/eval_base.bash b/Downstream/Visual-Language-Navigation/eval_base.bash new file mode 100644 index 0000000..31d7ed6 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/eval_base.bash @@ -0,0 +1,17 @@ +# srun -p GVT -n1 -c5 --gres=gpu:1 -x SH-IDC1-10-198-8-[61,62] bash iter_eval.bash + +ngpus=1 +flag="--exp_name eval_base + --run-type eval + --exp-config iter_train.yaml + + SIMULATOR_GPU_IDS [0] + TORCH_GPU_IDS [0] + GPU_NUMBERS $ngpus + NUM_ENVIRONMENTS 11 + + EVAL.SAVE_RESULTS False + EVAL.CKPT_PATH_DIR pretrained/pretrained_models/base_ckpt.iter6900.pth + TASK_CONFIG.SIMULATOR.HABITAT_SIM_V0.ALLOW_SLIDING True + " +python run.py $flag diff --git a/Downstream/Visual-Language-Navigation/eval_large.bash b/Downstream/Visual-Language-Navigation/eval_large.bash new file mode 100644 index 0000000..076656e --- /dev/null +++ b/Downstream/Visual-Language-Navigation/eval_large.bash @@ -0,0 +1,19 @@ +# srun -p GVT -n1 -c5 --gres=gpu:1 -x SH-IDC1-10-198-8-[61,62] bash iter_eval.bash + +ngpus=1 +flag="--exp_name eval_large + --run-type eval + --exp-config iter_train.yaml + + SIMULATOR_GPU_IDS [0] + TORCH_GPU_IDS [0] + GPU_NUMBERS $ngpus + NUM_ENVIRONMENTS 11 + + MODEL.RGB_ENCODER.backbone_type VideoIntern-Large + + EVAL.SAVE_RESULTS False + EVAL.CKPT_PATH_DIR pretrained/pretrained_models/large_ckpt.iter9000.pth + TASK_CONFIG.SIMULATOR.HABITAT_SIM_V0.ALLOW_SLIDING True + " +python run.py $flag diff --git a/Downstream/Visual-Language-Navigation/exp/cma_r2r.yaml b/Downstream/Visual-Language-Navigation/exp/cma_r2r.yaml new file mode 100755 index 0000000..2f3331b --- /dev/null +++ b/Downstream/Visual-Language-Navigation/exp/cma_r2r.yaml @@ -0,0 +1,49 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/r2r_vlnce.yaml +SIMULATOR_GPU_IDS: [0] +TORCH_GPU_ID: 0 +TORCH_GPU_IDS: [0] +TRAINER_NAME: schedulesampler-CMA +GPU_NUMBERS: 1 +NUM_ENVIRONMENTS: 1 +TENSORBOARD_DIR: data/logs/tensorboard_dirs/ +CHECKPOINT_FOLDER: data/logs/checkpoints/ +EVAL_CKPT_PATH_DIR: data/logs/checkpoints/ +RESULTS_DIR: data/logs/eval_results/ +VIDEO_DIR: data/logs/video/ + +INFERENCE: + SPLIT: test + USE_CKPT_CONFIG: False + SAMPLE: False + CKPT_PATH: '' # REPLACE THIS + PREDICTIONS_FILE: '' + FORMAT: r2r + +EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + EPISODE_COUNT: -1 + +IL: + epochs: 50 + batch_size: 16 + schedule_ratio: 0.75 + decay_time: 10 + max_traj_len: 20 + max_text_len: 512 + +MODEL: + policy_name: PolicyViewSelectionCMA + spatial_output: False + RGB_ENCODER: + output_size: 512 + DEPTH_ENCODER: + output_size: 256 + VISUAL_DIM: + vis_hidden: 512 + directional: 64 + INSTRUCTION_ENCODER: + bidirectional: True + use_pretrained_embeddings: False + embedding_size: 256 + hidden_size: 256 diff --git a/Downstream/Visual-Language-Navigation/exp/cma_rxr_en.yaml b/Downstream/Visual-Language-Navigation/exp/cma_rxr_en.yaml new file mode 100644 index 0000000..8502152 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/exp/cma_rxr_en.yaml @@ -0,0 +1,58 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/rxr_vlnce_en.yaml +SIMULATOR_GPU_IDS: [0] +TORCH_GPU_ID: 0 +TORCH_GPU_IDS: [0] +TRAINER_NAME: schedulesampler-CMA +GPU_NUMBERS: 1 +NUM_ENVIRONMENTS: 1 +TENSORBOARD_DIR: data/logs/tensorboard_dirs/ +CHECKPOINT_FOLDER: data/logs/checkpoints/ +EVAL_CKPT_PATH_DIR: data/logs/checkpoints/ +RESULTS_DIR: data/logs/eval_results/ +VIDEO_DIR: data/logs/video/ + +INFERENCE: + SPLIT: test_challenge + LANGUAGES: [en-US, en-IN] + SAMPLE: True + 
USE_CKPT_CONFIG: False + CKPT_PATH: '' # REPLACE THIS + PREDICTIONS_FILE: '' + FORMAT: rxr + +EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + LANGUAGES: [en-US, en-IN] + EPISODE_COUNT: -1 + SAMPLE: True + +RL: + POLICY: + OBS_TRANSFORMS: + ENABLED_TRANSFORMS: [ResizerPerSensor, CenterCropperPerSensor] + +IL: + epochs: 30 + batch_size: 16 + schedule_ratio: 0.75 + decay_time: 10 + max_traj_len: 30 + max_text_len: 512 + +MODEL: + policy_name: PolicyViewSelectionCMA + spatial_output: False + RGB_ENCODER: + output_size: 512 + DEPTH_ENCODER: + output_size: 256 + VISUAL_DIM: + vis_hidden: 512 + directional: 64 + INSTRUCTION_ENCODER: + sensor_uuid: rxr_instruciton # instruction + bidirectional: True + use_pretrained_embeddings: False + embedding_size: 768 # 256 + hidden_size: 256 diff --git a/Downstream/Visual-Language-Navigation/exp/cma_rxr_hi.yaml b/Downstream/Visual-Language-Navigation/exp/cma_rxr_hi.yaml new file mode 100644 index 0000000..fbda743 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/exp/cma_rxr_hi.yaml @@ -0,0 +1,58 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/rxr_vlnce_hi.yaml +SIMULATOR_GPU_IDS: [0] +TORCH_GPU_ID: 0 +TORCH_GPU_IDS: [0] +TRAINER_NAME: schedulesampler-CMA +GPU_NUMBERS: 1 +NUM_ENVIRONMENTS: 1 +TENSORBOARD_DIR: data/logs/tensorboard_dirs/ +CHECKPOINT_FOLDER: data/logs/checkpoints/ +EVAL_CKPT_PATH_DIR: data/logs/checkpoints/ +RESULTS_DIR: data/logs/eval_results/ +VIDEO_DIR: data/logs/video/ + +INFERENCE: + SPLIT: test_challenge + LANGUAGES: [hi-IN] + SAMPLE: True + USE_CKPT_CONFIG: False + CKPT_PATH: '' # REPLACE THIS + PREDICTIONS_FILE: '' + FORMAT: rxr + +EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + LANGUAGES: [hi-IN] + EPISODE_COUNT: -1 + SAMPLE: True + +RL: + POLICY: + OBS_TRANSFORMS: + ENABLED_TRANSFORMS: [ResizerPerSensor, CenterCropperPerSensor] + +IL: + epochs: 30 + batch_size: 16 + schedule_ratio: 0.75 + decay_time: 10 + max_traj_len: 30 + max_text_len: 512 + +MODEL: + policy_name: PolicyViewSelectionCMA + spatial_output: False + RGB_ENCODER: + output_size: 512 + DEPTH_ENCODER: + output_size: 256 + VISUAL_DIM: + vis_hidden: 512 + directional: 64 + INSTRUCTION_ENCODER: + sensor_uuid: rxr_instruciton # instruction + bidirectional: True + use_pretrained_embeddings: False + embedding_size: 768 # 256 + hidden_size: 256 diff --git a/Downstream/Visual-Language-Navigation/exp/cma_rxr_te.yaml b/Downstream/Visual-Language-Navigation/exp/cma_rxr_te.yaml new file mode 100644 index 0000000..0585b5b --- /dev/null +++ b/Downstream/Visual-Language-Navigation/exp/cma_rxr_te.yaml @@ -0,0 +1,58 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/rxr_vlnce_te.yaml +SIMULATOR_GPU_IDS: [0] +TORCH_GPU_ID: 0 +TORCH_GPU_IDS: [0] +TRAINER_NAME: schedulesampler-CMA +GPU_NUMBERS: 1 +NUM_ENVIRONMENTS: 1 +TENSORBOARD_DIR: data/logs/tensorboard_dirs/ +CHECKPOINT_FOLDER: data/logs/checkpoints/ +EVAL_CKPT_PATH_DIR: data/logs/checkpoints/ +RESULTS_DIR: data/logs/eval_results/ +VIDEO_DIR: data/logs/video/ + +INFERENCE: + SPLIT: test_challenge + LANGUAGES: [te-IN] + SAMPLE: True + USE_CKPT_CONFIG: False + CKPT_PATH: '' # REPLACE THIS + PREDICTIONS_FILE: '' + FORMAT: rxr + +EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + LANGUAGES: [te-IN] + EPISODE_COUNT: -1 + SAMPLE: True + +RL: + POLICY: + OBS_TRANSFORMS: + ENABLED_TRANSFORMS: [ResizerPerSensor, CenterCropperPerSensor] + +IL: + epochs: 30 + batch_size: 16 + schedule_ratio: 0.75 + decay_time: 10 + max_traj_len: 30 + max_text_len: 512 + +MODEL: + policy_name: PolicyViewSelectionCMA + spatial_output: False + 
RGB_ENCODER: + output_size: 512 + DEPTH_ENCODER: + output_size: 256 + VISUAL_DIM: + vis_hidden: 512 + directional: 64 + INSTRUCTION_ENCODER: + sensor_uuid: rxr_instruciton # instruction + bidirectional: True + use_pretrained_embeddings: False + embedding_size: 768 # 256 + hidden_size: 256 diff --git a/Downstream/Visual-Language-Navigation/exp/vlnbert_r2r.yaml b/Downstream/Visual-Language-Navigation/exp/vlnbert_r2r.yaml new file mode 100755 index 0000000..7593297 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/exp/vlnbert_r2r.yaml @@ -0,0 +1,48 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/r2r_vlnce.yaml +SIMULATOR_GPU_IDS: [0] +TORCH_GPU_ID: 0 +TORCH_GPU_IDS: [0] +TRAINER_NAME: schedulesampler-VLNBERT +GPU_NUMBERS: 1 +NUM_ENVIRONMENTS: 1 +TENSORBOARD_DIR: data/logs/tensorboard_dirs/ +CHECKPOINT_FOLDER: data/logs/checkpoints/ +EVAL_CKPT_PATH_DIR: data/logs/checkpoints/ +RESULTS_DIR: data/logs/eval_results/ +VIDEO_DIR: data/logs/video/ + +INFERENCE: + SPLIT: test + USE_CKPT_CONFIG: False + SAMPLE: False + CKPT_PATH: '' # REPLACE THIS + PREDICTIONS_FILE: '' + FORMAT: r2r + +EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + EPISODE_COUNT: -1 + +IL: + epochs: 50 + batch_size: 16 + schedule_ratio: 0.75 + decay_time: 10 + max_traj_len: 20 + max_text_len: 300 + +MODEL: + policy_name: PolicyViewSelectionVLNBERT + NUM_ANGLES: 12 + spatial_output: False + RGB_ENCODER: + output_size: 512 + DEPTH_ENCODER: + output_size: 256 + VISUAL_DIM: + vis_hidden: 768 + directional: 128 + INSTRUCTION_ENCODER: + bidirectional: True + diff --git a/Downstream/Visual-Language-Navigation/exp/vlnbert_r2r_da.yaml b/Downstream/Visual-Language-Navigation/exp/vlnbert_r2r_da.yaml new file mode 100644 index 0000000..270fb33 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/exp/vlnbert_r2r_da.yaml @@ -0,0 +1,55 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/r2r_vlnce.yaml +SIMULATOR_GPU_IDS: [0] +TORCH_GPU_ID: 0 +TORCH_GPU_IDS: [0] +TRAINER_NAME: dagger +GPU_NUMBERS: 1 +NUM_ENVIRONMENTS: 2 #TODO 15 +TENSORBOARD_DIR: data/logs/tensorboard_dirs/ +CHECKPOINT_FOLDER: data/logs/checkpoints/ +EVAL_CKPT_PATH_DIR: data/logs/checkpoints/ +RESULTS_DIR: data/logs/eval_results/ +VIDEO_DIR: data/logs/video/ + +INFERENCE: + SPLIT: test + USE_CKPT_CONFIG: False + SAMPLE: False + CKPT_PATH: '' # REPLACE THIS + PREDICTIONS_FILE: '' + FORMAT: r2r + +EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + EPISODE_COUNT: -1 + +IL: + waypoint_aug: True + epochs: 4 + batch_size: 4 #TODO 32 + max_traj_len: 20 + max_text_len: 300 + + DAGGER: + iterations: 4 #TODO 10 + update_size: 60 #TODO 6000 + lmdb_fp16: True + p: 0.75 + preload_lmdb_features: False #TODO False + lmdb_features_dir: data/trajectories_dirs/debug/trajectories.lmdb #TODO + +MODEL: + policy_name: PolicyViewSelectionVLNBERT + NUM_ANGLES: 12 + spatial_output: False + RGB_ENCODER: + output_size: 512 + DEPTH_ENCODER: + output_size: 256 + VISUAL_DIM: + vis_hidden: 768 + directional: 128 + INSTRUCTION_ENCODER: + bidirectional: True + diff --git a/Downstream/Visual-Language-Navigation/exp/vlnbert_rxr_en.yaml b/Downstream/Visual-Language-Navigation/exp/vlnbert_rxr_en.yaml new file mode 100644 index 0000000..ea4ebd2 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/exp/vlnbert_rxr_en.yaml @@ -0,0 +1,57 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/rxr_vlnce_en.yaml +SIMULATOR_GPU_IDS: [0] +TORCH_GPU_ID: 0 +TORCH_GPU_IDS: [0] +TRAINER_NAME: schedulesampler-VLNBERT +GPU_NUMBERS: 1 +NUM_ENVIRONMENTS: 1 +TENSORBOARD_DIR: data/logs/tensorboard_dirs/ 
+CHECKPOINT_FOLDER: data/logs/checkpoints/ +EVAL_CKPT_PATH_DIR: data/logs/checkpoints/ +RESULTS_DIR: data/logs/eval_results/ +VIDEO_DIR: data/logs/video/ + +INFERENCE: + SPLIT: test_challenge + LANGUAGES: [en-US, en-IN] + SAMPLE: True + USE_CKPT_CONFIG: False + CKPT_PATH: '' # REPLACE THIS + PREDICTIONS_FILE: '' + FORMAT: rxr + +EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + LANGUAGES: [en-US, en-IN] + EPISODE_COUNT: -1 + SAMPLE: True + +RL: + POLICY: + OBS_TRANSFORMS: + ENABLED_TRANSFORMS: [ResizerPerSensor, CenterCropperPerSensor] + +IL: + epochs: 30 + batch_size: 16 + schedule_ratio: 0.5 + decay_time: 10 + max_traj_len: 30 + max_text_len: 300 + +MODEL: + policy_name: PolicyViewSelectionVLNBERT + NUM_ANGLES: 12 + spatial_output: False + RGB_ENCODER: + output_size: 512 + DEPTH_ENCODER: + output_size: 256 + VISUAL_DIM: + vis_hidden: 768 + directional: 128 + INSTRUCTION_ENCODER: + bidirectional: True + sensor_uuid: rxr_instruction + embedding_size: 768 diff --git a/Downstream/Visual-Language-Navigation/habitat_extensions/__init__.py b/Downstream/Visual-Language-Navigation/habitat_extensions/__init__.py new file mode 100755 index 0000000..77f0b23 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/habitat_extensions/__init__.py @@ -0,0 +1,4 @@ +from habitat_extensions import measures, obs_transformers, sensors, nav +from habitat_extensions.config.default import get_extended_config +from habitat_extensions.task import VLNCEDatasetV1 +from habitat_extensions.habitat_simulator import Simulator diff --git a/Downstream/Visual-Language-Navigation/habitat_extensions/config/__init__.py b/Downstream/Visual-Language-Navigation/habitat_extensions/config/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/Downstream/Visual-Language-Navigation/habitat_extensions/config/default.py b/Downstream/Visual-Language-Navigation/habitat_extensions/config/default.py new file mode 100755 index 0000000..ed122ae --- /dev/null +++ b/Downstream/Visual-Language-Navigation/habitat_extensions/config/default.py @@ -0,0 +1,154 @@ +from typing import List, Optional, Union + +from habitat.config.default import Config as CN +from habitat.config.default import get_config + +_C = get_config() +_C.defrost() + +# ---------------------------------------------------------------------------- +# CUSTOM ACTION: HIGHTOLOWINFERENCE ACTION +# ---------------------------------------------------------------------------- +_C.TASK.ACTIONS.HIGHTOLOWINFERENCE = CN() +_C.TASK.ACTIONS.HIGHTOLOWINFERENCE.TYPE = 'MoveHighToLowActionInference' +# ---------------------------------------------------------------------------- +# CUSTOM ACTION: HIGHTOLOWEVAL ACTION +# ---------------------------------------------------------------------------- +_C.TASK.ACTIONS.HIGHTOLOWEVAL = CN() +_C.TASK.ACTIONS.HIGHTOLOWEVAL.TYPE = 'MoveHighToLowActionEval' +# ---------------------------------------------------------------------------- +# CUSTOM ACTION: HIGHTOLOW ACTION +# ---------------------------------------------------------------------------- +_C.TASK.ACTIONS.HIGHTOLOW = CN() +_C.TASK.ACTIONS.HIGHTOLOW.TYPE = 'MoveHighToLowAction' +# ---------------------------------------------------------------------------- +# GPS SENSOR +# ---------------------------------------------------------------------------- +_C.TASK.GLOBAL_GPS_SENSOR = CN() +_C.TASK.GLOBAL_GPS_SENSOR.TYPE = "GlobalGPSSensor" +_C.TASK.GLOBAL_GPS_SENSOR.DIMENSIONALITY = 3 +# ---------------------------------------------------------------------------- +# # RXR INSTRUCTION 
SENSOR +# ---------------------------------------------------------------------------- +_C.TASK.RXR_INSTRUCTION_SENSOR = CN() +_C.TASK.RXR_INSTRUCTION_SENSOR.TYPE = "RxRInstructionSensor" +_C.TASK.RXR_INSTRUCTION_SENSOR.features_path = "data/datasets/RxR_VLNCE_v0/text_features/rxr_{split}/{id:06}_{lang}_text_features.npz" +_C.TASK.RXR_INSTRUCTION_SENSOR.max_text_len = 512 +_C.TASK.INSTRUCTION_SENSOR_UUID = "rxr_instruction" +# ---------------------------------------------------------------------------- +# SHORTEST PATH SENSOR (previously: VLN_ORACLE_ACTION_SENSOR) +# ---------------------------------------------------------------------------- +_C.TASK.SHORTEST_PATH_SENSOR = CN() +_C.TASK.SHORTEST_PATH_SENSOR.TYPE = "ShortestPathSensor" +# all goals can be navigated to within 0.5m. +_C.TASK.SHORTEST_PATH_SENSOR.GOAL_RADIUS = 0.5 +# compatibility with the dataset generation oracle and paper results. +# if False, use the ShortestPathFollower in Habitat +_C.TASK.SHORTEST_PATH_SENSOR.USE_ORIGINAL_FOLLOWER = False +# ---------------------------------------------------------------------------- +# VLN ORACLE PROGRESS SENSOR +# ---------------------------------------------------------------------------- +_C.TASK.VLN_ORACLE_PROGRESS_SENSOR = CN() +_C.TASK.VLN_ORACLE_PROGRESS_SENSOR.TYPE = "VLNOracleProgressSensor" +# ---------------------------------------------------------------------------- +# NDTW MEASUREMENT +# ---------------------------------------------------------------------------- +_C.TASK.NDTW = CN() +_C.TASK.NDTW.TYPE = "NDTW" +_C.TASK.NDTW.SPLIT = "val_seen" +_C.TASK.NDTW.FDTW = True # False: DTW +_C.TASK.NDTW.GT_PATH = ( + "data/datasets/R2R_VLNCE_v1-2_preprocessed/{split}/{split}_gt.json" +) +_C.TASK.NDTW.SUCCESS_DISTANCE = 3.0 +# ---------------------------------------------------------------------------- +# SDTW MEASUREMENT +# ---------------------------------------------------------------------------- +_C.TASK.SDTW = CN() +_C.TASK.SDTW.TYPE = "SDTW" +# ---------------------------------------------------------------------------- +# PATH_LENGTH MEASUREMENT +# ---------------------------------------------------------------------------- +_C.TASK.PATH_LENGTH = CN() +_C.TASK.PATH_LENGTH.TYPE = "PathLength" +# ---------------------------------------------------------------------------- +# ORACLE_NAVIGATION_ERROR MEASUREMENT +# ---------------------------------------------------------------------------- +_C.TASK.ORACLE_NAVIGATION_ERROR = CN() +_C.TASK.ORACLE_NAVIGATION_ERROR.TYPE = "OracleNavigationError" +# ---------------------------------------------------------------------------- +# ORACLE_SUCCESS MEASUREMENT +# ---------------------------------------------------------------------------- +_C.TASK.ORACLE_SUCCESS = CN() +_C.TASK.ORACLE_SUCCESS.TYPE = "OracleSuccess" +_C.TASK.ORACLE_SUCCESS.SUCCESS_DISTANCE = 3.0 +# ---------------------------------------------------------------------------- +# ORACLE_SPL MEASUREMENT +# ---------------------------------------------------------------------------- +_C.TASK.ORACLE_SPL = CN() +_C.TASK.ORACLE_SPL.TYPE = "OracleSPL" +# ---------------------------------------------------------------------------- +# STEPS_TAKEN MEASUREMENT +# ---------------------------------------------------------------------------- +_C.TASK.STEPS_TAKEN = CN() +_C.TASK.STEPS_TAKEN.TYPE = "StepsTaken" +# ---------------------------------------------------------------------------- +# POSITION MEASUREMENT For faster eval +# 
---------------------------------------------------------------------------- +_C.TASK.POSITION = CN() +_C.TASK.POSITION.TYPE = 'Position' +# ----------------------------------------------------------------------------- +# TOP_DOWN_MAP_VLNCE MEASUREMENT +# ----------------------------------------------------------------------------- +_C.TASK.TOP_DOWN_MAP_VLNCE = CN() +_C.TASK.TOP_DOWN_MAP_VLNCE.TYPE = "TopDownMapVLNCE" +_C.TASK.TOP_DOWN_MAP_VLNCE.MAX_EPISODE_STEPS = _C.ENVIRONMENT.MAX_EPISODE_STEPS +_C.TASK.TOP_DOWN_MAP_VLNCE.MAP_RESOLUTION = 1024 +_C.TASK.TOP_DOWN_MAP_VLNCE.DRAW_SOURCE_AND_TARGET = True +_C.TASK.TOP_DOWN_MAP_VLNCE.DRAW_BORDER = True +_C.TASK.TOP_DOWN_MAP_VLNCE.DRAW_SHORTEST_PATH = True +_C.TASK.TOP_DOWN_MAP_VLNCE.DRAW_REFERENCE_PATH = True +_C.TASK.TOP_DOWN_MAP_VLNCE.DRAW_FIXED_WAYPOINTS = True +_C.TASK.TOP_DOWN_MAP_VLNCE.DRAW_MP3D_AGENT_PATH = True +_C.TASK.TOP_DOWN_MAP_VLNCE.GRAPHS_FILE = "data/connectivity_graphs.pkl" +_C.TASK.TOP_DOWN_MAP_VLNCE.FOG_OF_WAR = CN() +_C.TASK.TOP_DOWN_MAP_VLNCE.FOG_OF_WAR.DRAW = True +_C.TASK.TOP_DOWN_MAP_VLNCE.FOG_OF_WAR.FOV = 79 +_C.TASK.TOP_DOWN_MAP_VLNCE.FOG_OF_WAR.VISIBILITY_DIST = 5.0 +# ---------------------------------------------------------------------------- +# DATASET EXTENSIONS +# ---------------------------------------------------------------------------- +_C.DATASET.ROLES = ["guide"] # options: "*", "guide", "follower" +# language options by region: "*", "te-IN", "hi-IN", "en-US", "en-IN" +_C.DATASET.LANGUAGES = ["*"] +# a list or set of episode IDs to allow in dataset creation. None allows all. +_C.DATASET.EPISODES_ALLOWED = None + + +def get_extended_config( + config_paths: Optional[Union[List[str], str]] = None, + opts: Optional[list] = None, +) -> CN: + r"""Create a unified config with default values overwritten by values from + :p:`config_paths` and overwritten by options from :p:`opts`. + + :param config_paths: List of config paths or string that contains comma + separated list of config paths. + :param opts: Config options (keys, values) in a list (e.g., passed from + command line into the config. For example, + :py:`opts = ['FOO.BAR', 0.5]`. Argument can be used for parameter + sweeping or quick tests. 
+ """ + config = _C.clone() + + if config_paths: + if isinstance(config_paths, str): + config_paths = [config_paths] + + for config_path in config_paths: + config.merge_from_file(config_path) + + if opts: + config.merge_from_list(opts) + config.freeze() + return config diff --git a/Downstream/Visual-Language-Navigation/habitat_extensions/config/r2r_vlnce.yaml b/Downstream/Visual-Language-Navigation/habitat_extensions/config/r2r_vlnce.yaml new file mode 100755 index 0000000..885c885 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/habitat_extensions/config/r2r_vlnce.yaml @@ -0,0 +1,60 @@ +ENVIRONMENT: + MAX_EPISODE_STEPS: 20 + +SIMULATOR: + ACTION_SPACE_CONFIG: v0 + AGENT_0: + SENSORS: [RGB_SENSOR, DEPTH_SENSOR] + FORWARD_STEP_SIZE: 0.25 + TURN_ANGLE: 45 + HABITAT_SIM_V0: + GPU_DEVICE_ID: 0 + ALLOW_SLIDING: True + RGB_SENSOR: + WIDTH: 224 + HEIGHT: 224 + HFOV: 90 + TYPE: HabitatSimRGBSensor + DEPTH_SENSOR: + WIDTH: 256 # pretrained DDPPO resnet needs 256x256 + HEIGHT: 256 + HFOV: 90 + TYPE: + Sim-v1 + +TASK: + TYPE: VLN-v0 + SUCCESS_DISTANCE: 3.0 + SENSORS: [ + INSTRUCTION_SENSOR, + # SHORTEST_PATH_SENSOR, + # VLN_ORACLE_PROGRESS_SENSOR + ] + INSTRUCTION_SENSOR_UUID: instruction + POSSIBLE_ACTIONS: [STOP, MOVE_FORWARD, TURN_LEFT, TURN_RIGHT, HIGHTOLOW] + MEASUREMENTS: [ + # DISTANCE_TO_GOAL, + # SUCCESS, + # SPL, + # NDTW, + # PATH_LENGTH, + # ORACLE_SUCCESS, + # STEPS_TAKEN + ] + SUCCESS: + SUCCESS_DISTANCE: 3.0 + SPL: + SUCCESS_DISTANCE: 3.0 + NDTW: + SUCCESS_DISTANCE: 3.0 + GT_PATH: data/datasets/R2R_VLNCE_v1-2_preprocessed/{split}/{split}_gt.json.gz + SDTW: + SUCCESS_DISTANCE: 3.0 + GT_PATH: data/datasets/R2R_VLNCE_v1-2_preprocessed/{split}/{split}_gt.json.gz + ORACLE_SUCCESS: + SUCCESS_DISTANCE: 3.0 +DATASET: + TYPE: VLN-CE-v1 + SPLIT: train + DATA_PATH: data/datasets/R2R_VLNCE_v1-2_preprocessed_BERTidx/{split}/{split}_bertidx.json.gz + SCENES_DIR: data/scene_datasets/ diff --git a/Downstream/Visual-Language-Navigation/habitat_extensions/config/rxr_vlnce_en.yaml b/Downstream/Visual-Language-Navigation/habitat_extensions/config/rxr_vlnce_en.yaml new file mode 100644 index 0000000..a248631 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/habitat_extensions/config/rxr_vlnce_en.yaml @@ -0,0 +1,81 @@ +ENVIRONMENT: + MAX_EPISODE_STEPS: 20 + +SIMULATOR: + TYPE: + Sim-v1 + TURN_ANGLE: 30 + TILT_ANGLE: 30 + ACTION_SPACE_CONFIG: v0 # v1 + AGENT_0: + SENSORS: [RGB_SENSOR, DEPTH_SENSOR] + HEIGHT: 0.88 + RADIUS: 0.18 + HABITAT_SIM_V0: + GPU_DEVICE_ID: 0 + ALLOW_SLIDING: False + RGB_SENSOR: + WIDTH: 640 + HEIGHT: 480 + HFOV: 79 + POSITION: [0, 0.88, 0] + DEPTH_SENSOR: + WIDTH: 640 + HEIGHT: 480 + HFOV: 79 + MIN_DEPTH: 0.5 + MAX_DEPTH: 5.0 + POSITION: [0, 0.88, 0] + +TASK: + TYPE: VLN-v0 + # POSSIBLE_ACTIONS: [STOP, MOVE_FORWARD, TURN_LEFT, TURN_RIGHT, LOOK_UP, LOOK_DOWN] + POSSIBLE_ACTIONS: [STOP, MOVE_FORWARD, TURN_LEFT, TURN_RIGHT, HIGHTOLOW] + SUCCESS_DISTANCE: 3.0 + SENSORS: [ + RXR_INSTRUCTION_SENSOR, + SHORTEST_PATH_SENSOR, + VLN_ORACLE_PROGRESS_SENSOR + ] + INSTRUCTION_SENSOR_UUID: rxr_instruction + + MEASUREMENTS: [ + # STEPS_TAKEN, + # PATH_LENGTH, + # DISTANCE_TO_GOAL, + # SUCCESS, + # ORACLE_SUCCESS, + # SPL, + # NDTW, + # TOP_DOWN_MAP_VLNCE + ] + ######################################### + # TOP_DOWN_MAP_VLNCE: + # DRAW_SHORTEST_PATH: False + # DRAW_REFERENCE_PATH: True + # DRAW_FIXED_WAYPOINTS: False + # DRAW_MP3D_AGENT_PATH: False + # FOG_OF_WAR: + # DRAW: True + # FOV: 360 + + SUCCESS: + SUCCESS_DISTANCE: 3.0 + SPL: + SUCCESS_DISTANCE: 3.0 + NDTW: + SUCCESS_DISTANCE: 
3.0 + GT_PATH: data/datasets/RxR_VLNCE_v0/{split}/{split}_{role}_gt.json.gz + SDTW: + SUCCESS_DISTANCE: 3.0 + GT_PATH: data/datasets/RxR_VLNCE_v0/{split}/{split}_{role}_gt.json.gz + ORACLE_SUCCESS: + SUCCESS_DISTANCE: 3.0 + +DATASET: + TYPE: RxR-VLN-CE-v1 + SPLIT: train + ROLES: [guide] # "*", "guide", "follower" + LANGUAGES: [en-US, en-IN] # "*", "te-IN", "hi-IN", "en-US", "en-IN" + DATA_PATH: data/datasets/RxR_VLNCE_v0/{split}/{split}_{role}.json.gz + SCENES_DIR: data/scene_datasets/ diff --git a/Downstream/Visual-Language-Navigation/habitat_extensions/config/rxr_vlnce_hi.yaml b/Downstream/Visual-Language-Navigation/habitat_extensions/config/rxr_vlnce_hi.yaml new file mode 100644 index 0000000..6efb1b5 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/habitat_extensions/config/rxr_vlnce_hi.yaml @@ -0,0 +1,81 @@ +ENVIRONMENT: + MAX_EPISODE_STEPS: 20 + +SIMULATOR: + TYPE: + Sim-v1 + TURN_ANGLE: 30 + TILT_ANGLE: 30 + ACTION_SPACE_CONFIG: v0 # v1 + AGENT_0: + SENSORS: [RGB_SENSOR, DEPTH_SENSOR] + HEIGHT: 0.88 + RADIUS: 0.18 + HABITAT_SIM_V0: + GPU_DEVICE_ID: 0 + ALLOW_SLIDING: False + RGB_SENSOR: + WIDTH: 640 + HEIGHT: 480 + HFOV: 79 + POSITION: [0, 0.88, 0] + DEPTH_SENSOR: + WIDTH: 640 + HEIGHT: 480 + HFOV: 79 + MIN_DEPTH: 0.5 + MAX_DEPTH: 5.0 + POSITION: [0, 0.88, 0] + +TASK: + TYPE: VLN-v0 + # POSSIBLE_ACTIONS: [STOP, MOVE_FORWARD, TURN_LEFT, TURN_RIGHT, LOOK_UP, LOOK_DOWN] + POSSIBLE_ACTIONS: [STOP, MOVE_FORWARD, TURN_LEFT, TURN_RIGHT, HIGHTOLOW] + SUCCESS_DISTANCE: 3.0 + SENSORS: [ + RXR_INSTRUCTION_SENSOR, + SHORTEST_PATH_SENSOR, + VLN_ORACLE_PROGRESS_SENSOR + ] + INSTRUCTION_SENSOR_UUID: rxr_instruction + + MEASUREMENTS: [ + # STEPS_TAKEN, + # PATH_LENGTH, + # DISTANCE_TO_GOAL, + # SUCCESS, + # ORACLE_SUCCESS, + # SPL, + # NDTW, + # TOP_DOWN_MAP_VLNCE + ] + ######################################### + # TOP_DOWN_MAP_VLNCE: + # DRAW_SHORTEST_PATH: False + # DRAW_REFERENCE_PATH: True + # DRAW_FIXED_WAYPOINTS: False + # DRAW_MP3D_AGENT_PATH: False + # FOG_OF_WAR: + # DRAW: True + # FOV: 360 + + SUCCESS: + SUCCESS_DISTANCE: 3.0 + SPL: + SUCCESS_DISTANCE: 3.0 + NDTW: + SUCCESS_DISTANCE: 3.0 + GT_PATH: data/datasets/RxR_VLNCE_v0/{split}/{split}_{role}_gt.json.gz + SDTW: + SUCCESS_DISTANCE: 3.0 + GT_PATH: data/datasets/RxR_VLNCE_v0/{split}/{split}_{role}_gt.json.gz + ORACLE_SUCCESS: + SUCCESS_DISTANCE: 3.0 + +DATASET: + TYPE: RxR-VLN-CE-v1 + SPLIT: train + ROLES: [guide] # "*", "guide", "follower" + LANGUAGES: [hi-IN] # "*", "te-IN", "hi-IN", "en-US", "en-IN" + DATA_PATH: data/datasets/RxR_VLNCE_v0/{split}/{split}_{role}.json.gz + SCENES_DIR: data/scene_datasets/ diff --git a/Downstream/Visual-Language-Navigation/habitat_extensions/config/rxr_vlnce_te.yaml b/Downstream/Visual-Language-Navigation/habitat_extensions/config/rxr_vlnce_te.yaml new file mode 100644 index 0000000..f90d805 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/habitat_extensions/config/rxr_vlnce_te.yaml @@ -0,0 +1,81 @@ +ENVIRONMENT: + MAX_EPISODE_STEPS: 20 + +SIMULATOR: + TYPE: + Sim-v1 + TURN_ANGLE: 30 + TILT_ANGLE: 30 + ACTION_SPACE_CONFIG: v0 # v1 + AGENT_0: + SENSORS: [RGB_SENSOR, DEPTH_SENSOR] + HEIGHT: 0.88 + RADIUS: 0.18 + HABITAT_SIM_V0: + GPU_DEVICE_ID: 0 + ALLOW_SLIDING: False + RGB_SENSOR: + WIDTH: 640 + HEIGHT: 480 + HFOV: 79 + POSITION: [0, 0.88, 0] + DEPTH_SENSOR: + WIDTH: 640 + HEIGHT: 480 + HFOV: 79 + MIN_DEPTH: 0.5 + MAX_DEPTH: 5.0 + POSITION: [0, 0.88, 0] + +TASK: + TYPE: VLN-v0 + # POSSIBLE_ACTIONS: [STOP, MOVE_FORWARD, TURN_LEFT, TURN_RIGHT, LOOK_UP, LOOK_DOWN] + POSSIBLE_ACTIONS: 
[STOP, MOVE_FORWARD, TURN_LEFT, TURN_RIGHT, HIGHTOLOW] + SUCCESS_DISTANCE: 3.0 + SENSORS: [ + RXR_INSTRUCTION_SENSOR, + SHORTEST_PATH_SENSOR, + VLN_ORACLE_PROGRESS_SENSOR + ] + INSTRUCTION_SENSOR_UUID: rxr_instruction + + MEASUREMENTS: [ + # STEPS_TAKEN, + # PATH_LENGTH, + # DISTANCE_TO_GOAL, + # SUCCESS, + # ORACLE_SUCCESS, + # SPL, + # NDTW, + # TOP_DOWN_MAP_VLNCE + ] + ######################################### + # TOP_DOWN_MAP_VLNCE: + # DRAW_SHORTEST_PATH: False + # DRAW_REFERENCE_PATH: True + # DRAW_FIXED_WAYPOINTS: False + # DRAW_MP3D_AGENT_PATH: False + # FOG_OF_WAR: + # DRAW: True + # FOV: 360 + + SUCCESS: + SUCCESS_DISTANCE: 3.0 + SPL: + SUCCESS_DISTANCE: 3.0 + NDTW: + SUCCESS_DISTANCE: 3.0 + GT_PATH: data/datasets/RxR_VLNCE_v0/{split}/{split}_{role}_gt.json.gz + SDTW: + SUCCESS_DISTANCE: 3.0 + GT_PATH: data/datasets/RxR_VLNCE_v0/{split}/{split}_{role}_gt.json.gz + ORACLE_SUCCESS: + SUCCESS_DISTANCE: 3.0 + +DATASET: + TYPE: RxR-VLN-CE-v1 + SPLIT: train + ROLES: [guide] # "*", "guide", "follower" + LANGUAGES: [te-IN] # "*", "te-IN", "hi-IN", "en-US", "en-IN" + DATA_PATH: data/datasets/RxR_VLNCE_v0/{split}/{split}_{role}.json.gz + SCENES_DIR: data/scene_datasets/ diff --git a/Downstream/Visual-Language-Navigation/habitat_extensions/habitat_simulator.py b/Downstream/Visual-Language-Navigation/habitat_extensions/habitat_simulator.py new file mode 100755 index 0000000..d920c6e --- /dev/null +++ b/Downstream/Visual-Language-Navigation/habitat_extensions/habitat_simulator.py @@ -0,0 +1,177 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +from typing import ( + TYPE_CHECKING, + Any, + Dict, + List, + Optional, + Sequence, + Set, + Union, + cast, +) + +import numpy as np +from gym import spaces +from gym.spaces.box import Box +from numpy import ndarray + +if TYPE_CHECKING: + from torch import Tensor + +import habitat_sim + +from habitat_sim.simulator import MutableMapping, MutableMapping_T +from habitat.sims.habitat_simulator.habitat_simulator import HabitatSim +from habitat.core.dataset import Episode +from habitat.core.registry import registry +from habitat.core.simulator import ( + AgentState, + Config, + DepthSensor, + Observations, + RGBSensor, + SemanticSensor, + Sensor, + SensorSuite, + ShortestPathPoint, + Simulator, + VisualObservation, +) +from habitat.core.spaces import Space +from collections import OrderedDict + +# inherit habitat-lab/habitat/sims/habitat_simulator/habitat_simulator.py +@registry.register_simulator(name="Sim-v1") +class Simulator(HabitatSim): + r"""Simulator wrapper over habitat-sim + habitat-sim repo: https://github.com/facebookresearch/habitat-sim + Args: + config: configuration for initializing the simulator. 
+ """ + + def __init__(self, config: Config) -> None: + super().__init__(config) + + def step_without_obs(self, + action: Union[str, int, MutableMapping_T[int, Union[str, int]]], + dt: float = 1.0 / 60.0,): + self._num_total_frames += 1 + if isinstance(action, MutableMapping): + return_single = False + else: + action = cast(Dict[int, Union[str, int]], {self._default_agent_id: action}) + return_single = True + collided_dict: Dict[int, bool] = {} + for agent_id, agent_act in action.items(): + agent = self.get_agent(agent_id) + collided_dict[agent_id] = agent.act(agent_act) + self.__last_state[agent_id] = agent.get_state() + + # # step physics by dt + # step_start_Time = time.time() + # super().step_world(dt) + # self._previous_step_time = time.time() - step_start_Time + + multi_observations = {} + for agent_id in action.keys(): + agent_observation = {} + agent_observation["collided"] = collided_dict[agent_id] + multi_observations[agent_id] = agent_observation + + if return_single: + sim_obs = multi_observations[self._default_agent_id] + else: + sim_obs = multi_observations + + self._prev_sim_obs = sim_obs + + def step_with_specific_sensors(self, + sensors, + action: Union[str, int, MutableMapping_T[int, Union[str, int]]], + dt: float = 1.0 / 60.0,): + self._num_total_frames += 1 + if isinstance(action, MutableMapping): + return_single = False + else: + action = cast(Dict[int, Union[str, int]], {self._default_agent_id: action}) + return_single = True + collided_dict: Dict[int, bool] = {} + for agent_id, agent_act in action.items(): + agent = self.get_agent(agent_id) + collided_dict[agent_id] = agent.act(agent_act) + self.__last_state[agent_id] = agent.get_state() + + # # step physics by dt + # step_start_Time = time.time() + # super().step_world(dt) + # self._previous_step_time = time.time() - step_start_Time + + multi_observations = self.get_specific_sensors_observations(sensors = sensors) + for agent_id in action.keys(): + agent_observation = {} + agent_observation["collided"] = collided_dict[agent_id] + multi_observations[agent_id] = agent_observation + + if return_single: + sim_obs = multi_observations[self._default_agent_id] + else: + sim_obs = multi_observations + + self._prev_sim_obs = sim_obs + + return multi_observations + + def get_specific_sensors_observations( + self, sensors, agent_ids: Union[int, List[int]] = 0, + ) -> Union[ + Dict[str, Union[ndarray, "Tensor"]], + Dict[int, Dict[str, Union[ndarray, "Tensor"]]], + ]: + if isinstance(agent_ids, int): + agent_ids = [agent_ids] + return_single = True + else: + return_single = False + + for agent_id in agent_ids: + agent_sensorsuite = sensors[agent_id] + for _sensor_uuid, sensor in agent_sensorsuite.items(): + sensor.draw_observation() + + # As backport. 
All Dicts are ordered in Python >= 3.7 + observations: Dict[int, Dict[str, Union[ndarray, "Tensor"]]] = OrderedDict() + for agent_id in agent_ids: + agent_observations: Dict[str, Union[ndarray, "Tensor"]] = {} + for sensor_uuid, sensor in sensors[agent_id].items(): + agent_observations[sensor_uuid] = sensor.get_observation() + observations[agent_id] = agent_observations + if return_single: + return next(iter(observations.values())) + return observations + + # def render_specific_sensors(self, sensors, mode: str = "rgb") -> Any: + # r""" + # Args: + # mode: sensor whose observation is used for returning the frame, + # eg: "rgb", "depth", "semantic" + + # Returns: + # rendered frame according to the mode + # """ + # sim_obs = self.get_specific_sensors_observations(sensors = sensors) + # observations = self._sensor_suite.get_observations(sim_obs) + + # output = observations.get(mode) + # assert output is not None, "mode {} sensor is not active".format(mode) + # if not isinstance(output, np.ndarray): + # # If it is not a numpy array, it is a torch tensor + # # The function expects the result to be a numpy array + # output = output.to("cpu").numpy() + + # return output \ No newline at end of file diff --git a/Downstream/Visual-Language-Navigation/habitat_extensions/maps.py b/Downstream/Visual-Language-Navigation/habitat_extensions/maps.py new file mode 100755 index 0000000..61ebda2 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/habitat_extensions/maps.py @@ -0,0 +1,300 @@ +from typing import Dict, List, Optional, Tuple, Union + +import networkx as nx +import numpy as np +from habitat.core.simulator import Simulator +from habitat.core.utils import try_cv2_import +from habitat.tasks.vln.vln import VLNEpisode +from habitat.utils.visualizations import maps as habitat_maps + +cv2 = try_cv2_import() + +AGENT_SPRITE = habitat_maps.AGENT_SPRITE + +MAP_THICKNESS_SCALAR: int = 128 +MAP_INVALID_POINT = 0 +MAP_VALID_POINT = 1 +MAP_BORDER_INDICATOR = 2 +MAP_SOURCE_POINT_INDICATOR = 3 +MAP_TARGET_POINT_INDICATOR = 4 +MAP_MP3D_WAYPOINT = 5 +MAP_VIEW_POINT_INDICATOR = 6 +MAP_TARGET_BOUNDING_BOX = 7 +MAP_REFERENCE_POINT = 8 +MAP_MP3D_REFERENCE_PATH = 9 +MAP_SHORTEST_PATH_WAYPOINT = 10 + +TOP_DOWN_MAP_COLORS = np.full((256, 3), 150, dtype=np.uint8) +TOP_DOWN_MAP_COLORS[12:] = cv2.applyColorMap( + np.arange(244, dtype=np.uint8), cv2.COLORMAP_JET +).squeeze(1)[:, ::-1] +TOP_DOWN_MAP_COLORS[MAP_INVALID_POINT] = [255, 255, 255] # White +TOP_DOWN_MAP_COLORS[MAP_VALID_POINT] = [150, 150, 150] # Light Grey +TOP_DOWN_MAP_COLORS[MAP_BORDER_INDICATOR] = [50, 50, 50] # Grey +TOP_DOWN_MAP_COLORS[MAP_SOURCE_POINT_INDICATOR] = [0, 0, 200] # Blue +TOP_DOWN_MAP_COLORS[MAP_TARGET_POINT_INDICATOR] = [200, 0, 0] # Red +TOP_DOWN_MAP_COLORS[MAP_MP3D_WAYPOINT] = [0, 200, 0] # Green +TOP_DOWN_MAP_COLORS[MAP_VIEW_POINT_INDICATOR] = [245, 150, 150] # Light Red +TOP_DOWN_MAP_COLORS[MAP_TARGET_BOUNDING_BOX] = [0, 175, 0] # Dark Green +TOP_DOWN_MAP_COLORS[MAP_REFERENCE_POINT] = [0, 0, 0] # Black +TOP_DOWN_MAP_COLORS[MAP_MP3D_REFERENCE_PATH] = [0, 0, 0] # Black +TOP_DOWN_MAP_COLORS[MAP_SHORTEST_PATH_WAYPOINT] = [0, 150, 0] # Dark Green + + +def get_top_down_map(sim, map_resolution, meters_per_pixel): + base_height = sim.get_agent(0).state.position[1] + td_map = habitat_maps.get_topdown_map( + sim.pathfinder, + base_height, + map_resolution, + False, + meters_per_pixel, + ) + return td_map + + +def colorize_topdown_map( + top_down_map: np.ndarray, + fog_of_war_mask: Optional[np.ndarray] = None, + fog_of_war_desat_amount: float = 
0.5, +) -> np.ndarray: + r"""Same as `maps.colorize_topdown_map` in Habitat-Lab, but with different map + colors. + """ + _map = TOP_DOWN_MAP_COLORS[top_down_map] + + if fog_of_war_mask is not None: + fog_of_war_desat_values = np.array([[fog_of_war_desat_amount], [1.0]]) + # Only desaturate things that are valid points as only valid points get revealed + desat_mask = top_down_map != MAP_INVALID_POINT + + _map[desat_mask] = ( + _map * fog_of_war_desat_values[fog_of_war_mask] + ).astype(np.uint8)[desat_mask] + + return _map + + +def static_to_grid( + realworld_x: float, + realworld_y: float, + grid_resolution: Tuple[int, int], + bounds: Dict[str, Tuple[float, float]], +): + r"""Return gridworld index of realworld coordinates assuming top-left corner + is the origin. The real world coordinates of lower left corner are + (coordinate_min, coordinate_min) and of top right corner are + (coordinate_max, coordinate_max). Same as the habitat-Lab maps.to_grid function + but with a static `bounds` instead of requiring a SIM/pathfinder instance. + """ + grid_size = ( + abs(bounds["upper"][2] - bounds["lower"][2]) / grid_resolution[0], + abs(bounds["upper"][0] - bounds["lower"][0]) / grid_resolution[1], + ) + grid_x = int((realworld_x - bounds["lower"][2]) / grid_size[0]) + grid_y = int((realworld_y - bounds["lower"][0]) / grid_size[1]) + return grid_x, grid_y + + +def drawline( + img: np.ndarray, + pt1: Union[Tuple[float], List[float]], + pt2: Union[Tuple[float], List[float]], + color: List[int], + thickness: int = 1, + style: str = "dotted", + gap: int = 15, +) -> None: + """https://stackoverflow.com/questions/26690932/opencv-rectangle-with-dotted-or-dashed-lines + style: "dotted", "dashed", or "filled" + """ + assert style in ["dotted", "dashed", "filled"] + + if style == "filled": + cv2.line(img, pt1, pt2, color, thickness) + return + + dist = ((pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2) ** 0.5 + pts = [] + for i in np.arange(0, dist, gap): + r = i / dist + x = int((pt1[0] * (1 - r) + pt2[0] * r) + 0.5) + y = int((pt1[1] * (1 - r) + pt2[1] * r) + 0.5) + pts.append((x, y)) + + if style == "dotted": + for p in pts: + cv2.circle(img, p, thickness, color, -1) + else: + s = pts[0] + e = pts[0] + for i, p in enumerate(pts): + s = e + e = p + if i % 2 == 1: + cv2.line(img, s, e, color, thickness) + + +def drawpoint( + img: np.ndarray, + position: Union[Tuple[int], List[int]], + color: List[int], + meters_per_px: float, + pad: float = 0.3, +) -> None: + point_padding = int(pad / meters_per_px) + img[ + position[0] - point_padding : position[0] + point_padding + 1, + position[1] - point_padding : position[1] + point_padding + 1, + ] = color + + +def draw_reference_path( + img: np.ndarray, + sim: Simulator, + episode: VLNEpisode, + map_resolution: int, + meters_per_px: float, +): + r"""Draws lines between each waypoint in the reference path.""" + shortest_path_points = [ + habitat_maps.to_grid( + p[2], + p[0], + img.shape[0:2], + sim, + )[::-1] + for p in episode.reference_path + ] + + pt_from = None + for i, pt_to in enumerate(shortest_path_points): + + if i != 0: + drawline( + img, + (pt_from[0], pt_from[1]), + (pt_to[0], pt_to[1]), + MAP_SHORTEST_PATH_WAYPOINT, + thickness=int(0.4 * map_resolution / MAP_THICKNESS_SCALAR), + style="dashed", + gap=10, + ) + pt_from = pt_to + + for pt in shortest_path_points: + drawpoint( + img, (pt[1], pt[0]), MAP_SHORTEST_PATH_WAYPOINT, meters_per_px + ) + + +def draw_straight_shortest_path_points( + img: np.ndarray, + sim: Simulator, + map_resolution: int, + 
shortest_path_points: List[List[float]], +): + r"""Draws the shortest path from start to goal assuming a standard + discrete action space. + """ + shortest_path_points = [ + habitat_maps.to_grid(p[2], p[0], img.shape[0:2], sim)[::-1] + for p in shortest_path_points + ] + + habitat_maps.draw_path( + img, + [(p[1], p[0]) for p in shortest_path_points], + MAP_SHORTEST_PATH_WAYPOINT, + int(0.4 * map_resolution / MAP_THICKNESS_SCALAR), + ) + + +def draw_source_and_target( + img: np.ndarray, sim: Simulator, episode: VLNEpisode, meters_per_px: float +): + s_x, s_y = habitat_maps.to_grid( + episode.start_position[2], + episode.start_position[0], + img.shape[0:2], + sim, + ) + drawpoint(img, (s_x, s_y), MAP_SOURCE_POINT_INDICATOR, meters_per_px) + + # mark target point + t_x, t_y = habitat_maps.to_grid( + episode.goals[0].position[2], + episode.goals[0].position[0], + img.shape[0:2], + sim, + ) + drawpoint(img, (t_x, t_y), MAP_TARGET_POINT_INDICATOR, meters_per_px) + + +def get_nearest_node(graph: nx.Graph, current_position: List[float]) -> str: + """Determine the closest MP3D node to the agent's start position as given + by a [x,z] position vector. + Returns: + node ID + """ + nearest = None + dist = float("inf") + for node in graph: + node_pos = graph.nodes[node]["position"] + node_pos = np.take(node_pos, (0, 2)) + cur_dist = np.linalg.norm( + np.array(node_pos) - np.array(current_position), ord=2 + ) + if cur_dist < dist: + dist = cur_dist + nearest = node + return nearest + + +def update_nearest_node( + graph: nx.Graph, nearest_node: str, current_position: np.array +) -> str: + """Determine the closest MP3D node to the agent's current position as + given by a [x,z] position vector. The selected node must be reachable + from the previous MP3D node as specified in the nav-graph edges. + Returns: + node ID + """ + nearest = None + dist = float("inf") + + for node in [nearest_node] + [e[1] for e in graph.edges(nearest_node)]: + node_pos = graph.nodes[node]["position"] + node_pos = np.take(node_pos, (0, 2)) + cur_dist = np.linalg.norm( + np.array(node_pos) - np.array(current_position), ord=2 + ) + if cur_dist < dist: + dist = cur_dist + nearest = node + return nearest + + +def draw_mp3d_nodes( + img: np.ndarray, + sim: Simulator, + episode: VLNEpisode, + graph: nx.Graph, + meters_per_px: float, +): + n = get_nearest_node( + graph, (episode.start_position[0], episode.start_position[2]) + ) + starting_height = graph.nodes[n]["position"][1] + for node in graph: + pos = graph.nodes[node]["position"] + + # no obvious way to differentiate between floors. Use this for now. 
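+        # Heuristic: nodes within 1.0m of the starting node's height are
+        # treated as the same floor and drawn only over navigable map cells.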
+ if abs(pos[1] - starting_height) < 1.0: + r_x, r_y = habitat_maps.to_grid( + pos[2], pos[0], img.shape[0:2], sim + ) + + # only paint if over a valid point + if img[r_x, r_y]: + drawpoint(img, (r_x, r_y), MAP_MP3D_WAYPOINT, meters_per_px) diff --git a/Downstream/Visual-Language-Navigation/habitat_extensions/measures.py b/Downstream/Visual-Language-Navigation/habitat_extensions/measures.py new file mode 100755 index 0000000..6e1de41 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/habitat_extensions/measures.py @@ -0,0 +1,572 @@ +import gzip +import json +import pickle +from typing import Any, List, Union + +import numpy as np +from dtw import dtw +from fastdtw import fastdtw +from habitat.config import Config +from habitat.core.embodied_task import EmbodiedTask, Measure +from habitat.core.registry import registry +from habitat.core.simulator import Simulator +from habitat.tasks.nav.nav import DistanceToGoal, Success +from habitat.tasks.utils import cartesian_to_polar +from habitat.utils.geometry_utils import quaternion_rotate_vector +from habitat.utils.visualizations import fog_of_war +from habitat.utils.visualizations import maps as habitat_maps + +from habitat_extensions import maps + +@registry.register_measure +class Position(Measure): + r"""Path Length (PL) + + PL = sum(geodesic_distance(agent_prev_position, agent_position) + over all agent positions. + """ + + cls_uuid: str = "position" + + def __init__( + self, sim: Simulator, config: Config, *args: Any, **kwargs: Any + ): + self._sim = sim + self._config = config + + super().__init__(**kwargs) + + def _get_uuid(self, *args: Any, **kwargs: Any) -> str: + return self.cls_uuid + + def reset_metric(self, episode, *args: Any, **kwargs: Any): + self._metric = {'distance':[], 'position':[]} + self.update_metric(episode) + + def update_metric(self, episode, *args: Any, **kwargs: Any): + current_position = self._sim.get_agent_state().position + if len(self._metric['position']) > 0: + if (current_position == self._metric['position'][-1]).all(): + return + distance = self._sim.geodesic_distance( + current_position, + [goal.position for goal in episode.goals], + episode, + ) + self._metric['position'].append(self._sim.get_agent_state().position) + self._metric['distance'].append(distance) + +@registry.register_measure +class PathLength(Measure): + r"""Path Length (PL) + + PL = sum(geodesic_distance(agent_prev_position, agent_position) + over all agent positions. + """ + + cls_uuid: str = "path_length" + + @staticmethod + def euclidean_distance( + position_a: np.ndarray, position_b: np.ndarray + ) -> float: + return np.linalg.norm(position_b - position_a, ord=2) + + def __init__( + self, sim: Simulator, config: Config, *args: Any, **kwargs: Any + ): + self._sim = sim + self._config = config + + super().__init__(**kwargs) + + def _get_uuid(self, *args: Any, **kwargs: Any) -> str: + return self.cls_uuid + + def reset_metric(self, *args: Any, **kwargs: Any): + self._previous_position = self._sim.get_agent_state().position + self._metric = 0.0 + + def update_metric(self, *args: Any, **kwargs: Any): + current_position = self._sim.get_agent_state().position + self._metric += self.euclidean_distance( + current_position, self._previous_position + ) + self._previous_position = current_position + + +@registry.register_measure +class OracleNavigationError(Measure): + r"""Oracle Navigation Error (ONE) + + ONE = min(geosdesic_distance(agent_pos, goal)) + over all locations in the agent's path. 
+ """ + + cls_uuid: str = "oracle_navigation_error" + + def __init__( + self, sim: Simulator, config: Config, *args: Any, **kwargs: Any + ): + self._sim = sim + self._config = config + super().__init__() + + def _get_uuid(self, *args: Any, **kwargs: Any) -> str: + return self.cls_uuid + + def reset_metric( + self, episode, task: EmbodiedTask, *args: Any, **kwargs: Any + ): + task.measurements.check_measure_dependencies( + self.uuid, [DistanceToGoal.cls_uuid] + ) + self._metric = float("inf") + self.update_metric(episode, task) + + def update_metric(self, episode, task: EmbodiedTask, **kwargs: Any): + distance_to_target = task.measurements.measures[ + DistanceToGoal.cls_uuid + ].get_metric() + self._metric = min(self._metric, distance_to_target) + + +@registry.register_measure +class OracleSuccess(Measure): + r"""Oracle Success Rate (OSR) + + OSR = I(ONE <= goal_radius), + where ONE is Oracle Navigation Error. + """ + + cls_uuid: str = "oracle_success" + + def __init__( + self, sim: Simulator, config: Config, *args: Any, **kwargs: Any + ): + self._sim = sim + self._config = config + super().__init__() + + def _get_uuid(self, *args: Any, **kwargs: Any) -> str: + return self.cls_uuid + + def reset_metric( + self, episode, task: EmbodiedTask, *args: Any, **kwargs: Any + ): + task.measurements.check_measure_dependencies( + self.uuid, [DistanceToGoal.cls_uuid] + ) + self._metric = 0 + self.update_metric(episode, task) + + def update_metric( + self, episode, task: EmbodiedTask, *args: Any, **kwargs: Any + ): + d = task.measurements.measures[DistanceToGoal.cls_uuid].get_metric() + self._metric = float(self._metric or d < self._config.SUCCESS_DISTANCE) + + +@registry.register_measure +class OracleSPL(Measure): + r"""OracleSPL (Oracle Success weighted by Path Length) + + OracleSPL = max(SPL) over all points in the agent path + """ + + cls_uuid: str = "oracle_spl" + + def __init__( + self, sim: Simulator, config: Config, *args: Any, **kwargs: Any + ): + self._sim = sim + self._config = config + super().__init__() + + def _get_uuid(self, *args: Any, **kwargs: Any) -> str: + return self.cls_uuid + + def reset_metric( + self, *args: Any, episode, task: EmbodiedTask, **kwargs: Any + ): + task.measurements.check_measure_dependencies(self.uuid, ["spl"]) + self._metric = 0.0 + + def update_metric( + self, episode, task: EmbodiedTask, *args: Any, **kwargs: Any + ): + spl = task.measurements.measures["spl"].get_metric() + self._metric = max(self._metric, spl) + + +@registry.register_measure +class StepsTaken(Measure): + r"""Counts the number of times update_metric() is called. This is equal to + the number of times that the agent takes an action. STOP counts as an + action. + """ + + cls_uuid: str = "steps_taken" + + def __init__(self, *args: Any, **kwargs: Any): + super().__init__() + + def _get_uuid(self, *args: Any, **kwargs: Any) -> str: + return self.cls_uuid + + def reset_metric(self, *args: Any, **kwargs: Any): + self._metric = 0.0 + + def update_metric(self, *args: Any, **kwargs: Any): + self._metric += 1.0 + + +@registry.register_measure +class NDTW(Measure): + r"""NDTW (Normalized Dynamic Time Warping) + + ref: Effective and General Evaluation for Instruction + Conditioned Navigation using Dynamic Time + Warping - Magalhaes et. 
al + https://arxiv.org/pdf/1907.05446.pdf + """ + + cls_uuid: str = "ndtw" + + @staticmethod + def euclidean_distance( + position_a: Union[List[float], np.ndarray], + position_b: Union[List[float], np.ndarray], + ) -> float: + return np.linalg.norm( + np.array(position_b) - np.array(position_a), ord=2 + ) + + def __init__( + self, sim: Simulator, config: Config, *args: Any, **kwargs: Any + ): + self._sim = sim + self._config = config + self.dtw_func = fastdtw if config.FDTW else dtw + + if "{role}" in config.GT_PATH: + self.gt_json = {} + for role in RxRVLNCEDatasetV1.annotation_roles: + with gzip.open( + config.GT_PATH.format(split=config.SPLIT, role=role), "rt" + ) as f: + self.gt_json.update(json.load(f)) + else: + with gzip.open( + config.GT_PATH.format(split=config.SPLIT), "rt" + ) as f: + self.gt_json = json.load(f) + + super().__init__() + + def _get_uuid(self, *args: Any, **kwargs: Any) -> str: + return self.cls_uuid + + def reset_metric(self, episode, *args: Any, **kwargs: Any): + self.locations = [] + self.gt_locations = self.gt_json[str(episode.episode_id)]["locations"] + self.update_metric() + + def update_metric(self, *args: Any, **kwargs: Any): + current_position = self._sim.get_agent_state().position.tolist() + if len(self.locations) == 0: + self.locations.append(current_position) + else: + if current_position == self.locations[-1]: + return + self.locations.append(current_position) + + dtw_distance = self.dtw_func( + self.locations, self.gt_locations, dist=self.euclidean_distance + )[0] + + nDTW = np.exp( + -dtw_distance + / (len(self.gt_locations) * self._config.SUCCESS_DISTANCE) + ) + self._metric = nDTW + + +@registry.register_measure +class SDTW(Measure): + r"""SDTW (Success Weighted be nDTW) + + ref: Effective and General Evaluation for Instruction + Conditioned Navigation using Dynamic Time + Warping - Magalhaes et. al + https://arxiv.org/pdf/1907.05446.pdf + """ + + cls_uuid: str = "sdtw" + + def __init__( + self, sim: Simulator, config: Config, *args: Any, **kwargs: Any + ): + self._sim = sim + self._config = config + + super().__init__() + + def _get_uuid(self, *args: Any, **kwargs: Any) -> str: + return self.cls_uuid + + def reset_metric(self, episode, task, *args: Any, **kwargs: Any): + task.measurements.check_measure_dependencies( + self.uuid, [NDTW.cls_uuid, Success.cls_uuid] + ) + self.update_metric(episode, task) + + def update_metric( + self, episode, task: EmbodiedTask, *args: Any, **kwargs: Any + ): + ep_success = task.measurements.measures[Success.cls_uuid].get_metric() + nDTW = task.measurements.measures[NDTW.cls_uuid].get_metric() + self._metric = ep_success * nDTW + + +@registry.register_measure +class TopDownMapVLNCE(Measure): + r"""A top down map that optionally shows VLN-related visual information + such as MP3D node locations and MP3D agent traversals. 
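+
+    The computed metric is a dict with keys "map", "fog_of_war_mask",
+    "agent_map_coord", "agent_angle", "bounds" and "meters_per_px"
+    (see update_metric below).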
+ """ + + cls_uuid: str = "top_down_map_vlnce" + + def __init__( + self, *args: Any, sim: Simulator, config: Config, **kwargs: Any + ): + self._sim = sim + self._config = config + with open(self._config.GRAPHS_FILE, "rb") as f: + self._conn_graphs = pickle.load(f) + super().__init__() + + def _get_uuid(self, *args: Any, **kwargs: Any) -> str: + return self.cls_uuid + + def get_original_map(self): + habitat_maps.get_topdown_map_from_sim + top_down_map = maps.get_top_down_map( + self._sim, + self._config.MAP_RESOLUTION, + self._meters_per_pixel, + ) + + if self._config.FOG_OF_WAR.DRAW: + self._fog_of_war_mask = np.zeros_like(top_down_map) + else: + self._fog_of_war_mask = None + + return top_down_map + + def reset_metric(self, *args: Any, episode, **kwargs: Any): + self._scene_id = episode.scene_id.split("/")[-2] + self._step_count = 0 + self._metric = None + self._meters_per_pixel = habitat_maps.calculate_meters_per_pixel( + self._config.MAP_RESOLUTION, self._sim + ) + self._top_down_map = self.get_original_map() + agent_position = self._sim.get_agent_state().position + scene_id = episode.scene_id.split("/")[-1].split(".")[0] + a_x, a_y = habitat_maps.to_grid( + agent_position[2], + agent_position[0], + self._top_down_map.shape[0:2], + sim=self._sim, + ) + self._previous_xy_location = (a_y, a_x) + + if self._config.FOG_OF_WAR.DRAW: + self._fog_of_war_mask = fog_of_war.reveal_fog_of_war( + self._top_down_map, + self._fog_of_war_mask, + np.array([a_x, a_y]), + self.get_polar_angle(), + fov=self._config.FOG_OF_WAR.FOV, + max_line_len=self._config.FOG_OF_WAR.VISIBILITY_DIST + / habitat_maps.calculate_meters_per_pixel( + self._config.MAP_RESOLUTION, sim=self._sim + ), + ) + + if self._config.DRAW_FIXED_WAYPOINTS: + maps.draw_mp3d_nodes( + self._top_down_map, + self._sim, + episode, + self._conn_graphs[scene_id], + self._meters_per_pixel, + ) + + if self._config.DRAW_SHORTEST_PATH: + shortest_path_points = self._sim.get_straight_shortest_path_points( + agent_position, episode.goals[0].position + ) + maps.draw_straight_shortest_path_points( + self._top_down_map, + self._sim, + self._config.MAP_RESOLUTION, + shortest_path_points, + ) + + if self._config.DRAW_REFERENCE_PATH: + maps.draw_reference_path( + self._top_down_map, + self._sim, + episode, + self._config.MAP_RESOLUTION, + self._meters_per_pixel, + ) + + # draw source and target points last to avoid overlap + if self._config.DRAW_SOURCE_AND_TARGET: + maps.draw_source_and_target( + self._top_down_map, + self._sim, + episode, + self._meters_per_pixel, + ) + + # MP3D START NODE + self._nearest_node = maps.get_nearest_node( + self._conn_graphs[scene_id], np.take(agent_position, (0, 2)) + ) + nn_position = self._conn_graphs[self._scene_id].nodes[ + self._nearest_node + ]["position"] + self.s_x, self.s_y = habitat_maps.to_grid( + nn_position[2], + nn_position[0], + self._top_down_map.shape[0:2], + self._sim, + ) + self.update_metric(episode, action=None) + + def update_metric(self, *args: Any, **kwargs: Any): + self._step_count += 1 + ( + house_map, + map_agent_pos, + ) = self.update_map(self._sim.get_agent_state().position) + + self._metric = { + "map": house_map, + "fog_of_war_mask": self._fog_of_war_mask, + "agent_map_coord": map_agent_pos, + "agent_angle": self.get_polar_angle(), + "bounds": { + k: v + for k, v in zip( + ["lower", "upper"], + self._sim.pathfinder.get_bounds(), + ) + }, + "meters_per_px": self._meters_per_pixel, + } + + def get_polar_angle(self): + agent_state = self._sim.get_agent_state() + # quaternion is in x, y, z, w format 
+ ref_rotation = agent_state.rotation + + heading_vector = quaternion_rotate_vector( + ref_rotation.inverse(), np.array([0, 0, -1]) + ) + + phi = cartesian_to_polar(-heading_vector[2], heading_vector[0])[1] + z_neg_z_flip = np.pi + return np.array(phi) + z_neg_z_flip + + def update_map(self, agent_position): + a_x, a_y = habitat_maps.to_grid( + agent_position[2], + agent_position[0], + self._top_down_map.shape[0:2], + self._sim, + ) + # Don't draw over the source point + gradient_color = 15 + min( + self._step_count * 245 // self._config.MAX_EPISODE_STEPS, 245 + ) + if self._top_down_map[a_x, a_y] != maps.MAP_SOURCE_POINT_INDICATOR: + maps.drawline( + self._top_down_map, + self._previous_xy_location, + (a_y, a_x), + gradient_color, + thickness=int( + self._config.MAP_RESOLUTION + * 1.4 + / maps.MAP_THICKNESS_SCALAR + ), + style="filled", + ) + + if self._config.FOG_OF_WAR.DRAW: + self._fog_of_war_mask = fog_of_war.reveal_fog_of_war( + self._top_down_map, + self._fog_of_war_mask, + np.array([a_x, a_y]), + self.get_polar_angle(), + self._config.FOG_OF_WAR.FOV, + max_line_len=self._config.FOG_OF_WAR.VISIBILITY_DIST + / habitat_maps.calculate_meters_per_pixel( + self._config.MAP_RESOLUTION, sim=self._sim + ), + ) + + point_padding = int(0.2 / self._meters_per_pixel) + prev_nearest_node = self._nearest_node + self._nearest_node = maps.update_nearest_node( + self._conn_graphs[self._scene_id], + self._nearest_node, + np.take(agent_position, (0, 2)), + ) + if ( + self._nearest_node != prev_nearest_node + and self._config.DRAW_MP3D_AGENT_PATH + ): + nn_position = self._conn_graphs[self._scene_id].nodes[ + self._nearest_node + ]["position"] + (prev_s_x, prev_s_y) = (self.s_x, self.s_y) + self.s_x, self.s_y = habitat_maps.to_grid( + nn_position[2], + nn_position[0], + self._top_down_map.shape[0:2], + self._sim, + ) + self._top_down_map[ + self.s_x + - int(2.0 / 3.0 * point_padding) : self.s_x + + int(2.0 / 3.0 * point_padding) + + 1, + self.s_y + - int(2.0 / 3.0 * point_padding) : self.s_y + + int(2.0 / 3.0 * point_padding) + + 1, + ] = gradient_color + + maps.drawline( + self._top_down_map, + (prev_s_y, prev_s_x), + (self.s_y, self.s_x), + gradient_color, + thickness=int( + 1.0 + / 2.0 + * np.round( + self._config.MAP_RESOLUTION / maps.MAP_THICKNESS_SCALAR + ) + ), + ) + + self._previous_xy_location = (a_y, a_x) + map_agent_pos = (a_x, a_y) + return self._top_down_map, map_agent_pos diff --git a/Downstream/Visual-Language-Navigation/habitat_extensions/nav.py b/Downstream/Visual-Language-Navigation/habitat_extensions/nav.py new file mode 100755 index 0000000..e3bd4bb --- /dev/null +++ b/Downstream/Visual-Language-Navigation/habitat_extensions/nav.py @@ -0,0 +1,216 @@ +from turtle import heading +from typing import Any + +import math +import numpy as np + +from habitat.core.embodied_task import ( + SimulatorTaskAction, +) +from habitat.core.registry import registry +from habitat.sims.habitat_simulator.actions import HabitatSimActions +from habitat.tasks.utils import cartesian_to_polar +from habitat.utils.geometry_utils import quaternion_rotate_vector + + +@registry.register_task_action +class MoveHighToLowAction(SimulatorTaskAction): + def turn(self, angle): + ''' angle: 0 ~ 360 degree ''' + left_action = HabitatSimActions.TURN_LEFT + right_action = HabitatSimActions.TURN_RIGHT + turn_unit = self._sim.get_agent(0).agent_config.action_space[left_action].actuation.amount + + states = [] + + if 180 < angle <= 360: + angle -= 360 + if angle >=0: + turn_actions = [left_action] * int(angle // turn_unit) + 
else: + turn_actions = [right_action] * int(-angle // turn_unit) + + for turn_action in turn_actions: + self._sim.step_without_obs(turn_action) + state = self._sim.get_agent_state() + states.append((state.position,state.rotation)) + + return states + + def step(self, *args: Any, + angle: float, distance: float, + **kwargs: Any): + r"""This control method is called from ``Env`` on each ``step``. + """ + init_state = self._sim.get_agent_state() + + forward_action = HabitatSimActions.MOVE_FORWARD + init_forward = self._sim.get_agent(0).agent_config.action_space[ + forward_action].actuation.amount + + theta = np.arctan2(init_state.rotation.imag[1], + init_state.rotation.real) + angle / 2 + rotation = np.quaternion(np.cos(theta), 0, np.sin(theta), 0) + + angle = math.degrees(angle) + states = self.turn(angle) + states.append((init_state.position,rotation)) + self._sim.set_agent_state(init_state.position, rotation) + + ksteps = int(distance//init_forward) + for k in range(ksteps): + if k == ksteps - 1: + output = self._sim.step(forward_action) + state = self._sim.get_agent_state() + states.append((state.position,state.rotation)) + else: + self._sim.step_without_obs(forward_action) + state = self._sim.get_agent_state() + states.append((state.position,state.rotation)) + + output['states'] = states + return output + + +@registry.register_task_action +class MoveHighToLowActionEval(SimulatorTaskAction): + def turn(self, angle): + ''' angle: 0 ~ 360 degree ''' + left_action = HabitatSimActions.TURN_LEFT + right_action = HabitatSimActions.TURN_RIGHT + turn_unit = self._sim.get_agent(0).agent_config.action_space[left_action].actuation.amount + + states = [] + + if 180 < angle <= 360: + angle -= 360 + if angle >=0: + turn_actions = [left_action] * int(angle // turn_unit) + else: + turn_actions = [right_action] * int(-angle // turn_unit) + + for turn_action in turn_actions: + self._sim.step_without_obs(turn_action) + state = self._sim.get_agent_state() + states.append((state.position,state.rotation)) + + return states + + def step(self, *args: Any, + angle: float, distance: float, + **kwargs: Any): + r"""This control method is called from ``Env`` on each ``step``. 
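+
+        Args (semantics inferred from the body below):
+            angle: heading offset in radians relative to the agent's current
+                rotation; converted to degrees and executed as discrete
+                TURN_LEFT / TURN_RIGHT sub-steps via turn().
+            distance: forward distance in meters, decomposed into MOVE_FORWARD
+                sub-steps of the simulator's forward step size; only the final
+                sub-step renders observations, the rest use step_without_obs().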
+ """ + init_state = self._sim.get_agent_state() + + positions = [] + collisions = [] + forward_action = HabitatSimActions.MOVE_FORWARD + + init_forward = self._sim.get_agent(0).agent_config.action_space[ + forward_action].actuation.amount + theta = np.arctan2(init_state.rotation.imag[1], + init_state.rotation.real) + angle / 2 + rotation = np.quaternion(np.cos(theta), 0, np.sin(theta), 0) + + angle = math.degrees(angle) + states = self.turn(angle) + states.append((init_state.position,rotation)) + self._sim.set_agent_state(init_state.position, rotation) + + ksteps = int(distance//init_forward) + for k in range(ksteps): + if k == ksteps - 1: + output = self._sim.step(forward_action) + state = self._sim.get_agent_state() + states.append((state.position,state.rotation)) + else: + self._sim.step_without_obs(forward_action) + state = self._sim.get_agent_state() + states.append((state.position,state.rotation)) + + positions.append(self._sim.get_agent_state().position) + collisions.append(self._sim.previous_step_collided) + + output['positions'] = positions + output['collisions'] = collisions + output['states'] = states + + return output + + +@registry.register_task_action +class MoveHighToLowActionInference(SimulatorTaskAction): + + def turn(self, angle): + ''' angle: 0 ~ 360 degree ''' + left_action = HabitatSimActions.TURN_LEFT + right_action = HabitatSimActions.TURN_RIGHT + turn_unit = self._sim.get_agent(0).agent_config.action_space[left_action].actuation.amount + + states = [] + + if 180 < angle <= 360: + angle -= 360 + if angle >=0: + turn_actions = [left_action] * int(angle // turn_unit) + else: + turn_actions = [right_action] * int(-angle // turn_unit) + + for turn_action in turn_actions: + self._sim.step_without_obs(turn_action) + state = self._sim.get_agent_state() + states.append((state.position,state.rotation)) + + return states + + def step(self, *args: Any, + angle: float, distance: float, + **kwargs: Any): + r"""This control method is called from ``Env`` on each ``step``. 
+ """ + init_state = self._sim.get_agent_state() + + cur_path = [] + forward_action = HabitatSimActions.MOVE_FORWARD + + init_forward = self._sim.get_agent(0).agent_config.action_space[ + forward_action].actuation.amount + theta = np.arctan2(init_state.rotation.imag[1], + init_state.rotation.real) + angle / 2 + rotation = np.quaternion(np.cos(theta), 0, np.sin(theta), 0) + + angle = math.degrees(angle) + states = self.turn(angle) + states.append((init_state.position,rotation)) + self._sim.set_agent_state(init_state.position, rotation) + + ksteps = int(distance//init_forward) + for k in range(ksteps): + if k == ksteps - 1: + output = self._sim.step(forward_action) + cur_path.append(self.get_agent_info()) + state = self._sim.get_agent_state() + states.append((state.position,state.rotation)) + else: + self._sim.step_without_obs(forward_action) + cur_path.append(self.get_agent_info()) + state = self._sim.get_agent_state() + states.append((state.position,state.rotation)) + + output['cur_path'] = cur_path + output['states'] = states + return output + + + def get_agent_info(self): + agent_state = self._sim.get_agent_state() + heading_vector = quaternion_rotate_vector( + agent_state.rotation.inverse(), np.array([0, 0, -1]) + ) + heading = cartesian_to_polar(-heading_vector[2], heading_vector[0])[1] + return { + "position": agent_state.position.tolist(), + "heading": heading, + "stop": False, + } \ No newline at end of file diff --git a/Downstream/Visual-Language-Navigation/habitat_extensions/obs_transformers.py b/Downstream/Visual-Language-Navigation/habitat_extensions/obs_transformers.py new file mode 100755 index 0000000..74a3fc1 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/habitat_extensions/obs_transformers.py @@ -0,0 +1,191 @@ +import copy +import numbers +from typing import Dict, List, Tuple, Union + +import torch +from gym import spaces +from habitat.config import Config +from habitat.core.logging import logger +from habitat_baselines.common.baseline_registry import baseline_registry +from habitat_baselines.common.obs_transformers import ObservationTransformer +from habitat_baselines.utils.common import ( + center_crop, + get_image_height_width, + overwrite_gym_box_shape, +) +from torch import Tensor + + +@baseline_registry.register_obs_transformer() +class CenterCropperPerSensor(ObservationTransformer): + """An observation transformer that center crops your input on a per-sensor basis.""" + + sensor_crops: Dict[str, Union[int, Tuple[int, int]]] + channels_last: bool + + def __init__( + self, + sensor_crops: List[Tuple[str, Union[int, Tuple[int, int]]]], + channels_last: bool = True, + ): + """Args: + size: A sequence (h, w) or int of the size you wish to resize/center_crop. + If int, assumes square crop + channels_list: indicates if channels is the last dimension + trans_keys: The list of sensors it will try to centercrop. 
+ """ + super().__init__() + + self.sensor_crops = dict(sensor_crops) + for k in self.sensor_crops: + size = self.sensor_crops[k] + if isinstance(size, numbers.Number): + self.sensor_crops[k] = (int(size), int(size)) + assert len(size) == 2, "forced input size must be len of 2 (h, w)" + + self.channels_last = channels_last + + def transform_observation_space( + self, + observation_space: spaces.Dict, + ): + observation_space = copy.deepcopy(observation_space) + for key in observation_space.spaces: + if ( + key in self.sensor_crops + and observation_space.spaces[key].shape[-3:-1] + != self.sensor_crops[key] + ): + h, w = get_image_height_width( + observation_space.spaces[key], channels_last=True + ) + logger.info( + "Center cropping observation size of %s from %s to %s" + % (key, (h, w), self.sensor_crops[key]) + ) + + observation_space.spaces[key] = overwrite_gym_box_shape( + observation_space.spaces[key], self.sensor_crops[key] + ) + return observation_space + + @torch.no_grad() + def forward(self, observations: Dict[str, Tensor]) -> Dict[str, Tensor]: + observations.update( + { + sensor: center_crop( + observations[sensor], + self.sensor_crops[sensor], + channels_last=self.channels_last, + ) + for sensor in self.sensor_crops + if sensor in observations + } + ) + return observations + + @classmethod + def from_config(cls, config: Config): + cc_config = config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR + return cls(cc_config.SENSOR_CROPS) + +@baseline_registry.register_obs_transformer() +class ResizerPerSensor(ObservationTransformer): + r"""An nn module the resizes images to any aspect ratio. + This module assumes that all images in the batch are of the same size. + """ + + def __init__( + self, + sizes: int, + channels_last: bool = True, + trans_keys: Tuple[str] = ("rgb", "depth", "semantic"), + ): + super().__init__() + """Args: + size: The size you want to resize + channels_last: indicates if channels is the last dimension + """ + self.sensor_resizes = dict(sizes) + for k in self.sensor_resizes: + size = self.sensor_resizes[k] + if isinstance(size, numbers.Number): + self.sensor_resizes[k] = (int(size), int(size)) + assert len(size) == 2, "forced input size must be len of 2 (h, w)" + + self.channels_last = channels_last + + def transform_observation_space( + self, + observation_space: spaces.Dict, + ): + + for key in observation_space.spaces: + if ( + key in self.sensor_resizes + and observation_space.spaces[key].shape[-3:-1] + != self.sensor_resizes[key] + ): + h, w = get_image_height_width( + observation_space.spaces[key], channels_last=True + ) + logger.info( + "Resizing observation size of %s from %s to %s" + % (key, (h, w), self.sensor_resizes[key]) + ) + + observation_space.spaces[key] = overwrite_gym_box_shape( + observation_space.spaces[key], self.sensor_resizes[key] + ) + + return observation_space + + def _transform_obs(self, obs: torch.Tensor, size) -> torch.Tensor: + img = torch.as_tensor(obs) + no_batch_dim = len(img.shape) == 3 + if len(img.shape) < 3 or len(img.shape) > 5: + raise NotImplementedError() + if no_batch_dim: + img = img.unsqueeze(0) # Adds a batch dimension + h, w = get_image_height_width(img, channels_last=self.channels_last) + if self.channels_last: + if len(img.shape) == 4: + # NHWC -> NCHW + img = img.permute(0, 3, 1, 2) + else: + # NDHWC -> NDCHW + img = img.permute(0, 1, 4, 2, 3) + + h, w = size + img = torch.nn.functional.interpolate( + img.float(), size=(h, w), mode="area" + ).to(dtype=img.dtype) + if self.channels_last: + if len(img.shape) == 
4: + # NCHW -> NHWC + img = img.permute(0, 2, 3, 1) + else: + # NDCHW -> NDHWC + img = img.permute(0, 1, 3, 4, 2) + if no_batch_dim: + img = img.squeeze(dim=0) # Removes the batch dimension + return img + + @torch.no_grad() + def forward( + self, observations: Dict[str, torch.Tensor] + ) -> Dict[str, torch.Tensor]: + observations.update( + { + sensor: self._transform_obs( + observations[sensor], self.sensor_resizes[sensor]) + for sensor in self.sensor_resizes + if sensor in observations + } + ) + return observations + + @classmethod + def from_config(cls, config: Config): + r_config = config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR + return cls(r_config.SIZES) diff --git a/Downstream/Visual-Language-Navigation/habitat_extensions/sensors.py b/Downstream/Visual-Language-Navigation/habitat_extensions/sensors.py new file mode 100755 index 0000000..d92a234 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/habitat_extensions/sensors.py @@ -0,0 +1,150 @@ +from typing import Any, Dict + +import numpy as np +from gym import spaces +from habitat.config import Config +from habitat.core.registry import registry +from habitat.core.simulator import Observations, Sensor, SensorTypes, Simulator +from habitat.sims.habitat_simulator.actions import HabitatSimActions +from habitat.tasks.nav.shortest_path_follower import ShortestPathFollower + +from habitat_extensions.shortest_path_follower import ( + ShortestPathFollowerCompat, +) +from habitat_extensions.task import VLNExtendedEpisode + + +@registry.register_sensor(name="GlobalGPSSensor") +class GlobalGPSSensor(Sensor): + r"""The agents current location in the global coordinate frame + + Args: + sim: reference to the simulator for calculating task observations. + config: Contains the DIMENSIONALITY field for the number of dimensions + to express the agents position + Attributes: + _dimensionality: number of dimensions used to specify the agents position + """ + + cls_uuid: str = "globalgps" + + def __init__( + self, sim: Simulator, config: Config, *args: Any, **kwargs: Any + ): + self._sim = sim + self._dimensionality = getattr(config, "DIMENSIONALITY", 2) + assert self._dimensionality in [2, 3] + super().__init__(config=config) + + def _get_uuid(self, *args: Any, **kwargs: Any): + return self.cls_uuid + + def _get_sensor_type(self, *args: Any, **kwargs: Any): + return SensorTypes.POSITION + + def _get_observation_space(self, *args: Any, **kwargs: Any): + return spaces.Box( + low=np.finfo(np.float32).min, + high=np.finfo(np.float32).max, + shape=(self._dimensionality,), + dtype=np.float32, + ) + + def get_observation(self, *args: Any, **kwargs: Any): + return self._sim.get_agent_state().position.astype(np.float32) + + +@registry.register_sensor +class ShortestPathSensor(Sensor): + r"""Sensor for observing the action to take that follows the shortest path + to the goal. + + Args: + sim: reference to the simulator for calculating task observations. + config: config for the sensor. 
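+
+        The observation is a single integer action id: the next action along
+        the geodesic shortest path to the first goal, or STOP once the agent
+        is within GOAL_RADIUS of it.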
+ """ + + cls_uuid: str = "shortest_path_sensor" + + def __init__( + self, sim: Simulator, config: Config, *args: Any, **kwargs: Any + ): + super().__init__(config=config) + if config.USE_ORIGINAL_FOLLOWER: + self.follower = ShortestPathFollowerCompat( + sim, config.GOAL_RADIUS, return_one_hot=False + ) + self.follower.mode = "geodesic_path" + else: + self.follower = ShortestPathFollower( + sim, config.GOAL_RADIUS, return_one_hot=False + ) + # self._sim = sim + def _get_uuid(self, *args: Any, **kwargs: Any): + return self.cls_uuid + + def _get_sensor_type(self, *args: Any, **kwargs: Any): + return SensorTypes.TACTILE + + def _get_observation_space(self, *args: Any, **kwargs: Any): + return spaces.Box(low=0.0, high=100, shape=(1,), dtype=np.float) + + def get_observation(self, *args: Any, episode, **kwargs: Any): + best_action = self.follower.get_next_action(episode.goals[0].position) + return np.array( + [ + best_action + if best_action is not None + else HabitatSimActions.STOP + ] + ) + + +@registry.register_sensor +class VLNOracleProgressSensor(Sensor): + r"""Sensor for observing how much progress has been made towards the goal. + + Args: + sim: reference to the simulator for calculating task observations. + config: config for the sensor. + """ + + cls_uuid: str = "progress" + + def __init__( + self, sim: Simulator, config: Config, *args: Any, **kwargs: Any + ): + self._sim = sim + super().__init__(config=config) + + def _get_uuid(self, *args: Any, **kwargs: Any): + return self.cls_uuid + + def _get_sensor_type(self, *args: Any, **kwargs: Any): + # TODO: what is the correct sensor type? + return SensorTypes.MEASUREMENT + + def _get_observation_space(self, *args: Any, **kwargs: Any): + return spaces.Box(low=0.0, high=1.0, shape=(1,), dtype=np.float) + + def get_observation( + self, observations, *args: Any, episode, **kwargs: Any + ): + current_position = self._sim.get_agent_state().position.tolist() + + distance_to_target = self._sim.geodesic_distance( + current_position, episode.goals[0].position + ) + + + if "geodesic_distance" not in episode.info.keys(): + distance_from_start = self._sim.geodesic_distance( + episode.start_position, episode.goals[0].position + ) + episode.info["geodesic_distance"] = distance_from_start + + distance_from_start = episode.info["geodesic_distance"] + + progress = (distance_from_start - distance_to_target) / distance_from_start + + return np.array(progress, dtype = np.float32) \ No newline at end of file diff --git a/Downstream/Visual-Language-Navigation/habitat_extensions/shortest_path_follower.py b/Downstream/Visual-Language-Navigation/habitat_extensions/shortest_path_follower.py new file mode 100755 index 0000000..a6aeba7 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/habitat_extensions/shortest_path_follower.py @@ -0,0 +1,199 @@ +# Copied from https://github.com/facebookresearch/habitat-lab/blob/v0.1.4/habitat/tasks/nav/shortest_path_follower.py +# Use the Habitat v0.1.4 ShortestPathFollower for compatibility with +# the dataset generation oracle. 
+ +from typing import Optional, Union + +import habitat_sim +import numpy as np +from habitat.sims.habitat_simulator.actions import HabitatSimActions +from habitat.sims.habitat_simulator.habitat_simulator import HabitatSim +from habitat.utils.geometry_utils import ( + angle_between_quaternions, + quaternion_from_two_vectors, +) + +EPSILON = 1e-6 + + +def action_to_one_hot(action: int) -> np.array: + one_hot = np.zeros(len(HabitatSimActions), dtype=np.float32) + one_hot[action] = 1 + return one_hot + + +class ShortestPathFollowerCompat: + r"""Utility class for extracting the action on the shortest path to the + goal. + Args: + sim: HabitatSim instance. + goal_radius: Distance between the agent and the goal for it to be + considered successful. + return_one_hot: If true, returns a one-hot encoding of the action + (useful for training ML agents). If false, returns the + SimulatorAction. + """ + + def __init__( + self, sim: HabitatSim, goal_radius: float, return_one_hot: bool = True + ): + assert ( + getattr(sim, "geodesic_distance", None) is not None + ), "{} must have a method called geodesic_distance".format( + type(sim).__name__ + ) + + self._sim = sim + self._max_delta = sim.habitat_config.FORWARD_STEP_SIZE - EPSILON + self._goal_radius = goal_radius + self._step_size = sim.habitat_config.FORWARD_STEP_SIZE + + self._mode = ( + "geodesic_path" + if getattr(sim, "get_straight_shortest_path_points", None) + is not None + else "greedy" + ) + self._return_one_hot = return_one_hot + + def _get_return_value(self, action) -> Union[int, np.array]: + if self._return_one_hot: + return action_to_one_hot(action) + else: + return action + + def get_next_action( + self, goal_pos: np.array + ) -> Optional[Union[int, np.array]]: + """Returns the next action along the shortest path.""" + if ( + self._sim.geodesic_distance( + self._sim.get_agent_state().position, goal_pos + ) + <= self._goal_radius + ): + return None + + max_grad_dir = self._est_max_grad_dir(goal_pos) + if max_grad_dir is None: + return self._get_return_value(HabitatSimActions.MOVE_FORWARD) + return self._step_along_grad(max_grad_dir) + + def _step_along_grad( + self, grad_dir: np.quaternion + ) -> Union[int, np.array]: + current_state = self._sim.get_agent_state() + alpha = angle_between_quaternions(grad_dir, current_state.rotation) + if alpha <= np.deg2rad(self._sim.habitat_config.TURN_ANGLE) + EPSILON: + return self._get_return_value(HabitatSimActions.MOVE_FORWARD) + else: + sim_action = HabitatSimActions.TURN_LEFT + self._sim.step(sim_action) + best_turn = ( + HabitatSimActions.TURN_LEFT + if ( + angle_between_quaternions( + grad_dir, self._sim.get_agent_state().rotation + ) + < alpha + ) + else HabitatSimActions.TURN_RIGHT + ) + self._reset_agent_state(current_state) + return self._get_return_value(best_turn) + + def _reset_agent_state(self, state: habitat_sim.AgentState) -> None: + self._sim.set_agent_state( + state.position, state.rotation, reset_sensors=False + ) + + def _geo_dist(self, goal_pos: np.array) -> float: + return self._sim.geodesic_distance( + self._sim.get_agent_state().position, goal_pos + ) + + def _est_max_grad_dir(self, goal_pos: np.array) -> np.array: + + current_state = self._sim.get_agent_state() + current_pos = current_state.position + + if self.mode == "geodesic_path": + points = self._sim.get_straight_shortest_path_points( + self._sim.get_agent_state().position, goal_pos + ) + # Add a little offset as things get weird if + # points[1] - points[0] is anti-parallel with forward + if len(points) < 2: + return 
None + max_grad_dir = quaternion_from_two_vectors( + self._sim.forward_vector, + points[1] + - points[0] + + EPSILON + * np.cross(self._sim.up_vector, self._sim.forward_vector), + ) + max_grad_dir.x = 0 + max_grad_dir = np.normalized(max_grad_dir) + else: + current_rotation = self._sim.get_agent_state().rotation + current_dist = self._geo_dist(goal_pos) + + best_geodesic_delta = -2 * self._max_delta + best_rotation = current_rotation + for _ in range(0, 360, self._sim.habitat_config.TURN_ANGLE): + sim_action = HabitatSimActions.MOVE_FORWARD + self._sim.step(sim_action) + new_delta = current_dist - self._geo_dist(goal_pos) + + if new_delta > best_geodesic_delta: + best_rotation = self._sim.get_agent_state().rotation + best_geodesic_delta = new_delta + + # If the best delta is within (1 - cos(TURN_ANGLE))% of the + # best delta (the step size), then we almost certainly have + # found the max grad dir and should just exit + if np.isclose( + best_geodesic_delta, + self._max_delta, + rtol=1 + - np.cos(np.deg2rad(self._sim.habitat_config.TURN_ANGLE)), + ): + break + + self._sim.set_agent_state( + current_pos, + self._sim.get_agent_state().rotation, + reset_sensors=False, + ) + + sim_action = HabitatSimActions.TURN_LEFT + self._sim.step(sim_action) + + self._reset_agent_state(current_state) + + max_grad_dir = best_rotation + + return max_grad_dir + + @property + def mode(self): + return self._mode + + @mode.setter + def mode(self, new_mode: str): + r"""Sets the mode for how the greedy follower determines the best next + step. + Args: + new_mode: geodesic_path indicates using the simulator's shortest + path algorithm to find points on the map to navigate between. + greedy indicates trying to move forward at all possible + orientations and selecting the one which reduces the geodesic + distance the most. 
+ """ + assert new_mode in {"geodesic_path", "greedy"} + if new_mode == "geodesic_path": + assert ( + getattr(self._sim, "get_straight_shortest_path_points", None) + is not None + ) + self._mode = new_mode diff --git a/Downstream/Visual-Language-Navigation/habitat_extensions/task.py b/Downstream/Visual-Language-Navigation/habitat_extensions/task.py new file mode 100755 index 0000000..df73b3d --- /dev/null +++ b/Downstream/Visual-Language-Navigation/habitat_extensions/task.py @@ -0,0 +1,242 @@ +import gzip +import json +import os +from typing import Dict, List, Optional, Union + +import attr +from habitat.config import Config +from habitat.core.dataset import Dataset +from habitat.core.registry import registry +from habitat.core.utils import not_none_validator +from habitat.datasets.pointnav.pointnav_dataset import ALL_SCENES_MASK +from habitat.datasets.utils import VocabDict +from habitat.tasks.nav.nav import NavigationGoal +from habitat.tasks.vln.vln import InstructionData, VLNEpisode +import random + +random.seed(0) + +DEFAULT_SCENE_PATH_PREFIX = "data/scene_datasets/" +ALL_LANGUAGES_MASK = "*" +ALL_ROLES_MASK = "*" + + +@attr.s(auto_attribs=True) +class ExtendedInstructionData: + instruction_text: str = attr.ib(default=None, validator=not_none_validator) + instruction_id: Optional[str] = attr.ib(default=None) + language: Optional[str] = attr.ib(default=None) + annotator_id: Optional[str] = attr.ib(default=None) + edit_distance: Optional[float] = attr.ib(default=None) + timed_instruction: Optional[List[Dict[str, Union[float, str]]]] = attr.ib( + default=None + ) + instruction_tokens: Optional[List[str]] = attr.ib(default=None) + split: Optional[str] = attr.ib(default=None) + + +@attr.s(auto_attribs=True, kw_only=True) +class VLNExtendedEpisode(VLNEpisode): + goals: Optional[List[NavigationGoal]] = attr.ib(default=None) + reference_path: Optional[List[List[float]]] = attr.ib(default=None) + instruction: ExtendedInstructionData = attr.ib( + default=None, validator=not_none_validator + ) + trajectory_id: Optional[Union[int, str]] = attr.ib(default=None) + + +@registry.register_dataset(name="VLN-CE-v1") +class VLNCEDatasetV1(Dataset): + r"""Class inherited from Dataset that loads a Vision and Language + Navigation dataset. + """ + + episodes: List[VLNEpisode] + instruction_vocab: VocabDict + + @staticmethod + def check_config_paths_exist(config: Config) -> bool: + return os.path.exists( + config.DATA_PATH.format(split=config.SPLIT) + ) and os.path.exists(config.SCENES_DIR) + + @staticmethod + def _scene_from_episode(episode: VLNEpisode) -> str: + r"""Helper method to get the scene name from an episode. Assumes + the scene_id is formated /path/to/. 
+ """ + return os.path.splitext(os.path.basename(episode.scene_id))[0] + + @classmethod + def get_scenes_to_load(cls, config: Config) -> List[str]: + r"""Return a sorted list of scenes""" + assert cls.check_config_paths_exist(config) + dataset = cls(config) + return sorted( + {cls._scene_from_episode(episode) for episode in dataset.episodes} + ) + + def __init__(self, config: Optional[Config] = None) -> None: + self.episodes = [] + + if config is None: + return + + dataset_filename = config.DATA_PATH.format(split=config.SPLIT) + with gzip.open(dataset_filename, "rt") as f: + self.from_json(f.read(), scenes_dir=config.SCENES_DIR) + + if ALL_SCENES_MASK not in config.CONTENT_SCENES: + scenes_to_load = set(config.CONTENT_SCENES) + self.episodes = [ + episode + for episode in self.episodes + if self._scene_from_episode(episode) in scenes_to_load + ] + + if config.EPISODES_ALLOWED is not None: + ep_ids_before = {ep.episode_id for ep in self.episodes} + ep_ids_to_purge = ep_ids_before - set([ int(id) for id in config.EPISODES_ALLOWED]) + self.episodes = [ + episode + for episode in self.episodes + if episode.episode_id not in ep_ids_to_purge + ] + + def from_json( + self, json_str: str, scenes_dir: Optional[str] = None + ) -> None: + + deserialized = json.loads(json_str) + self.instruction_vocab = VocabDict( + word_list=deserialized["instruction_vocab"]["word_list"] + ) + + for episode in deserialized["episodes"]: + episode = VLNExtendedEpisode(**episode) + + if scenes_dir is not None: + if episode.scene_id.startswith(DEFAULT_SCENE_PATH_PREFIX): + episode.scene_id = episode.scene_id[ + len(DEFAULT_SCENE_PATH_PREFIX) : + ] + + episode.scene_id = os.path.join(scenes_dir, episode.scene_id) + + episode.instruction = InstructionData(**episode.instruction) + if episode.goals is not None: + for g_index, goal in enumerate(episode.goals): + episode.goals[g_index] = NavigationGoal(**goal) + self.episodes.append(episode) + + random.shuffle(self.episodes) + + +@registry.register_dataset(name="RxR-VLN-CE-v1") +class RxRVLNCEDatasetV1(Dataset): + r"""Loads the RxR VLN-CE Dataset.""" + + episodes: List[VLNEpisode] + instruction_vocab: VocabDict + annotation_roles: List[str] = ["guide", "follower"] + languages: List[str] = ["en-US", "en-IN", "hi-IN", "te-IN"] + + @staticmethod + def _scene_from_episode(episode: VLNEpisode) -> str: + r"""Helper method to get the scene name from an episode. Assumes + the scene_id is formated /path/to/. 
+ """ + return os.path.splitext(os.path.basename(episode.scene_id))[0] + + @staticmethod + def _language_from_episode(episode: VLNExtendedEpisode) -> str: + return episode.instruction.language + + @classmethod + def get_scenes_to_load(cls, config: Config) -> List[str]: + r"""Return a sorted list of scenes""" + assert cls.check_config_paths_exist(config) + dataset = cls(config) + return sorted( + {cls._scene_from_episode(episode) for episode in dataset.episodes} + ) + + @classmethod + def extract_roles_from_config(cls, config: Config) -> List[str]: + if ALL_ROLES_MASK in config.ROLES: + return cls.annotation_roles + assert set(config.ROLES).issubset(set(cls.annotation_roles)) + return config.ROLES + + @classmethod + def check_config_paths_exist(cls, config: Config) -> bool: + return all( + os.path.exists( + config.DATA_PATH.format(split=config.SPLIT, role=role) + ) + for role in cls.extract_roles_from_config(config) + ) and os.path.exists(config.SCENES_DIR) + + def __init__(self, config: Optional[Config] = None) -> None: + self.episodes = [] + self.config = config + + if config is None: + return + + for role in self.extract_roles_from_config(config): + with gzip.open( + config.DATA_PATH.format(split=config.SPLIT, role=role), "rt" + ) as f: + self.from_json(f.read(), scenes_dir=config.SCENES_DIR) + + if ALL_SCENES_MASK not in config.CONTENT_SCENES: + scenes_to_load = set(config.CONTENT_SCENES) + self.episodes = [ + episode + for episode in self.episodes + if self._scene_from_episode(episode) in scenes_to_load + ] + + if ALL_LANGUAGES_MASK not in config.LANGUAGES: + languages_to_load = set(config.LANGUAGES) + self.episodes = [ + episode + for episode in self.episodes + if self._language_from_episode(episode) in languages_to_load + ] + + if config.EPISODES_ALLOWED is not None: + ep_ids_before = {ep.episode_id for ep in self.episodes} + ep_ids_to_purge = ep_ids_before - set(config.EPISODES_ALLOWED) + self.episodes = [ + episode + for episode in self.episodes + if episode.episode_id not in ep_ids_to_purge + ] + + def from_json( + self, json_str: str, scenes_dir: Optional[str] = None + ) -> None: + + deserialized = json.loads(json_str) + + for episode in deserialized["episodes"]: + episode = VLNExtendedEpisode(**episode) + + if scenes_dir is not None: + if episode.scene_id.startswith(DEFAULT_SCENE_PATH_PREFIX): + episode.scene_id = episode.scene_id[ + len(DEFAULT_SCENE_PATH_PREFIX) : + ] + + episode.scene_id = os.path.join(scenes_dir, episode.scene_id) + + episode.instruction = ExtendedInstructionData( + **episode.instruction + ) + episode.instruction.split = self.config.SPLIT + if episode.goals is not None: + for g_index, goal in enumerate(episode.goals): + episode.goals[g_index] = NavigationGoal(**goal) + self.episodes.append(episode) diff --git a/Downstream/Visual-Language-Navigation/habitat_extensions/utils.py b/Downstream/Visual-Language-Navigation/habitat_extensions/utils.py new file mode 100755 index 0000000..b2456d0 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/habitat_extensions/utils.py @@ -0,0 +1,92 @@ +from typing import Dict + +import numpy as np +from habitat.core.utils import try_cv2_import +from habitat.utils.visualizations import maps as habitat_maps +from habitat.utils.visualizations.utils import draw_collision + +from habitat_extensions import maps + +cv2 = try_cv2_import() + + +def observations_to_image(observation: Dict, info: Dict) -> np.ndarray: + r"""Generate image of single frame from observation and info + returned from a single environment step(). 
+ + Args: + observation: observation returned from an environment step(). + info: info returned from an environment step(). + + Returns: + generated image of a single frame. + """ + egocentric_view = [] + observation_size = -1 + if "rgb" in observation: + observation_size = observation["rgb"].shape[0] + rgb = observation["rgb"][:, :, :3] + egocentric_view.append(rgb) + + # draw depth map if observation has depth info. resize to rgb size. + if "depth" in observation: + if observation_size == -1: + observation_size = observation["depth"].shape[0] + depth_map = (observation["depth"].squeeze() * 255).astype(np.uint8) + depth_map = np.stack([depth_map for _ in range(3)], axis=2) + depth_map = cv2.resize( + depth_map, + dsize=(observation_size, observation_size), + interpolation=cv2.INTER_CUBIC, + ) + egocentric_view.append(depth_map) + + assert ( + len(egocentric_view) > 0 + ), "Expected at least one visual sensor enabled." + egocentric_view = np.concatenate(egocentric_view, axis=1) + + # draw collision + if "collisions" in info and info["collisions"]["is_collision"]: + egocentric_view = draw_collision(egocentric_view) + + frame = egocentric_view + + map_k = None + if "top_down_map_vlnce" in info: + map_k = "top_down_map_vlnce" + elif "top_down_map" in info: + map_k = "top_down_map" + + if map_k is not None: + td_map = info[map_k]["map"] + + td_map = maps.colorize_topdown_map( + td_map, + info[map_k]["fog_of_war_mask"], + fog_of_war_desat_amount=0.75, + ) + td_map = habitat_maps.draw_agent( + image=td_map, + agent_center_coord=info[map_k]["agent_map_coord"], + agent_rotation=info[map_k]["agent_angle"], + agent_radius_px=min(td_map.shape[0:2]) // 24, + ) + if td_map.shape[1] < td_map.shape[0]: + td_map = np.rot90(td_map, 1) + + if td_map.shape[0] > td_map.shape[1]: + td_map = np.rot90(td_map, 1) + + # scale top down map to align with rgb view + old_h, old_w, _ = td_map.shape + top_down_height = observation_size + top_down_width = int(float(top_down_height) / old_h * old_w) + # cv2 resize (dsize is width first) + td_map = cv2.resize( + td_map, + (top_down_width, top_down_height), + interpolation=cv2.INTER_CUBIC, + ) + frame = np.concatenate((egocentric_view, td_map), axis=1) + return frame diff --git a/Downstream/Visual-Language-Navigation/iter_train.yaml b/Downstream/Visual-Language-Navigation/iter_train.yaml new file mode 100644 index 0000000..744508a --- /dev/null +++ b/Downstream/Visual-Language-Navigation/iter_train.yaml @@ -0,0 +1,70 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/r2r_vlnce.yaml +SIMULATOR_GPU_IDS: [0] +TORCH_GPU_ID: 0 +TORCH_GPU_IDS: [0] +TRAINER_NAME: HAMT +GPU_NUMBERS: 1 +NUM_ENVIRONMENTS: 1 +TENSORBOARD_DIR: data/logs/tensorboard_dirs/ +CHECKPOINT_FOLDER: data/logs/checkpoints/ +CEPH_URL: s3://checkpoints/ +EVAL_CKPT_PATH_DIR: data/logs/checkpoints/ +RESULTS_DIR: data/logs/eval_results/ +VIDEO_DIR: data/logs/video/ +CEPH_IO: False + +INFERENCE: + SPLIT: test + USE_CKPT_CONFIG: False + SAMPLE: False + CKPT_PATH: '' # REPLACE THIS + PREDICTIONS_FILE: '' + FORMAT: r2r + +EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + EPISODE_COUNT: -1 + CKPT_PATH_DIR: '' + ANG30: False + +IL: + iters: 10000 + log_every: 100 + lr: 2e-5 + batch_size: 1 # equal to NUM_ENVIRONMENTS + ml_weight: 1.0 + feedback: sample + resume: True + style_aug: False + + sample_ratio: 0.75 + decay_interval: 2000 + + max_traj_len: 25 + max_text_len: 200 + waypoint_aug: True + progress_monitor: False + +MODEL: + task_type: r2r + + policy_name: PolicyViewSelectionHAMT + NUM_ANGLES: 12 + 
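+  # NUM_ANGLES appears to correspond to the 12 panoramic camera headings added via
+  # get_camera_orientations12(); the fix_* flags below presumably freeze the matching
+  # pretrained HAMT components during fine-tuning.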
pretrained_path: pretrained/HAMT/vitbase-6tasks-pretrain/model_step_130000.pt + fix_lang_embedding: True + fix_hist_embedding: True + fix_obs_embedding: False + + spatial_output: False + RGB_ENCODER: + backbone_type: VideoIntern-Base + output_size: 768 + DEPTH_ENCODER: + output_size: 256 + VISUAL_DIM: + vis_hidden: 768 + directional: 128 + INSTRUCTION_ENCODER: + bidirectional: True + diff --git a/Downstream/Visual-Language-Navigation/requirements.txt b/Downstream/Visual-Language-Navigation/requirements.txt new file mode 100644 index 0000000..01b06f6 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/requirements.txt @@ -0,0 +1,6 @@ +torch==1.8.0 +torchvision==0.9.0 +boto3 +pytorch_transformers==1.2.0 +timm==0.4.9 +transformers==4.12.3 \ No newline at end of file diff --git a/Downstream/Visual-Language-Navigation/run.py b/Downstream/Visual-Language-Navigation/run.py new file mode 100755 index 0000000..e70d4af --- /dev/null +++ b/Downstream/Visual-Language-Navigation/run.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python3 + +import argparse +import random +import os +import numpy as np +import torch +from habitat import logger +from habitat_baselines.common.baseline_registry import baseline_registry + +import habitat_extensions # noqa: F401 +import vlnce_baselines # noqa: F401 +from vlnce_baselines.config.default import get_config +# from vlnce_baselines.nonlearning_agents import ( +# evaluate_agent, +# nonlearning_inference, +# ) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--exp_name", + type=str, + default="test", + required=True, + help="experiment id that matches to exp-id in Notion log", + ) + parser.add_argument( + "--run-type", + choices=["train", "eval", "inference"], + required=True, + help="run type of the experiment (train, eval, inference)", + ) + parser.add_argument( + "--exp-config", + type=str, + required=True, + help="path to config yaml containing info about experiment", + ) + parser.add_argument( + "opts", + default=None, + nargs=argparse.REMAINDER, + help="Modify config options from command line", + ) + parser.add_argument('--local_rank', type=int, default=0, help="local gpu id") + args = parser.parse_args() + run_exp(**vars(args)) + + +def run_exp(exp_name: str, exp_config: str, + run_type: str, opts=None, local_rank=None) -> None: + r"""Runs experiment given mode and config + + Args: + exp_config: path to config file. + run_type: "train" or "eval. + opts: list of strings of additional config options. + + Returns: + None. 
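+        Note: exp_name is appended to the tensorboard/checkpoint/result paths and
+        local_rank selects the GPU for distributed runs. Remaining opts are merged
+        into the config as KEY VALUE pairs, e.g. (illustrative checkpoint path):
+            python run.py --exp_name demo --run-type eval --exp-config iter_train.yaml EVAL.CKPT_PATH_DIR data/logs/checkpoints/demo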
+ """ + config = get_config(exp_config, opts) + config.defrost() + + if run_type != 'train': + config.IL.resume = False + + config.TENSORBOARD_DIR += exp_name + config.CHECKPOINT_FOLDER += exp_name + config.CEPH_URL += exp_name + if os.path.isdir(config.EVAL_CKPT_PATH_DIR): + config.EVAL_CKPT_PATH_DIR += exp_name + config.RESULTS_DIR += exp_name + config.VIDEO_DIR += exp_name + # config.TASK_CONFIG.TASK.RXR_INSTRUCTION_SENSOR.max_text_len = config.IL.max_text_len + config.LOG_FILE = exp_name + '_' + config.LOG_FILE + + if 'CMA' in config.MODEL.policy_name and 'r2r' in config.BASE_TASK_CONFIG_PATH: + config.TASK_CONFIG.DATASET.DATA_PATH = 'data/datasets/R2R_VLNCE_v1-2_preprocessed/{split}/{split}.json.gz' + + config.local_rank = local_rank + config.freeze() + os.system("mkdir -p data/logs/running_log") + logger.add_filehandler('data/logs/running_log/'+config.LOG_FILE) + + random.seed(config.TASK_CONFIG.SEED) + np.random.seed(config.TASK_CONFIG.SEED) + torch.manual_seed(config.TASK_CONFIG.SEED) + torch.backends.cudnn.benchmark = False + torch.backends.cudnn.deterministic = False + if torch.cuda.is_available(): + torch.set_num_threads(1) + + # if run_type == "eval" and config.EVAL.EVAL_NONLEARNING: + # evaluate_agent(config) + # return + + # if run_type == "inference" and config.INFERENCE.INFERENCE_NONLEARNING: + # nonlearning_inference(config) + # return + + trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME) + assert trainer_init is not None, f"{config.TRAINER_NAME} is not supported" + trainer = trainer_init(config) + + # import pdb; pdb.set_trace() + if run_type == "train": + trainer.train() + elif run_type == "eval": + trainer.eval() + elif run_type == "inference": + trainer.inference() + +if __name__ == "__main__": + main() diff --git a/Downstream/Visual-Language-Navigation/run/batch_run.sh b/Downstream/Visual-Language-Navigation/run/batch_run.sh new file mode 100644 index 0000000..afea1e8 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/run/batch_run.sh @@ -0,0 +1,19 @@ +# srun -p GVT -n1 -c10 --gres=gpu:3 -x SH-IDC1-10-198-8-[61,62] --async \ +# -J hi bash run/cma_rxr.bash hi data/logs/checkpoints/cma_rxr_hi_slide1_oldcamera/ckpt.5.pth +# srun -p GVT -n1 -c10 --gres=gpu:3 -x SH-IDC1-10-198-8-[61,62] --async \ +# -J te bash run/cma_rxr.bash te data/logs/checkpoints/cma_rxr_te_slide1_oldcamera/ckpt.4.pth + +# srun -p GVT -n1 -c5 --gres=gpu:1 -x SH-IDC1-10-198-8-[61,62] --async \ +# -J hi_eval bash run/cma_rxr_eval.bash hi +# srun -p GVT -n1 -c5 --gres=gpu:1 -x SH-IDC1-10-198-8-[61,62] --async \ +# -J te_eval bash run/cma_rxr_eval.bash te + +# srun -p GVT -n1 -c5 --gres=gpu:1 -x SH-IDC1-10-198-8-[61,62] --async \ +# -J en_s1>0 bash run/cma_rxr_eval.bash + +srun -p GVT -n1 -c5 --gres=gpu:1 -x SH-IDC1-10-198-8-[61,62] --async \ +-J en_infer bash run/cma_rxr_eval.bash en data/logs/checkpoints/cma_rxr_en_slide1_oldcamera/ckpt.25.pth +srun -p GVT -n1 -c5 --gres=gpu:1 -x SH-IDC1-10-198-8-[61,62] --async \ +-J hi_infer bash run/cma_rxr_eval.bash hi data/logs/checkpoints/cma_rxr_hi_slide1_oldcamera/ckpt.8.pth +srun -p GVT -n1 -c5 --gres=gpu:1 -x SH-IDC1-10-198-8-[61,62] --async \ +-J te_infer bash run/cma_rxr_eval.bash te data/logs/checkpoints/cma_rxr_te_slide1_oldcamera/ckpt.9.pth \ No newline at end of file diff --git a/Downstream/Visual-Language-Navigation/run/cma_r2r.bash b/Downstream/Visual-Language-Navigation/run/cma_r2r.bash new file mode 100755 index 0000000..95a0933 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/run/cma_r2r.bash @@ -0,0 +1,39 @@ 
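+# $1 is the comma-separated GPU list (0,1,2 in the srun example below); it feeds both
+# SIMULATOR_GPU_IDS and TORCH_GPU_IDS, while GPU_NUMBERS and --nproc_per_node are
+# hard-coded to 3 to match.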
+device=$1 +# train +# srun -p GVT -n1 -c10 --gres=gpu:3 -x SH-IDC1-10-198-8-[61,62] bash run/cma_r2r.bash 0,1,2 + +##################################################################### + +flag="--exp_name cma_r2r_slide1 + --run-type train + --exp-config exp/cma_r2r.yaml + + SIMULATOR_GPU_IDS [$device] + TORCH_GPU_IDS [$device] + GPU_NUMBERS 3 + + IL.batch_size 20 + IL.lr 1e-4 + IL.schedule_ratio 0.75 + IL.max_traj_len 20 + " +python -m torch.distributed.launch --nproc_per_node=3 run.py $flag + +##################################################################### + +# flag="--exp_name cma_r2r_slide0 +# --run-type train +# --exp-config exp/cma_r2r.yaml + +# SIMULATOR_GPU_IDS [$device] +# TORCH_GPU_IDS [$device] +# GPU_NUMBERS 3 + +# TASK_CONFIG.SIMULATOR.HABITAT_SIM_V0.ALLOW_SLIDING False + +# IL.batch_size 20 +# IL.lr 1e-4 +# IL.schedule_ratio 0.75 +# IL.max_traj_len 20 +# " +# python -m torch.distributed.launch --nproc_per_node=3 run.py $flag \ No newline at end of file diff --git a/Downstream/Visual-Language-Navigation/run/cma_r2r_eval.bash b/Downstream/Visual-Language-Navigation/run/cma_r2r_eval.bash new file mode 100644 index 0000000..29b11c8 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/run/cma_r2r_eval.bash @@ -0,0 +1,84 @@ +# eval +# srun -p GVT -n1 -c5 --gres=gpu:1 -x SH-IDC1-10-198-8-[61,62] bash run/cma_r2r_eval.bash + +##################################################################### + +# flag="--exp_name cma_r2r_slide1 +# --run-type eval +# --exp-config exp/cma_r2r.yaml + +# SIMULATOR_GPU_IDS [0] +# TORCH_GPU_IDS [0] +# TORCH_GPU_ID 0 +# GPU_NUMBERS 1 + +# IL.batch_size 11 +# IL.lr 1e-4 +# IL.schedule_ratio 0.75 +# IL.max_traj_len 20 +# " +# python run.py $flag + +##################################################################### + +# flag="--exp_name cma_r2r_slide0 +# --run-type eval +# --exp-config exp/cma_r2r.yaml + +# SIMULATOR_GPU_IDS [0] +# TORCH_GPU_IDS [0] +# TORCH_GPU_ID 0 +# GPU_NUMBERS 1 + +# VIDEO_OPTION ['tensorboard'] +# EVAL_CKPT_PATH_DIR data/logs/checkpoints/cma_r2r_slide0/ckpt.31.pth + +# TASK_CONFIG.SIMULATOR.HABITAT_SIM_V0.ALLOW_SLIDING False + +# IL.batch_size 1 +# IL.lr 1e-4 +# IL.schedule_ratio 0.75 +# IL.max_traj_len 20 +# " +# python run.py $flag + +##################################################################### + +# flag="--exp_name cma_r2r_slide0 +# --run-type inference +# --exp-config exp/cma_r2r.yaml + +# SIMULATOR_GPU_IDS [1] +# TORCH_GPU_IDS [1] +# TORCH_GPU_ID 1 +# GPU_NUMBERS 1 + +# TASK_CONFIG.SIMULATOR.HABITAT_SIM_V0.ALLOW_SLIDING False + +# IL.batch_size 18 +# IL.lr 1e-4 +# IL.schedule_ratio 0.75 +# IL.max_traj_len 20 +# " +# python run.py $flag + +##################################################################### + +# flag="--exp_name slide1_to_slide0 +# --run-type eval +# --exp-config exp/cma_r2r.yaml + +# SIMULATOR_GPU_IDS [0] +# TORCH_GPU_IDS [3] +# TORCH_GPU_ID 3 +# GPU_NUMBERS 1 + +# IL.batch_size 11 +# IL.lr 1e-4 +# IL.schedule_ratio 0.75 +# IL.max_traj_len 20 + +# EVAL_CKPT_PATH_DIR data/logs/checkpoints/cma_r2r_slide1/ckpt.40.pth +# TASK_CONFIG.SIMULATOR.HABITAT_SIM_V0.ALLOW_SLIDING False +# " +# python run.py $flag diff --git a/Downstream/Visual-Language-Navigation/run/cma_rxr.bash b/Downstream/Visual-Language-Navigation/run/cma_rxr.bash new file mode 100644 index 0000000..245db62 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/run/cma_rxr.bash @@ -0,0 +1,97 @@ +# train +# srun -p GVT -n1 -c10 --gres=gpu:3 -x SH-IDC1-10-198-8-[61,62] bash run/cma_rxr.bash + + 
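+# Only the final uncommented flag block (English RxR, 224x224 RGB / 256x256 depth, sliding
+# allowed) is executed; the commented blocks are earlier variants kept for reference. In the
+# per-language variant, $1 is the language code (en/hi/te) and $2 a checkpoint to resume from.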
+##################################################################### + +# flag="--exp_name cma_rxr_slide0 +# --run-type train +# --exp-config exp/cma_rxr.yaml + +# SIMULATOR_GPU_IDS [0,1,2] +# TORCH_GPU_IDS [0,1,2] +# GPU_NUMBERS 3 + +# TASK_CONFIG.SIMULATOR.HABITAT_SIM_V0.ALLOW_SLIDING False + +# IL.batch_size 19 +# IL.lr 1e-4 +# IL.schedule_ratio 0.75 +# IL.max_traj_len 20 +# " +# python -m torch.distributed.launch --nproc_per_node=3 run.py $flag + +##################################################################### + +# flag="--exp_name cma_rxr_slide1 +# --run-type train +# --exp-config exp/cma_rxr.yaml + +# SIMULATOR_GPU_IDS [0,1,2] +# TORCH_GPU_IDS [0,1,2] +# GPU_NUMBERS 3 + +# TASK_CONFIG.SIMULATOR.HABITAT_SIM_V0.ALLOW_SLIDING True + +# IL.batch_size 20 +# IL.lr 1e-4 +# IL.schedule_ratio 0.75 +# IL.max_traj_len 20 +# " +# python -m torch.distributed.launch --nproc_per_node=3 run.py $flag + +##################################################################### +# lang=$1 + +# flag="--exp_name cma_rxr_${lang}_slide1_oldcamera +# --run-type train +# --exp-config exp/cma_rxr_${lang}.yaml + +# SIMULATOR_GPU_IDS [0,1,2] +# TORCH_GPU_IDS [0,1,2] +# GPU_NUMBERS 3 + +# TASK_CONFIG.SIMULATOR.HABITAT_SIM_V0.ALLOW_SLIDING True +# TASK_CONFIG.SIMULATOR.RGB_SENSOR.WIDTH 224 +# TASK_CONFIG.SIMULATOR.RGB_SENSOR.HEIGHT 224 +# TASK_CONFIG.SIMULATOR.RGB_SENSOR.HFOV 90 +# TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.WIDTH 256 +# TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.HEIGHT 256 +# TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.HFOV 90 +# RL.POLICY.OBS_TRANSFORMS.ENABLED_TRANSFORMS ['CenterCropperPerSensor'] + +# IL.batch_size 20 +# IL.lr 1e-4 +# IL.schedule_ratio 0.75 +# IL.max_traj_len 20 + +# IL.load_from_ckpt True +# IL.ckpt_to_load $2 +# IL.is_requeue True +# " +# python -m torch.distributed.launch --nproc_per_node=3 run.py $flag +##################################################################### + +flag="--exp_name rxr.en_cma_oldcam.slide1 + --run-type train + --exp-config exp/cma_rxr_en.yaml + + SIMULATOR_GPU_IDS [0,1,2] + TORCH_GPU_IDS [0,1,2] + GPU_NUMBERS 3 + + TASK_CONFIG.SIMULATOR.HABITAT_SIM_V0.ALLOW_SLIDING True + TASK_CONFIG.SIMULATOR.RGB_SENSOR.WIDTH 224 + TASK_CONFIG.SIMULATOR.RGB_SENSOR.HEIGHT 224 + TASK_CONFIG.SIMULATOR.RGB_SENSOR.HFOV 90 + TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.WIDTH 256 + TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.HEIGHT 256 + TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.HFOV 90 + RL.POLICY.OBS_TRANSFORMS.ENABLED_TRANSFORMS ['CenterCropperPerSensor'] + + IL.batch_size 20 + IL.lr 3.5e-4 + IL.schedule_ratio 0.75 + IL.max_traj_len 20 + " +python -m torch.distributed.launch --nproc_per_node=3 run.py $flag \ No newline at end of file diff --git a/Downstream/Visual-Language-Navigation/run/cma_rxr_eval.bash b/Downstream/Visual-Language-Navigation/run/cma_rxr_eval.bash new file mode 100644 index 0000000..e5ac28c --- /dev/null +++ b/Downstream/Visual-Language-Navigation/run/cma_rxr_eval.bash @@ -0,0 +1,169 @@ +# eval +# srun -p GVT -n1 -c5 --gres=gpu:1 -x SH-IDC1-10-198-8-[61,62] bash run/cma_rxr_eval.bash + + +##################################################################### + +# flag="--exp_name cma_rxr_slide0 +# --run-type eval +# --exp-config exp/cma_rxr.yaml + +# SIMULATOR_GPU_IDS [0] +# TORCH_GPU_IDS [0] +# TORCH_GPU_ID 0 +# GPU_NUMBERS 1 + +# TASK_CONFIG.SIMULATOR.HABITAT_SIM_V0.ALLOW_SLIDING False + +# IL.batch_size 11 +# IL.lr 1e-4 +# IL.schedule_ratio 0.75 +# IL.max_traj_len 20 +# " +# python run.py $flag + +##################################################################### + +# flag="--exp_name 
cma_rxr_slide0 +# --run-type inference +# --exp-config exp/cma_rxr.yaml + +# SIMULATOR_GPU_IDS [$1] +# TORCH_GPU_IDS [$1] +# TORCH_GPU_ID $1 +# GPU_NUMBERS 1 + +# TASK_CONFIG.SIMULATOR.HABITAT_SIM_V0.ALLOW_SLIDING False + +# IL.batch_size 1 +# IL.lr 1e-4 +# IL.schedule_ratio 0.75 +# IL.max_traj_len 20 +# " +# python run.py $flag + +##################################################################### + +# flag="--exp_name cma_rxr_en_slide1>0 +# --run-type eval +# --exp-config exp/cma_rxr_en.yaml + +# TASK_CONFIG.SIMULATOR.HABITAT_SIM_V0.ALLOW_SLIDING False +# EVAL_CKPT_PATH_DIR data/logs/checkpoints/cma_rxr_slide1/ckpt.15.pth + +# IL.batch_size 11 +# IL.lr 1e-4 +# IL.schedule_ratio 0.75 +# IL.max_traj_len 20 +# " +# python run.py $flag + +##################################################################### + +# flag="--exp_name cma_rxr_slide1_oldcamera +# --run-type eval +# --exp-config exp/cma_rxr.yaml + +# SIMULATOR_GPU_IDS [0] +# TORCH_GPU_IDS [0] +# TORCH_GPU_ID 0 +# GPU_NUMBERS 1 + +# TASK_CONFIG.SIMULATOR.HABITAT_SIM_V0.ALLOW_SLIDING True +# TASK_CONFIG.SIMULATOR.RGB_SENSOR.WIDTH 224 +# TASK_CONFIG.SIMULATOR.RGB_SENSOR.HEIGHT 224 +# TASK_CONFIG.SIMULATOR.RGB_SENSOR.HFOV 90 +# TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.WIDTH 256 +# TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.HEIGHT 256 +# TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.HFOV 90 +# RL.POLICY.OBS_TRANSFORMS.ENABLED_TRANSFORMS ['CenterCropperPerSensor'] + +# IL.batch_size 11 +# IL.lr 1e-4 +# IL.schedule_ratio 0.75 +# IL.max_traj_len 20 +# " +# python run.py $flag + +##################################################################### + +# flag="--exp_name cma_rxr_en_slide1>0 +# --run-type eval +# --exp-config exp/cma_rxr_en.yaml + +# SIMULATOR_GPU_IDS [0] +# TORCH_GPU_IDS [0] +# TORCH_GPU_ID 0 +# GPU_NUMBERS 1 + +# TASK_CONFIG.SIMULATOR.HABITAT_SIM_V0.ALLOW_SLIDING False +# TASK_CONFIG.SIMULATOR.RGB_SENSOR.WIDTH 224 +# TASK_CONFIG.SIMULATOR.RGB_SENSOR.HEIGHT 224 +# TASK_CONFIG.SIMULATOR.RGB_SENSOR.HFOV 90 +# TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.WIDTH 256 +# TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.HEIGHT 256 +# TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.HFOV 90 +# RL.POLICY.OBS_TRANSFORMS.ENABLED_TRANSFORMS ['CenterCropperPerSensor'] +# EVAL_CKPT_PATH_DIR data/logs/checkpoints/cma_rxr_slide1_oldcamera/ckpt.25.pth + +# IL.batch_size 11 +# IL.lr 1e-4 +# IL.schedule_ratio 0.75 +# IL.max_traj_len 20 +# " +# python run.py $flag + +##################################################################### + +# flag="--exp_name cma_rxr_$1_slide1>0_oldcamera +# --run-type eval +# --exp-config exp/cma_rxr_$1.yaml + +# SIMULATOR_GPU_IDS [0] +# TORCH_GPU_IDS [0] +# TORCH_GPU_ID 0 +# GPU_NUMBERS 1 + +# TASK_CONFIG.SIMULATOR.HABITAT_SIM_V0.ALLOW_SLIDING False +# TASK_CONFIG.SIMULATOR.RGB_SENSOR.WIDTH 224 +# TASK_CONFIG.SIMULATOR.RGB_SENSOR.HEIGHT 224 +# TASK_CONFIG.SIMULATOR.RGB_SENSOR.HFOV 90 +# TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.WIDTH 256 +# TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.HEIGHT 256 +# TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.HFOV 90 +# RL.POLICY.OBS_TRANSFORMS.ENABLED_TRANSFORMS ['CenterCropperPerSensor'] +# EVAL_CKPT_PATH_DIR $2 + +# IL.batch_size 11 +# IL.lr 1e-4 +# IL.schedule_ratio 0.75 +# IL.max_traj_len 20 +# " +# python run.py $flag + +##################################################################### + +flag="--exp_name rxr.en_cma_oldcam.slide1 + --run-type eval + --exp-config exp/cma_rxr_en.yaml + + SIMULATOR_GPU_IDS [0] + TORCH_GPU_IDS [0] + TORCH_GPU_ID 0 + GPU_NUMBERS 1 + + TASK_CONFIG.SIMULATOR.HABITAT_SIM_V0.ALLOW_SLIDING True + 
TASK_CONFIG.SIMULATOR.RGB_SENSOR.WIDTH 224 + TASK_CONFIG.SIMULATOR.RGB_SENSOR.HEIGHT 224 + TASK_CONFIG.SIMULATOR.RGB_SENSOR.HFOV 90 + TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.WIDTH 256 + TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.HEIGHT 256 + TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.HFOV 90 + RL.POLICY.OBS_TRANSFORMS.ENABLED_TRANSFORMS ['CenterCropperPerSensor'] + + IL.batch_size 11 + IL.lr 3.5e-4 + IL.schedule_ratio 0.75 + IL.max_traj_len 20 + " +python run.py $flag diff --git a/Downstream/Visual-Language-Navigation/run/vlnbert_r2r.bash b/Downstream/Visual-Language-Navigation/run/vlnbert_r2r.bash new file mode 100755 index 0000000..6135e81 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/run/vlnbert_r2r.bash @@ -0,0 +1,16 @@ +# srun -p GVT -n1 -c10 --gres=gpu:3 -x SH-IDC1-10-198-8-[61,62] bash run/vlnbert_r2r.bash + +flag="--exp_name r2r_vlnbert_slide1 + --run-type train + --exp-config exp/vlnbert_r2r.yaml + + SIMULATOR_GPU_IDS [0,1,2] + TORCH_GPU_IDS [0,1,2] + GPU_NUMBERS 3 + + IL.batch_size 20 + IL.lr 3.5e-5 + IL.schedule_ratio 0.50 + IL.max_traj_len 20 + " +python -m torch.distributed.launch --nproc_per_node=3 run.py $flag \ No newline at end of file diff --git a/Downstream/Visual-Language-Navigation/run/vlnbert_r2r_da.bash b/Downstream/Visual-Language-Navigation/run/vlnbert_r2r_da.bash new file mode 100644 index 0000000..13f1e2f --- /dev/null +++ b/Downstream/Visual-Language-Navigation/run/vlnbert_r2r_da.bash @@ -0,0 +1,53 @@ +# srun -p GVT -n1 -c5 --gres=gpu:4 -x SH-IDC1-10-198-8-[61,62,79] bash run/vlnbert_r2r_da.bash + +p=0.5 +bs=32 +diter=$1 +epoch=$2 +ngpus=4 +flag="--exp_name r2r_vlnbert_da.p${p}.bs${bs}.di${diter}.ep${epoch} + --run-type train + --exp-config exp/vlnbert_r2r_da.yaml + + SIMULATOR_GPU_IDS [0,1,2,3] + TORCH_GPU_IDS [0,1,2,3] + GPU_NUMBERS $ngpus + NUM_ENVIRONMENTS 15 + + IL.lr 3.5e-5 + IL.batch_size $bs + IL.max_traj_len 20 + IL.epochs $epoch + IL.DAGGER.iterations $diter + IL.DAGGER.update_size 6000 + IL.DAGGER.p $p + IL.DAGGER.preload_lmdb_features False + IL.DAGGER.lmdb_features_dir data/trajectories_dirs/r2r_vlnbert_da.p${p}.bs${bs}.di${diter}.ep${epoch}/trajectories.lmdb + " +python -m torch.distributed.launch --nproc_per_node=$ngpus run.py $flag + +# p=0.75 +# bs=8 +# diter=$1 +# epoch=$2 +# ngpus=2 +# flag="--exp_name r2r_vlnbert_da.p${p}.bs${bs}.di${diter}.ep${epoch} +# --run-type train +# --exp-config exp/vlnbert_r2r_da.yaml + +# SIMULATOR_GPU_IDS [0,1] +# TORCH_GPU_IDS [0,1] +# GPU_NUMBERS $ngpus +# NUM_ENVIRONMENTS 4 + +# IL.lr 3.5e-5 +# IL.batch_size $bs +# IL.max_traj_len 20 +# IL.epochs $epoch +# IL.DAGGER.iterations $diter +# IL.DAGGER.update_size 100 +# IL.DAGGER.p $p +# IL.DAGGER.preload_lmdb_features False +# IL.DAGGER.lmdb_features_dir data/trajectories_dirs/r2r_vlnbert_da.p${p}.bs${bs}.di${diter}.ep${epoch}/trajectories.lmdb +# " +# python -m torch.distributed.launch --nproc_per_node=$ngpus run.py $flag diff --git a/Downstream/Visual-Language-Navigation/run/vlnbert_r2r_da_eval.bash b/Downstream/Visual-Language-Navigation/run/vlnbert_r2r_da_eval.bash new file mode 100644 index 0000000..364d8de --- /dev/null +++ b/Downstream/Visual-Language-Navigation/run/vlnbert_r2r_da_eval.bash @@ -0,0 +1,21 @@ +# srun -p GVT -n1 -c5 --gres=gpu:1 -x SH-IDC1-10-198-8-[61,62,79] bash run/vlnbert_r2r_da_eval.bash + +p=0.5 +bs=32 +diter=$1 +epoch=$2 +ngpus=4 +flag="--exp_name r2r_vlnbert_da.p${p}.bs${bs}.di${diter}.ep${epoch} + --run-type eval + --exp-config exp/vlnbert_r2r_da.yaml + + SIMULATOR_GPU_IDS [0] + TORCH_GPU_IDS [0] + GPU_NUMBERS $ngpus + NUM_ENVIRONMENTS 11 + + 
IL.lr 3.5e-5 + IL.batch_size 11 + IL.max_traj_len 20 + " +python run.py $flag \ No newline at end of file diff --git a/Downstream/Visual-Language-Navigation/run/vlnbert_r2r_eval.bash b/Downstream/Visual-Language-Navigation/run/vlnbert_r2r_eval.bash new file mode 100644 index 0000000..c2173d9 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/run/vlnbert_r2r_eval.bash @@ -0,0 +1,18 @@ +# eval +# srun -p GVT -n1 -c5 --gres=gpu:1 -x SH-IDC1-10-198-8-[61,62] bash run/vlnbert_r2r_eval.bash + +flag="--exp_name r2r_vlnbert_slide1 + --run-type eval + --exp-config exp/vlnbert_r2r.yaml + + SIMULATOR_GPU_IDS [0] + TORCH_GPU_IDS [0] + TORCH_GPU_ID 0 + GPU_NUMBERS 1 + + IL.batch_size 11 + IL.lr 3.5e-5 + IL.schedule_ratio 0.50 + IL.max_traj_len 20 + " +python run.py $flag \ No newline at end of file diff --git a/Downstream/Visual-Language-Navigation/run/vlnbert_rxr.bash b/Downstream/Visual-Language-Navigation/run/vlnbert_rxr.bash new file mode 100644 index 0000000..e6dd577 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/run/vlnbert_rxr.bash @@ -0,0 +1,25 @@ +# srun -p GVT -n1 -c10 --gres=gpu:4 -x SH-IDC1-10-198-8-[61,62] bash run/vlnbert_rxr.bash + +flag="--exp_name rxr.en_vlnbert_oldcam.slide1 + --run-type train + --exp-config exp/vlnbert_rxr_en.yaml + + SIMULATOR_GPU_IDS [0,1,2,3] + TORCH_GPU_IDS [0,1,2,3] + GPU_NUMBERS 4 + + TASK_CONFIG.SIMULATOR.HABITAT_SIM_V0.ALLOW_SLIDING True + TASK_CONFIG.SIMULATOR.RGB_SENSOR.WIDTH 224 + TASK_CONFIG.SIMULATOR.RGB_SENSOR.HEIGHT 224 + TASK_CONFIG.SIMULATOR.RGB_SENSOR.HFOV 90 + TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.WIDTH 256 + TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.HEIGHT 256 + TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.HFOV 90 + RL.POLICY.OBS_TRANSFORMS.ENABLED_TRANSFORMS ['CenterCropperPerSensor'] + + IL.batch_size 15 + IL.lr 3.5e-5 + IL.schedule_ratio 0.50 + IL.max_traj_len 20 + " +python -m torch.distributed.launch --nproc_per_node=4 run.py $flag \ No newline at end of file diff --git a/Downstream/Visual-Language-Navigation/run/vlnbert_rxr_eval.bash b/Downstream/Visual-Language-Navigation/run/vlnbert_rxr_eval.bash new file mode 100644 index 0000000..783baa0 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/run/vlnbert_rxr_eval.bash @@ -0,0 +1,26 @@ +# srun -p GVT -n1 -c5 --gres=gpu:1 -x SH-IDC1-10-198-8-[61,62] bash run/vlnbert_rxr_eval.bash + +flag="--exp_name rxr.en_vlnbert_oldcam.slide1 + --run-type eval + --exp-config exp/vlnbert_rxr_en.yaml + + SIMULATOR_GPU_IDS [0] + TORCH_GPU_IDS [0] + TORCH_GPU_ID 0 + GPU_NUMBERS 1 + + TASK_CONFIG.SIMULATOR.HABITAT_SIM_V0.ALLOW_SLIDING True + TASK_CONFIG.SIMULATOR.RGB_SENSOR.WIDTH 224 + TASK_CONFIG.SIMULATOR.RGB_SENSOR.HEIGHT 224 + TASK_CONFIG.SIMULATOR.RGB_SENSOR.HFOV 90 + TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.WIDTH 256 + TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.HEIGHT 256 + TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.HFOV 90 + RL.POLICY.OBS_TRANSFORMS.ENABLED_TRANSFORMS ['CenterCropperPerSensor'] + + IL.batch_size 11 + IL.lr 3.5e-5 + IL.schedule_ratio 0.50 + IL.max_traj_len 20 + " +python run.py $flag \ No newline at end of file diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/__init__.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/__init__.py new file mode 100755 index 0000000..95d847a --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/__init__.py @@ -0,0 +1,7 @@ +from vlnce_baselines import trainer_HAMT +from vlnce_baselines.common import environments + +from vlnce_baselines.models import ( + Policy_ViewSelection_CMA, + Policy_ViewSelection_HAMT, +) diff --git 
a/Downstream/Visual-Language-Navigation/vlnce_baselines/common/aux_losses.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/common/aux_losses.py new file mode 100755 index 0000000..f0c26ff --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/common/aux_losses.py @@ -0,0 +1,44 @@ +import torch + + +class _AuxLosses: + def __init__(self): + self._losses = {} + self._loss_alphas = {} + self._is_active = False + + def clear(self): + self._losses.clear() + self._loss_alphas.clear() + + def register_loss(self, name, loss, alpha=1.0): + assert self.is_active() + assert name not in self._losses + + self._losses[name] = loss + self._loss_alphas[name] = alpha + + def get_loss(self, name): + return self._losses[name] + + def reduce(self, mask): + assert self.is_active() + total = torch.tensor(0.0).cuda() + + for k in self._losses.keys(): + k_loss = torch.masked_select(self._losses[k], mask).mean() + total = total + self._loss_alphas[k] * k_loss + + return total + + def is_active(self): + return self._is_active + + def activate(self): + self._is_active = True + + def deactivate(self): + self._is_active = False + + +AuxLosses = _AuxLosses() diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/common/base_il_trainer.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/common/base_il_trainer.py new file mode 100755 index 0000000..a0b959a --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/common/base_il_trainer.py @@ -0,0 +1,1186 @@ +import json +import jsonlines +import os +import sys +import time +import glob +import warnings +from collections import defaultdict +from typing import Dict, List + +import torch +import torch.nn.functional as F +from torch.nn.parallel import DistributedDataParallel as DDP +import torch.distributed as distr +import torch.multiprocessing as mp +import gzip +import math +from copy import deepcopy + +import tqdm +from gym import Space +from habitat import Config, logger +from habitat.utils.visualizations.utils import append_text_to_image +from habitat_baselines.common.base_il_trainer import BaseILTrainer +from habitat_baselines.common.baseline_registry import baseline_registry +from habitat_baselines.common.environments import get_env_class +from habitat_baselines.common.obs_transformers import ( + apply_obs_transforms_batch, + apply_obs_transforms_obs_space, + get_active_obs_transforms, +) +from habitat_extensions.measures import Position +from habitat_baselines.common.tensorboard_utils import TensorboardWriter +from habitat_baselines.utils.common import batch_obs, generate_video +from habitat_baselines.utils.common import ( + get_checkpoint_id, + poll_checkpoint_folder, +) + +from habitat_extensions.utils import observations_to_image +from vlnce_baselines.common.aux_losses import AuxLosses +from vlnce_baselines.common.env_utils import ( + construct_envs_auto_reset_false, + construct_envs, + is_slurm_batch_job, +) +from vlnce_baselines.common.utils import * + +from habitat_extensions.measures import NDTW +from fastdtw import fastdtw + +from ..utils import get_camera_orientations12 +from ..utils import ( + length2mask, dir_angle_feature, dir_angle_feature_with_ele, +) + +with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=FutureWarning) + import tensorflow as tf # noqa: F401 + + +class BaseVLNCETrainer(BaseILTrainer): + r"""A base trainer for VLN-CE imitation learning.""" + supported_tasks: List[str] = ["VLN-v0"] + + def __init__(self, config=None): + super().__init__(config) + 
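+        # Only device selection and bookkeeping happen here; the policy and the frozen
+        # waypoint predictor are built later in _initialize_policy().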
self.policy = None + self.device = ( + torch.device("cuda", self.config.TORCH_GPU_ID) + if torch.cuda.is_available() + else torch.device("cpu") + ) + self.obs_transforms = [] + self.start_epoch = 0 + self.step_id = 0 + + def _initialize_policy( + self, + config: Config, + load_from_ckpt: bool, + observation_space: Space, + action_space: Space, + ) -> None: + policy = baseline_registry.get_policy(self.config.MODEL.policy_name) + self.policy = policy.from_config( + config=config, + observation_space=observation_space, + action_space=action_space, + ) + ''' initialize the waypoint predictor here ''' + from vlnce_baselines.waypoint_pred.TRM_net import BinaryDistPredictor_TRM + self.waypoint_predictor = BinaryDistPredictor_TRM(device=self.device) + self.waypoint_predictor.load_state_dict( + torch.load( + 'pretrained/wp_pred/waypoint_predictor', + map_location = torch.device('cpu'), + )['predictor']['state_dict'] + ) + for param in self.waypoint_predictor.parameters(): + param.requires_grad_(False) + + self.policy.to(self.device) + self.waypoint_predictor.to(self.device) + self.num_recurrent_layers = self.policy.net.num_recurrent_layers + + if self.config.GPU_NUMBERS > 1: + print('Using', self.config.GPU_NUMBERS,'GPU!') + self.policy.net = DDP(self.policy.net.to(self.device), device_ids=[self.device], + output_device=self.device, find_unused_parameters=True, broadcast_buffers=False) + # self.waypoint_predictor = DDP(self.waypoint_predictor.to(self.device), device_ids=[self.device], + # output_device=self.device, find_unused_parameters=True, broadcast_buffers=False) + + self.optimizer = torch.optim.AdamW( + self.policy.parameters(), lr=self.config.IL.lr, + ) + + if load_from_ckpt: + ckpt_path = config.IL.ckpt_to_load + ckpt_dict = self.load_checkpoint(ckpt_path, map_location="cpu") + + if 'module' in list(ckpt_dict['state_dict'].keys())[0] and self.config.GPU_NUMBERS == 1: + self.policy.net = torch.nn.DataParallel(self.policy.net.to(self.device), + device_ids=[self.device], output_device=self.device) + self.policy.load_state_dict(ckpt_dict["state_dict"]) + self.policy.net = self.policy.net.module + self.waypoint_predictor = torch.nn.DataParallel(self.waypoint_predictor.to(self.device), + device_ids=[self.device], output_device=self.device) + # self.waypoint_predictor.load_state_dict(ckpt_dict["waypoint_predictor_state_dict"]) + # self.waypoint_predictor = self.waypoint_predictor.module + else: + self.policy.load_state_dict(ckpt_dict["state_dict"]) + # self.waypoint_predictor.load_state_dict(ckpt_dict["waypoint_predictor_state_dict"]) + if config.IL.is_requeue: + self.optimizer.load_state_dict(ckpt_dict["optim_state"]) + self.start_epoch = ckpt_dict["epoch"] + 1 + self.step_id = ckpt_dict["step_id"] + logger.info(f"Loaded weights from checkpoint: {ckpt_path}") + + self.waypoint_predictor.eval() + + params = sum(param.numel() for param in self.policy.parameters()) + params_t = sum( + p.numel() for p in self.policy.parameters() if p.requires_grad + ) + logger.info(f"Agent parameters: {params/1e6} MB. Trainable: {params_t/1e6} MB") + logger.info("Finished setting up policy.") + + # def save_checkpoint(self, file_name) -> None: + # r"""Save checkpoint with specified name. 
+ + # Args: + # file_name: file name for checkpoint + + # Returns: + # None + # """ + # checkpoint = { + # "state_dict": self.policy.state_dict(), + # "config": self.config, + # } + # torch.save( + # checkpoint, os.path.join(self.config.CHECKPOINT_FOLDER, file_name) + # ) + + def load_checkpoint(self, checkpoint_path, *args, **kwargs) -> Dict: + return torch.load(checkpoint_path, *args, **kwargs) + + # def _update_agent( + # self, + # observations, + # prev_actions, + # not_done_masks, + # corrected_actions, + # weights, + # step_grad: bool = True, + # loss_accumulation_scalar: int = 1, + # ): + # T, N = corrected_actions.size() + + # recurrent_hidden_states = torch.zeros( + # N, + # self.num_recurrent_layers, + # self.config.MODEL.STATE_ENCODER.hidden_size, + # device=self.device, + # ) + # AuxLosses.clear() + # # observations['rgb'] = observations['rgb'][0:2] + # # observations['depth'] = observations['depth'][0:2] + # # observations['rxr_instruction'] = observations['rxr_instruction'][0:2] + # # not_done_masks = not_done_masks[0:2] + # # prev_actions = prev_actions[0:2] + + # distribution = self.policy.build_distribution( + # observations, recurrent_hidden_states, prev_actions, not_done_masks) + + # logits = distribution.logits + # logits = logits.view(T, N, -1) + + # action_loss = F.cross_entropy( + # logits.permute(0, 2, 1), corrected_actions, reduction="none" + # ) + # action_loss = ((weights * action_loss).sum(0) / weights.sum(0)).mean() + + # aux_mask = (weights > 0).view(-1) + # aux_loss = AuxLosses.reduce(aux_mask) + + # loss = action_loss + aux_loss + # loss = loss / loss_accumulation_scalar + # loss.backward() + + # if step_grad: + # self.optimizer.step() + # self.optimizer.zero_grad() + + # # if isinstance(aux_loss, torch.Tensor): + # # aux_loss = aux_loss.item() + # # return loss.item(), action_loss.item(), aux_loss + + # return loss, action_loss, aux_loss + + @staticmethod + def _pause_envs( + envs_to_pause, + envs, + recurrent_hidden_states, + not_done_masks, + prev_actions, + batch, + rgb_frames=None, + # positions=None + ): + # pausing envs with no new episode + if len(envs_to_pause) > 0: + state_index = list(range(envs.num_envs)) + for idx in reversed(envs_to_pause): + state_index.pop(idx) + envs.pause_at(idx) + # positions.pop(idx) + + # indexing along the batch dimensions + recurrent_hidden_states = recurrent_hidden_states[state_index] + not_done_masks = not_done_masks[state_index] + prev_actions = prev_actions[state_index] + + for k, v in batch.items(): + batch[k] = v[state_index] + + if rgb_frames is not None: + rgb_frames = [rgb_frames[i] for i in state_index] + + return ( + envs, + recurrent_hidden_states, + not_done_masks, + prev_actions, + batch, + rgb_frames, + # positions + ) + + def _eval_checkpoint( + self, + checkpoint_path: str, + writer: TensorboardWriter, + checkpoint_index: int = 0, + ) -> None: + r"""Evaluates a single checkpoint. 
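+        Rolls out the policy in the vectorized environments and records per-episode
+        metrics (distance_to_goal, success, oracle_success, SPL, nDTW, collisions);
+        aggregated results are written to RESULTS_DIR and TensorBoard.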
+ + Args: + checkpoint_path: path of checkpoint + writer: tensorboard writer object + checkpoint_index: index of the current checkpoint + + Returns: + None + """ + if self.local_rank < 1: + logger.info(f"checkpoint_path: {checkpoint_path}") + + if self.config.EVAL.USE_CKPT_CONFIG: + config = self._setup_eval_config( + self.load_checkpoint(checkpoint_path, map_location="cpu")[ + "config" + ] + ) + else: + config = self.config.clone() + config.defrost() + # config.TASK_CONFIG.DATASET.SPLIT = config.EVAL.SPLIT + # config.TASK_CONFIG.DATASET.ROLES = ["guide"] + # config.TASK_CONFIG.DATASET.LANGUAGES = config.EVAL.LANGUAGES + # config.TASK_CONFIG.TASK.NDTW.SPLIT = config.EVAL.SPLIT + # config.TASK_CONFIG.TASK.SDTW.SPLIT = config.EVAL.SPLIT + config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False + config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = ( + -1 + ) + config.IL.ckpt_to_load = checkpoint_path + if len(config.VIDEO_OPTION) > 0: + config.defrost() + config.TASK_CONFIG.TASK.MEASUREMENTS.append("TOP_DOWN_MAP_VLNCE") + config.TASK_CONFIG.TASK.MEASUREMENTS.append("COLLISIONS") + config.freeze() + + if config.EVAL.SAVE_RESULTS: + fname = os.path.join( + config.RESULTS_DIR, + f"stats_ckpt_{checkpoint_index}_{config.TASK_CONFIG.DATASET.SPLIT}.json", + ) + if os.path.exists(fname): + print("skipping -- evaluation exists.") + return + + envs = construct_envs( + config, get_env_class(config.ENV_NAME), + auto_reset_done=False, + episodes_allowed=self.traj # split by rank + ) + + dataset_length = sum(envs.number_of_episodes) + print('local rank:', self.local_rank, '|', 'dataset length:', dataset_length) + + obs_transforms = get_active_obs_transforms(config) + observation_space = apply_obs_transforms_obs_space( + envs.observation_spaces[0], obs_transforms + ) + self._initialize_policy( + config, + load_from_ckpt=True, + observation_space=observation_space, + action_space=envs.action_spaces[0], + ) + self.policy.eval() + self.waypoint_predictor.eval() + + observations = envs.reset() + observations = extract_instruction_tokens( + observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID + ) + batch = batch_obs(observations, self.device) + batch = apply_obs_transforms_batch(batch, obs_transforms) + + if 'CMA' in self.config.MODEL.policy_name: + rnn_states = torch.zeros( + envs.num_envs, + self.num_recurrent_layers, + config.MODEL.STATE_ENCODER.hidden_size, + device=self.device, + ) + elif 'VLNBERT' in self.config.MODEL.policy_name: + h_t = torch.zeros( + envs.num_envs, 768, + device=self.device, + ) + language_features = torch.zeros( + envs.num_envs, 80, 768, + device=self.device, + ) + + not_done_masks = torch.zeros( + envs.num_envs, 1, dtype=torch.uint8, device=self.device + ) + + stats_episodes = {} + + rgb_frames = [[] for _ in range(envs.num_envs)] + if len(config.VIDEO_OPTION) > 0: + os.makedirs(config.VIDEO_DIR, exist_ok=True) + + if config.EVAL.EPISODE_COUNT == -1: + episodes_to_eval = sum(envs.number_of_episodes) + else: + episodes_to_eval = min( + config.EVAL.EPISODE_COUNT, sum(envs.number_of_episodes) + ) + + pbar = tqdm.tqdm(total=episodes_to_eval) if config.use_pbar else None + log_str = ( + f"[Ckpt: {checkpoint_index}]" + " [Episodes evaluated: {evaluated}/{total}]" + " [Time elapsed (s): {time}]" + ) + start_time = time.time() + + # number = 0 + total_weight = 0. + ml_loss = 0. 
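+        # Rollout loop: step every env until all evaluation episodes have produced metrics;
+        # envs whose next episode was already evaluated are paused via _pause_envs.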
+ bpositions = [[] for _ in range(envs.num_envs)] + while envs.num_envs > 0 and len(stats_episodes) < episodes_to_eval: + current_episodes = envs.current_episodes() + positions = []; headings = [] + for ob_i in range(len(current_episodes)): + agent_state_i = envs.call_at(ob_i, "get_agent_info", {}) + positions.append(agent_state_i['position']) + headings.append(agent_state_i['heading']) + + with torch.no_grad(): + if 'CMA' in self.config.MODEL.policy_name: + # instructions + instruction_embedding, all_lang_masks = self.policy.net( + mode = "language", + observations = batch, + ) + + # candidate waypoints prediction + cand_rgb, cand_depth, \ + cand_direction, cand_mask, candidate_lengths, \ + batch_angles, batch_distances = self.policy.net( + mode = "waypoint", + waypoint_predictor = self.waypoint_predictor, + observations = batch, + in_train = False, + ) + # navigation action logits + logits, rnn_states = self.policy.net( + mode = 'navigation', + observations = batch, + instruction = instruction_embedding, + text_mask = all_lang_masks, + rnn_states = rnn_states, + headings = headings, + cand_rgb = cand_rgb, + cand_depth = cand_depth, + cand_direction = cand_direction, + cand_mask = cand_mask, + masks = not_done_masks, + ) + logits = logits.masked_fill_(cand_mask, -float('inf')) + + elif 'VLNBERT' in self.config.MODEL.policy_name: + if 'R2R' in self.config.TASK_CONFIG.DATASET.DATA_PATH: + lang_idx_tokens = batch['instruction'] + padding_idx = 0 + lang_masks = (lang_idx_tokens != padding_idx) + lang_lengths = lang_masks.sum(1) + lang_token_type_ids = torch.zeros_like(lang_masks, + dtype=torch.long, device=self.device) + h_t_flag = h_t.sum(1)==0.0 + h_t_init, language_features = self.policy.net( + mode='language', + lang_idx_tokens=lang_idx_tokens, + lang_masks=lang_masks) + elif 'RxR' in self.config.TASK_CONFIG.DATASET.DATA_PATH: + to_be_masked = ((torch.abs(batch['rxr_instruction']) == 0)*1.).mean(-1) + lang_masks = torch.ones_like(to_be_masked) - to_be_masked + # lang_lengths = all_lang_masks.sum(1) + h_t_flag = h_t.sum(1)==0.0 + h_t_init, language_features = self.policy.net( + mode='language', + observations=batch, + lang_masks=lang_masks, + ) + else: + raise NotImplementedError + h_t[h_t_flag] = h_t_init[h_t_flag] + language_features = torch.cat( + (h_t.unsqueeze(1), language_features[:,1:,:]), dim=1) + # candidate waypoints prediction + cand_rgb, cand_depth, \ + cand_direction, cand_mask, candidate_lengths, \ + batch_angles, batch_distances = self.policy.net( + mode = "waypoint", + waypoint_predictor = self.waypoint_predictor, + observations = batch, + in_train = False, + ) + # navigation action logits + logits, h_t = self.policy.net( + mode = 'navigation', + observations=batch, + lang_masks=lang_masks, + lang_feats=language_features, + # lang_token_type_ids=lang_token_type_ids, + headings=headings, + cand_rgb = cand_rgb, + cand_depth = cand_depth, + cand_direction = cand_direction, + cand_mask = cand_mask, + masks = not_done_masks, + ) + logits = logits.masked_fill_(cand_mask, -float('inf')) + + # high-to-low actions in environments + actions = logits.argmax(dim=-1, keepdim=True) + env_actions = [] + for j in range(logits.size(0)): + if actions[j].item() == candidate_lengths[j]-1: + env_actions.append({'action': + {'action': 0, 'action_args':{}}}) + else: + env_actions.append({'action': + {'action': 4, # HIGHTOLOW + 'action_args':{ + 'angle': batch_angles[j][actions[j].item()], + 'distance': batch_distances[j][actions[j].item()], + }}}) + + outputs = envs.step(env_actions) + 
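+                # Unpack the vectorized step results; for non-STOP actions the env is asked to
+                # record the low-level path and collisions just traversed (change_current_path).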
observations, _, dones, infos = [list(x) for x in zip(*outputs)] + for j, ob in enumerate(observations): + if env_actions[j]['action']['action'] == 0: + continue + else: + envs.call_at(j, + 'change_current_path', # to update and record low-level path + {'new_path': ob.pop('positions'), + 'collisions': ob.pop('collisions')} + ) + + not_done_masks = torch.tensor( + [[0] if done else [1] for done in dones], + dtype=torch.uint8, device=self.device) + + # reset envs and observations if necessary + for i in range(envs.num_envs): + if len(config.VIDEO_OPTION) > 0: + frame = observations_to_image(observations[i], infos[i]) + frame = append_text_to_image( + frame, current_episodes[i].instruction.instruction_text + ) + rgb_frames[i].append(frame) + + if not dones[i]: + continue + + # ep done, calculate metrics + info = infos[i] + metric = {} + metric['steps_taken'] = info['steps_taken'] + ep_id = str(envs.current_episodes()[i].episode_id) + gt_path = np.array(self.gt_data[ep_id]['locations']).astype(np.float) + if 'current_path' in envs.current_episodes()[i].info.keys(): + positions_ = np.array(envs.current_episodes()[i].info['current_path']).astype(np.float) + collisions_ = np.array(envs.current_episodes()[i].info['collisions']) + assert collisions_.shape[0] == positions_.shape[0] - 1 + else: + positions_ = np.array(dis_to_con(np.array(info['position']['position']))).astype(np.float) + distance = np.array(info['position']['distance']).astype(np.float) + metric['distance_to_goal'] = distance[-1] + metric['success'] = 1. if distance[-1] <= 3. and env_actions[i]['action']['action'] == 0 else 0. + metric['oracle_success'] = 1. if (distance <= 3.).any() else 0. + metric['path_length'] = np.linalg.norm(positions_[1:] - positions_[:-1],axis=1).sum() + try: + metric['collisions'] = collisions_.mean() + except: + metric['collisions'] = 0 + pass + + gt_length = distance[0] + metric['spl'] = metric['success']*gt_length/max(gt_length,metric['path_length']) + + act_con_path = positions_ + gt_con_path = np.array(dis_to_con(gt_path)).astype(np.float) + dtw_distance = fastdtw(act_con_path, gt_con_path, dist=NDTW.euclidean_distance)[0] + nDTW = np.exp(-dtw_distance / (len(gt_con_path) * config.TASK_CONFIG.TASK.SUCCESS_DISTANCE)) + + metric['ndtw'] = nDTW + stats_episodes[current_episodes[i].episode_id] = metric + + observations[i] = envs.reset_at(i)[0] # envs[i] change to next episode + + if 'CMA' in self.config.MODEL.policy_name: + rnn_states[i] *= 0. + elif 'VLNBERT' in self.config.MODEL.policy_name: + h_t[i] *= 0. 
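+                    # The finished episode's recurrent state was zeroed above; progress logging
+                    # and the optional video dump for that episode follow.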
+ + if config.use_pbar: + pbar.update() + else: + logger.info( + log_str.format( + evaluated=len(stats_episodes), + total=episodes_to_eval, + time=round(time.time() - start_time), + ) + ) + + if len(config.VIDEO_OPTION) > 0: + generate_video( + video_option=config.VIDEO_OPTION, + video_dir=config.VIDEO_DIR, + images=rgb_frames[i], + episode_id=current_episodes[i].episode_id, + checkpoint_idx=checkpoint_index, + metrics={ + "spl": stats_episodes[ + current_episodes[i].episode_id + ]["spl"] + }, + tb_writer=writer, + fps=1, + ) + + # del stats_episodes[current_episodes[i].episode_id][ + # "top_down_map_vlnce" + # ] + # del stats_episodes[current_episodes[i].episode_id][ + # "collisions" + # ] + rgb_frames[i] = [] + + observations = extract_instruction_tokens( + observations, + self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID, + ) + batch = batch_obs(observations, self.device) + batch = apply_obs_transforms_batch(batch, obs_transforms) + + envs_to_pause = [] + next_episodes = envs.current_episodes() + + for i in range(envs.num_envs): + if next_episodes[i].episode_id in stats_episodes: # 出现了重复的ep,表示这个模拟器中的episode已经全部过了一遍 + envs_to_pause.append(i) + + if 'VLNBERT' in self.config.MODEL.policy_name: + rnn_states = h_t + + headings = torch.tensor(headings) + ( + envs, + rnn_states, + not_done_masks, + headings, # prev_actions + batch, + rgb_frames, + # positions + ) = self._pause_envs( + envs_to_pause, + envs, + rnn_states, + not_done_masks, + headings, # prev_actions + batch, + rgb_frames, + # positions + ) + headings = headings.tolist() + if 'VLNBERT' in self.config.MODEL.policy_name: + h_t = rnn_states + + envs.close() + if config.use_pbar: + pbar.close() + if self.world_size > 1: + distr.barrier() + aggregated_stats = {} + num_episodes = len(stats_episodes) + # print('rank', self.local_rank, 'evaluated',num_episodes, 'episodes') + for stat_key in next(iter(stats_episodes.values())).keys(): + aggregated_stats[stat_key] = ( + sum(v[stat_key] for v in stats_episodes.values()) + / num_episodes + ) + # print(self.local_rank, aggregated_stats) + total = torch.tensor(num_episodes).cuda() + if self.world_size > 1: + dist.reduce(total,dst=0) + total = total.item() + + if self.world_size > 1: + logger.info( + f"rank {self.local_rank}'s {num_episodes}-episode results: {aggregated_stats}") + for k,v in aggregated_stats.items(): + v = torch.tensor(v*num_episodes).cuda() + # print(self.local_rank, k+':', v.item(), num_episodes, 'before reduce') + cat_v = gather_list_and_concat(v,self.world_size) + # print(self.local_rank, k+':', cat_v, num_episodes, 'after_reduce') + v = (sum(cat_v)/total).item() + # print(self.local_rank, k+':', v, num_episodes, 'after divide total') + aggregated_stats[k] = v + + split = config.TASK_CONFIG.DATASET.SPLIT + fname = os.path.join( + config.RESULTS_DIR, + f"stats_ep_ckpt_{checkpoint_index}_{split}_r{self.local_rank}_w{self.world_size}.json", + ) + with open(fname, "w") as f: + json.dump(stats_episodes, f, indent=4) + + if self.local_rank < 1: + if config.EVAL.SAVE_RESULTS: + fname = os.path.join( + config.RESULTS_DIR, + f"stats_ckpt_{checkpoint_index}_{split}.json", + ) + with open(fname, "w") as f: + json.dump(aggregated_stats, f, indent=4) + + logger.info(f"Episodes evaluated: {total}") + checkpoint_num = checkpoint_index + 1 + for k, v in aggregated_stats.items(): + logger.info(f"Average episode {k}: {v:.6f}") + writer.add_scalar(f"eval_{k}/{split}", v, checkpoint_num) + + def collect_val_traj(self): + from habitat_extensions.task import ALL_ROLES_MASK, RxRVLNCEDatasetV1 
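+        # Ground-truth trajectories are loaded per split (and per role for RxR), then episode
+        # ids are sharded across ranks via [local_rank::GPU_NUMBERS]: e.g. with GPU_NUMBERS=2,
+        # rank 0 takes ids at even positions and rank 1 the odd ones.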
+ trajectories = defaultdict(list) + split = self.config.TASK_CONFIG.DATASET.SPLIT + + if 'rxr' in self.config.BASE_TASK_CONFIG_PATH: + if "{role}" in self.config.IL.RECOLLECT_TRAINER.gt_file: + gt_data = {} + for role in RxRVLNCEDatasetV1.annotation_roles: + if ( + ALL_ROLES_MASK not in self.config.TASK_CONFIG.DATASET.ROLES + and role not in self.config.TASK_CONFIG.DATASET.ROLES + ): + continue + + with gzip.open( + self.config.IL.RECOLLECT_TRAINER.gt_file.format( + split=split, role=role + ), + "rt", + ) as f: + gt_data.update(json.load(f)) + else: + with gzip.open( + self.config.IL.RECOLLECT_TRAINER.gt_path.format( + split=split) + ) as f: + gt_data = json.load(f) + else: + with gzip.open( + self.config.TASK_CONFIG.TASK.NDTW.GT_PATH.format(split=split) + ) as f: + gt_data = json.load(f) + + self.gt_data = gt_data + + trajectories = gt_data + self.trajectories = gt_data + trajectories = list(trajectories.keys())[self.config.local_rank::self.config.GPU_NUMBERS] + + return trajectories + + def eval(self) -> None: + r"""Main method of trainer evaluation. Calls _eval_checkpoint() that + is specified in Trainer class that inherits from BaseRLTrainer + or BaseILTrainer + + Returns: + None + """ + self.device = ( + torch.device("cuda", self.config.TORCH_GPU_ID) + if torch.cuda.is_available() + else torch.device("cpu") + ) + + if "tensorboard" in self.config.VIDEO_OPTION: + assert ( + len(self.config.TENSORBOARD_DIR) > 0 + ), "Must specify a tensorboard directory for video display" + os.makedirs(self.config.TENSORBOARD_DIR, exist_ok=True) + if "disk" in self.config.VIDEO_OPTION: + assert ( + len(self.config.VIDEO_DIR) > 0 + ), "Must specify a directory for storing videos on disk" + + world_size = self.config.GPU_NUMBERS + self.world_size = world_size + self.local_rank = self.config.local_rank + + self.config.defrost() + # split = self.config.TASK_CONFIG.DATASET.SPLIT + # self.config.TASK_CONFIG.TASK.NDTW.SPLIT = split + # self.config.TASK_CONFIG.TASK.SDTW.SPLIT = split + self.config.TASK_CONFIG.DATASET.ROLES = ["guide"] + self.config.TASK_CONFIG.TASK.MEASUREMENTS = ['POSITION', 'STEPS_TAKEN'] + self.config.SIMULATOR_GPU_IDS = [self.config.SIMULATOR_GPU_IDS[self.config.local_rank]] + + if 'HIGHTOLOW' in self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS: + idx = self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS.index('HIGHTOLOW') + self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS[idx] = 'HIGHTOLOWEVAL' + self.config.TASK_CONFIG.DATASET.LANGUAGES = self.config.EVAL.LANGUAGES + self.config.TASK_CONFIG.DATASET.SPLIT = self.config.EVAL.SPLIT + self.config.TASK_CONFIG.TASK.NDTW.SPLIT = self.config.EVAL.SPLIT + self.config.TASK_CONFIG.TASK.SDTW.SPLIT = self.config.EVAL.SPLIT + self.config.use_pbar = not is_slurm_batch_job() + # if 'rxr' in self.config.BASE_TASK_CONFIG_PATH: + # self.config.EVAL.trajectories_file = \ + # self.config.EVAL.trajectories_file[:-8] + '_w' + \ + # str(self.world_size) + '_r' + str(self.local_rank) + '.json.gz' + + # if choosing image + resize_config = self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES + crop_config = self.config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS + config = self.config.TASK_CONFIG + camera_orientations = get_camera_orientations12() + + # sensor_uuids = [] + for sensor_type in ["RGB", "DEPTH"]: + resizer_size = dict(resize_config)[sensor_type.lower()] + cropper_size = dict(crop_config)[sensor_type.lower()] + sensor = getattr(config.SIMULATOR, f"{sensor_type}_SENSOR") + + for action, orient in camera_orientations.items(): + 
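+            # One extra RGB/DEPTH sensor is registered per heading from get_camera_orientations12(),
+            # giving a 12-view panorama (which appears to match MODEL.NUM_ANGLES) for the
+            # waypoint predictor.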
camera_template = f"{sensor_type}_{action}" + camera_config = deepcopy(sensor) + camera_config.ORIENTATION = camera_orientations[action] + camera_config.UUID = camera_template.lower() + # sensor_uuids.append(camera_config.UUID) + setattr(config.SIMULATOR, camera_template, camera_config) + config.SIMULATOR.AGENT_0.SENSORS.append(camera_template) + resize_config.append((camera_template.lower(), resizer_size)) + crop_config.append((camera_template.lower(), cropper_size)) + + self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES = resize_config + self.config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS = crop_config + self.config.TASK_CONFIG = config + self.config.SENSORS = config.SIMULATOR.AGENT_0.SENSORS + + self.config.freeze() + # self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False + # self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = ( + # -1 + # ) + torch.cuda.set_device(self.device) + if world_size > 1: + distr.init_process_group(backend='nccl', init_method='env://') + self.device = self.config.TORCH_GPU_IDS[self.local_rank] + torch.cuda.set_device(self.device) + self.config.defrost() + self.config.TORCH_GPU_ID = self.config.TORCH_GPU_IDS[self.local_rank] + self.config.freeze() + # + # if 'rxr' in self.config.BASE_TASK_CONFIG_PATH: + self.traj = self.collect_val_traj() + with TensorboardWriter( + self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs + ) as writer: + if os.path.isfile(self.config.EVAL.CKPT_PATH_DIR): + # evaluate singe checkpoint + proposed_index = get_checkpoint_id( + self.config.EVAL.CKPT_PATH_DIR + ) + if proposed_index is not None: + ckpt_idx = proposed_index + else: + ckpt_idx = 0 + self._eval_checkpoint( + self.config.EVAL.CKPT_PATH_DIR, + writer, + checkpoint_index=ckpt_idx, + ) + else: + # evaluate multiple checkpoints in order + # prev_ckpt_ind = -1 #TODO eval start index + evaluated = [] + while True: + current_ckpt = None + while current_ckpt is None: + checkpoint_folder = self.config.EVAL_CKPT_PATH_DIR + if not self.config.CEPH_IO: + models_paths = [p for p in filter(os.path.isfile, glob.glob(checkpoint_folder + "/*")) if p not in evaluated] + else: + models_paths = [os.path.join(self.config.CEPH_URL,p) for p in self.client.list(self.config.CEPH_URL) if os.path.join(self.config.CEPH_URL,p) not in evaluated] + if len(models_paths) > 0: + models_paths.sort(key=self._get_iter) + current_ckpt = models_paths[0] + prev_ckpt_ind = current_ckpt.split('.')[-2] + else: + current_ckpt = None + time.sleep(2) # sleep for 2 secs before polling again + # time.sleep(10) + if self.local_rank < 1: + logger.info(f"=======current_ckpt: {current_ckpt}=======") + # prev_ckpt_ind += 1 + self._eval_checkpoint( + checkpoint_path=current_ckpt, + writer=writer, + checkpoint_index=prev_ckpt_ind, + ) + evaluated.append(current_ckpt) + + def inference(self) -> None: + r"""Runs inference on a single checkpoint, creating a path predictions file.""" + checkpoint_path = self.config.INFERENCE.CKPT_PATH + logger.info(f"checkpoint_path: {checkpoint_path}") + + self.config.defrost() + self.config.TASK_CONFIG.DATASET.SPLIT = self.config.INFERENCE.SPLIT + self.config.TASK_CONFIG.DATASET.ROLES = ["guide"] + self.config.TASK_CONFIG.DATASET.LANGUAGES = self.config.INFERENCE.LANGUAGES + self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False + self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = ( + -1 + ) + self.config.IL.ckpt_to_load = self.config.INFERENCE.CKPT_PATH + 
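+        # Inference drops all task measurements, keeps only the instruction sensors, and swaps
+        # HIGHTOLOW for HIGHTOLOWINFERENCE so the predicted path can be recorded.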
self.config.TASK_CONFIG.TASK.MEASUREMENTS = [] + self.config.TASK_CONFIG.TASK.SENSORS = [ + s for s in self.config.TASK_CONFIG.TASK.SENSORS if "INSTRUCTION" in s + ] + ########### Additional Config ########### + self.config.SIMULATOR_GPU_IDS = [self.config.SIMULATOR_GPU_IDS[self.config.local_rank]] + if 'HIGHTOLOW' in self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS: + idx = self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS.index('HIGHTOLOW') + self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS[idx] = 'HIGHTOLOWINFERENCE' + # if choosing image + resize_config = self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES + crop_config = self.config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS + config = self.config.TASK_CONFIG + camera_orientations = get_camera_orientations12() + for sensor_type in ["RGB", "DEPTH"]: + resizer_size = dict(resize_config)[sensor_type.lower()] + cropper_size = dict(crop_config)[sensor_type.lower()] + sensor = getattr(config.SIMULATOR, f"{sensor_type}_SENSOR") + + for action, orient in camera_orientations.items(): + camera_template = f"{sensor_type}_{action}" + camera_config = deepcopy(sensor) + camera_config.ORIENTATION = camera_orientations[action] + camera_config.UUID = camera_template.lower() + setattr(config.SIMULATOR, camera_template, camera_config) + config.SIMULATOR.AGENT_0.SENSORS.append(camera_template) + resize_config.append((camera_template.lower(), resizer_size)) + crop_config.append((camera_template.lower(), cropper_size)) + + self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES = resize_config + self.config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS = crop_config + self.config.TASK_CONFIG = config + self.config.SENSORS = config.SIMULATOR.AGENT_0.SENSORS + # self.config.ENV_NAME = "VLNCEInferenceEnv" #TODO is this necessary? 
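The loop just above expands the base RGB and DEPTH sensors into a panoramic rig: for each sensor type it derives eleven extra camera configs at 30-degree yaw increments (matching `get_camera_orientations12` in `vlnce_baselines/common/utils.py`) and registers matching resize/crop transforms. The following self-contained sketch illustrates that expansion with plain dicts standing in for the habitat `Config` nodes; the dict fields and variable names are illustrative only.

```python
import math
from copy import deepcopy

base_sensors = {
    "RGB": {"UUID": "rgb", "ORIENTATION": [0.0, 0.0, 0.0]},
    "DEPTH": {"UUID": "depth", "ORIENTATION": [0.0, 0.0, 0.0]},
}
# 30-degree yaw increments, as produced by get_camera_orientations12()
orientations = {str(30 * k): [0.0, math.pi / 6 * k, 0.0] for k in range(1, 12)}

agent_sensors = []
for sensor_type, sensor in base_sensors.items():
    for angle, orient in orientations.items():
        cam = deepcopy(sensor)              # clone the base sensor config
        cam["ORIENTATION"] = orient         # rotate it about the vertical axis
        cam["UUID"] = f"{sensor_type}_{angle}".lower()  # e.g. "rgb_30"
        agent_sensors.append(cam)

print([c["UUID"] for c in agent_sensors][:3])  # ['rgb_30', 'rgb_60', 'rgb_90']
```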
+ self.config.freeze() + + if self.config.INFERENCE.USE_CKPT_CONFIG: + config = self._setup_eval_config( + self.load_checkpoint(checkpoint_path, map_location="cpu")[ + "config" + ] + ) + else: + config = self.config.clone() + config.defrost() + config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False + config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = ( + -1 + ) + config.IL.ckpt_to_load = checkpoint_path + config.freeze() + + eps = self.collect_val_traj() + envs = construct_envs( + config, get_env_class(config.ENV_NAME), + auto_reset_done=False, + episodes_allowed=eps[:10] if sys.gettrace() else None # for debug, ep subset + ) + + obs_transforms = get_active_obs_transforms(config) + observation_space = apply_obs_transforms_obs_space( + envs.observation_spaces[0], obs_transforms + ) + + self._initialize_policy( + config, + load_from_ckpt=True, + observation_space=observation_space, + action_space=envs.action_spaces[0], + ) + self.policy.eval() + self.waypoint_predictor.eval() + + observations = envs.reset() + observations = extract_instruction_tokens( + observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID + ) + batch = batch_obs(observations, self.device) + batch = apply_obs_transforms_batch(batch, obs_transforms) + + if 'CMA' in self.config.MODEL.policy_name: + rnn_states = torch.zeros( + envs.num_envs, + self.num_recurrent_layers, + config.MODEL.STATE_ENCODER.hidden_size, + device=self.device, + ) + elif 'VLNBERT' in self.config.MODEL.policy_name: + h_t = torch.zeros( + envs.num_envs, 768, + device=self.device, + ) + language_features = torch.zeros( + envs.num_envs, 80, 768, + device=self.device, + ) + not_done_masks = torch.zeros( + envs.num_envs, 1, dtype=torch.uint8, device=self.device + ) + + episode_predictions = defaultdict(list) + # episode ID --> instruction ID for rxr predictions format + instruction_ids: Dict[str, int] = {} + + # populate episode_predictions with the starting state + current_episodes = envs.current_episodes() + for i in range(envs.num_envs): + episode_predictions[current_episodes[i].episode_id].append( + envs.call_at(i, "get_agent_info", {}) + ) + if config.INFERENCE.FORMAT == "rxr": + ep_id = current_episodes[i].episode_id + k = current_episodes[i].instruction.instruction_id + instruction_ids[ep_id] = int(k) + + with tqdm.tqdm( + total=sum(envs.count_episodes()), + desc=f"[inference:{self.config.INFERENCE.SPLIT}]", + ) as pbar: + while envs.num_envs > 0: + current_episodes = envs.current_episodes() + positions = []; headings = [] + for i in range(envs.num_envs): + agent_state_i = envs.call_at(i,"get_agent_info", {}) + positions.append(agent_state_i['position']) + headings.append(agent_state_i['heading']) + + with torch.no_grad(): + if 'CMA' in self.config.MODEL.policy_name: + # instructions + instruction_embedding, all_lang_masks = self.policy.net( + mode = "language", + observations = batch, + ) + + # candidate waypoints prediction + cand_rgb, cand_depth, \ + cand_direction, cand_mask, candidate_lengths, \ + batch_angles, batch_distances = self.policy.net( + mode = "waypoint", + waypoint_predictor = self.waypoint_predictor, + observations = batch, + in_train = False, + ) + # navigation action logits + logits, rnn_states = self.policy.net( + mode = 'navigation', + observations = batch, + instruction = instruction_embedding, + text_mask = all_lang_masks, + rnn_states = rnn_states, + headings = headings, + cand_rgb = cand_rgb, + cand_depth = cand_depth, + cand_direction = cand_direction, + cand_mask = cand_mask, + masks 
= not_done_masks, + ) + logits = logits.masked_fill_(cand_mask, -float('inf')) + + # high-to-low actions in environments + actions = logits.argmax(dim=-1, keepdim=True) + env_actions = [] + for j in range(logits.size(0)): + if actions[j].item() == candidate_lengths[j]-1: + env_actions.append({'action': + {'action': 0, 'action_args':{}}}) + else: + env_actions.append({'action': + {'action': 4, # HIGHTOLOW + 'action_args':{ + 'angle': batch_angles[j][actions[j].item()], + 'distance': batch_distances[j][actions[j].item()], + }}}) + + outputs = envs.step(env_actions) + observations, _, dones, infos = [list(x) for x in zip(*outputs)] + for i, ob in enumerate(observations): + if env_actions[i]['action']['action'] == 0: + continue + else: + envs.call_at( + i, 'update_cur_path', {'new_path': ob.pop('cur_path')} + ) # to update and record low-level path + + not_done_masks = torch.tensor( + [[0] if done else [1] for done in dones], + dtype=torch.uint8, + device=self.device, + ) + + # reset envs and observations if necessary + for i in range(envs.num_envs): + if not dones[i]: + continue + + ep_id = envs.current_episodes()[i].episode_id + if 'cur_path' in envs.current_episodes()[i].info: + episode_predictions[ep_id] += envs.current_episodes()[i].info['cur_path'] + episode_predictions[ep_id][-1]['stop'] = True + # assert len(episode_predictions[ep_id]) <= 500 + + observations[i] = envs.reset_at(i)[0] + if 'CMA' in self.config.MODEL.policy_name: + rnn_states[i] *= 0. + elif 'VLNBERT' in self.config.MODEL.policy_name: + h_t[i] *= 0. + # prev_actions[i] = torch.zeros(1, dtype=torch.long) + pbar.update() + + observations = extract_instruction_tokens( + observations, + self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID, + ) + batch = batch_obs(observations, self.device) + batch = apply_obs_transforms_batch(batch, obs_transforms) + + envs_to_pause = [] + next_episodes = envs.current_episodes() + for i in range(envs.num_envs): + if not dones[i]: + continue + + if next_episodes[i].episode_id in episode_predictions: + envs_to_pause.append(i) + else: + episode_predictions[next_episodes[i].episode_id].append( + envs.call_at(i, "get_agent_info", {}) + ) + if config.INFERENCE.FORMAT == "rxr": + ep_id = next_episodes[i].episode_id + k = next_episodes[i].instruction.instruction_id + instruction_ids[ep_id] = int(k) + # number += 1 + + headings = torch.tensor(headings) + ( + envs, + rnn_states, + not_done_masks, + headings, # prev_actions + batch, + rgb_frames, + # positions + ) = self._pause_envs( + envs_to_pause, + envs, + rnn_states, + not_done_masks, + headings, # prev_actions + batch, + # rgb_frames, + # positions + ) + headings = headings.tolist() + if 'VLNBERT' in self.config.MODEL.policy_name: + h_t = rnn_states + + envs.close() + + if config.INFERENCE.FORMAT == "r2r": + with open(config.INFERENCE.PREDICTIONS_FILE, "w") as f: + json.dump(episode_predictions, f, indent=2) + + logger.info( + f"Predictions saved to: {config.INFERENCE.PREDICTIONS_FILE}" + ) + else: # use 'rxr' format for rxr-habitat leaderboard + predictions_out = [] + + for k,v in episode_predictions.items(): + + # save only positions that changed + path = [v[0]["position"]] + for p in v[1:]: + if path[-1] != p["position"]: + path.append(p["position"]) + + predictions_out.append( + { + "instruction_id": instruction_ids[k], + "path": path, + } + ) + + predictions_out.sort(key=lambda x: x["instruction_id"]) + with jsonlines.open( + config.INFERENCE.PREDICTIONS_FILE, mode="w" + ) as writer: + writer.write_all(predictions_out) + + logger.info( + 
f"Predictions saved to: {config.INFERENCE.PREDICTIONS_FILE}" + ) diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/common/env_utils.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/common/env_utils.py new file mode 100755 index 0000000..2e32cf7 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/common/env_utils.py @@ -0,0 +1,220 @@ +import os +import random +import sys +from typing import List, Optional, Type, Union + +import habitat +from habitat import logger +from habitat import Config, Env, RLEnv, VectorEnv, make_dataset +from habitat_baselines.utils.env_utils import make_env_fn + +random.seed(0) + +SLURM_JOBID = os.environ.get("SLURM_JOB_ID", None) + + +def is_slurm_job() -> bool: + return SLURM_JOBID is not None + + +def is_slurm_batch_job() -> bool: + r"""Heuristic to determine if a slurm job is a batch job or not. Batch jobs + will have a job name that is not a shell unless the user specifically set the job + name to that of a shell. Interactive jobs have a shell name as their job name. + """ + return is_slurm_job() and os.environ.get("SLURM_JOB_NAME", None) not in ( + None, + "bash", + "zsh", + "fish", + "tcsh", + "sh", + ) + + +def construct_envs( + config: Config, + env_class: Type[Union[Env, RLEnv]], + workers_ignore_signals: bool = False, + auto_reset_done: bool = True, + episodes_allowed: Optional[List[str]] = None, +) -> VectorEnv: + r"""Create VectorEnv object with specified config and env class type. + To allow better performance, dataset are split into small ones for + each individual env, grouped by scenes. + :param config: configs that contain num_environments as well as information + :param necessary to create individual environments. + :param env_class: class type of the envs to be created. + :param workers_ignore_signals: Passed to :ref:`habitat.VectorEnv`'s constructor + :param auto_reset_done: Whether or not to automatically reset the env on done + :return: VectorEnv object created according to specification. 
+ """ + + num_envs_per_gpu = config.NUM_ENVIRONMENTS + if isinstance(config.SIMULATOR_GPU_IDS, list): + gpus = config.SIMULATOR_GPU_IDS + else: + gpus = [config.SIMULATOR_GPU_IDS] + num_gpus = len(gpus) + num_envs = num_gpus * num_envs_per_gpu + + if episodes_allowed is not None: + config.defrost() + config.TASK_CONFIG.DATASET.EPISODES_ALLOWED = episodes_allowed + config.freeze() + + configs = [] + env_classes = [env_class for _ in range(num_envs)] + dataset = make_dataset(config.TASK_CONFIG.DATASET.TYPE) + scenes = config.TASK_CONFIG.DATASET.CONTENT_SCENES + if "*" in config.TASK_CONFIG.DATASET.CONTENT_SCENES: + scenes = dataset.get_scenes_to_load(config.TASK_CONFIG.DATASET) + logger.info(f"SPLTI: {config.TASK_CONFIG.DATASET.SPLIT}, NUMBER OF SCENES: {len(scenes)}") + + if num_envs > 1: + if len(scenes) == 0: + raise RuntimeError( + "No scenes to load, multi-process logic relies on being able" + " to split scenes uniquely between processes" + ) + + if len(scenes) < num_envs and len(scenes) != 1: + raise RuntimeError( + "reduce the number of GPUs or envs as there" + " aren't enough number of scenes" + ) + + random.shuffle(scenes) + + if len(scenes) == 1: + scene_splits = [[scenes[0]] for _ in range(num_envs)] + else: + scene_splits = [[] for _ in range(num_envs)] + for idx, scene in enumerate(scenes): + scene_splits[idx % len(scene_splits)].append(scene) + + assert sum(map(len, scene_splits)) == len(scenes) + + for i in range(num_gpus): + for j in range(num_envs_per_gpu): + proc_config = config.clone() + proc_config.defrost() + proc_id = (i * num_envs_per_gpu) + j + + task_config = proc_config.TASK_CONFIG + task_config.SEED += proc_id + if len(scenes) > 0: + task_config.DATASET.CONTENT_SCENES = scene_splits[proc_id] + + task_config.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = gpus[i] + + task_config.SIMULATOR.AGENT_0.SENSORS = config.SENSORS + + proc_config.freeze() + configs.append(proc_config) + + # is_debug = True if sys.gettrace() else False + is_debug = False + env_entry = habitat.ThreadedVectorEnv if is_debug else habitat.VectorEnv + envs = env_entry( + make_env_fn=make_env_fn, + env_fn_args=tuple(zip(configs, env_classes)), + auto_reset_done=auto_reset_done, + workers_ignore_signals=workers_ignore_signals, + ) + return envs + + +def construct_envs_auto_reset_false( + config: Config, env_class: Type[Union[Env, RLEnv]] +) -> VectorEnv: + return construct_envs(config, env_class, auto_reset_done=False) + +def construct_envs_for_rl( + config: Config, + env_class: Type[Union[Env, RLEnv]], + workers_ignore_signals: bool = False, + auto_reset_done: bool = True, + episodes_allowed: Optional[List[str]] = None, +) -> VectorEnv: + r"""Create VectorEnv object with specified config and env class type. + To allow better performance, dataset are split into small ones for + each individual env, grouped by scenes. + :param config: configs that contain num_environments as well as information + :param necessary to create individual environments. + :param env_class: class type of the envs to be created. + :param workers_ignore_signals: Passed to :ref:`habitat.VectorEnv`'s constructor + :param auto_reset_done: Whether or not to automatically reset the env on done + :return: VectorEnv object created according to specification. 
+ """ + + num_envs_per_gpu = config.NUM_ENVIRONMENTS + if isinstance(config.SIMULATOR_GPU_IDS, list): + gpus = config.SIMULATOR_GPU_IDS + else: + gpus = [config.SIMULATOR_GPU_IDS] + num_gpus = len(gpus) + num_envs = num_gpus * num_envs_per_gpu + + if episodes_allowed is not None: + config.defrost() + config.TASK_CONFIG.DATASET.EPISODES_ALLOWED = episodes_allowed + config.freeze() + + configs = [] + env_classes = [env_class for _ in range(num_envs)] + dataset = make_dataset(config.TASK_CONFIG.DATASET.TYPE) + scenes = config.TASK_CONFIG.DATASET.CONTENT_SCENES + if "*" in config.TASK_CONFIG.DATASET.CONTENT_SCENES: + scenes = dataset.get_scenes_to_load(config.TASK_CONFIG.DATASET) + + if num_envs > 1: + if len(scenes) == 0: + raise RuntimeError( + "No scenes to load, multi-process logic relies on being able" + " to split scenes uniquely between processes" + ) + + if len(scenes) < num_envs and len(scenes) != 1: + raise RuntimeError( + "reduce the number of GPUs or envs as there" + " aren't enough number of scenes" + ) + random.shuffle(scenes) + + if len(scenes) == 1: + scene_splits = [[scenes[0]] for _ in range(num_envs)] + else: + scene_splits = [[] for _ in range(num_envs)] + for idx, scene in enumerate(scenes): + scene_splits[idx % len(scene_splits)].append(scene) + + assert sum(map(len, scene_splits)) == len(scenes) + + for i in range(num_gpus): + for j in range(num_envs_per_gpu): + proc_config = config.clone() + proc_config.defrost() + proc_id = (i * num_envs_per_gpu) + j + + task_config = proc_config.TASK_CONFIG + task_config.SEED += proc_id + if len(scenes) > 0: + task_config.DATASET.CONTENT_SCENES = scene_splits[proc_id] + + task_config.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = gpus[i] + + task_config.SIMULATOR.AGENT_0.SENSORS = config.SENSORS + + proc_config.freeze() + configs.append(proc_config) + + is_debug = True if sys.gettrace() else False + env_entry = habitat.ThreadedVectorEnv if is_debug else habitat.VectorEnv + envs = env_entry( + make_env_fn=make_env_fn, + env_fn_args=tuple(zip(configs, env_classes)), + auto_reset_done=auto_reset_done, + workers_ignore_signals=workers_ignore_signals, + ) + return envs diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/common/environments.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/common/environments.py new file mode 100755 index 0000000..fa66d53 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/common/environments.py @@ -0,0 +1,322 @@ +from collections import defaultdict +from typing import Any, Dict, Optional, Tuple, List, Union + +import habitat +import numpy as np +from habitat import Config, Dataset +from habitat.core.simulator import Observations +from habitat.tasks.utils import cartesian_to_polar +from habitat.utils.geometry_utils import quaternion_rotate_vector +from habitat_baselines.common.baseline_registry import baseline_registry +from habitat.sims.habitat_simulator.actions import HabitatSimActions + +from habitat.core.simulator import SensorSuite +from habitat.core.registry import registry + + +@baseline_registry.register_env(name="VLNCEDaggerEnv") +class VLNCEDaggerEnv(habitat.RLEnv): + def __init__(self, config: Config, dataset: Optional[Dataset] = None): + super().__init__(config.TASK_CONFIG, dataset) + self.prev_episode_id = "something different" + self.keys = ['rgb', 'rgb_30', 'rgb_60', 'rgb_90', 'rgb_120', 'rgb_150', 'rgb_180', 'rgb_210', 'rgb_240', 'rgb_270', 'rgb_300', 'rgb_330'] + self.pano_rgbs_sensors = [{k:self._env.sim._sensors[k] for k in self.keys}] + + 
sim_sensors = [] + for sensor_name in ['RGB_SENSOR'] + [k.upper() for k in self.keys if k != 'rgb']: + sensor_cfg = getattr(self._env.sim.habitat_config, sensor_name) + sensor_type = registry.get_sensor(sensor_cfg.TYPE) + + assert sensor_type is not None, "invalid sensor type {}".format( + sensor_cfg.TYPE + ) + sim_sensors.append(sensor_type(sensor_cfg)) + self.sensor_suite = SensorSuite(sim_sensors) + + self.current_scene = self._env.sim._current_scene + + def reset(self) -> Observations: + observations = self._env.reset() + if self.current_scene != self._env.sim._current_scene: + self.pano_rgbs_sensors = [{k:self._env.sim._sensors[k] for k in self.keys}] + + sim_sensors = [] + for sensor_name in ['RGB_SENSOR'] + [k.upper() for k in self.keys if k != 'rgb']: + sensor_cfg = getattr(self._env.sim.habitat_config, sensor_name) + sensor_type = registry.get_sensor(sensor_cfg.TYPE) + + assert sensor_type is not None, "invalid sensor type {}".format( + sensor_cfg.TYPE + ) + sim_sensors.append(sensor_type(sensor_cfg)) + self.sensor_suite = SensorSuite(sim_sensors) + + self.current_scene = self._env.sim._current_scene + + return observations + + def get_reward_range(self) -> Tuple[float, float]: + # We don't use a reward for DAgger, but the baseline_registry requires + # we inherit from habitat.RLEnv. + return (0.0, 0.0) + + def get_reward(self, observations: Observations) -> float: + return 0.0 + + def get_done(self, observations: Observations) -> bool: + return self._env.episode_over + + def get_info(self, observations: Observations) -> Dict[Any, Any]: + return self.habitat_env.get_metrics() + + def get_metrics(self): + return self.habitat_env.get_metrics() + + def get_geodesic_dist(self, + node_a: List[float], node_b: List[float]): + return self._env.sim.geodesic_distance(node_a, node_b) + + def check_navigability(self, node: List[float]): + return self._env.sim.is_navigable(node) + + def get_agent_info(self): + agent_state = self._env.sim.get_agent_state() + heading_vector = quaternion_rotate_vector( + agent_state.rotation.inverse(), np.array([0, 0, -1]) + ) + heading = cartesian_to_polar(-heading_vector[2], heading_vector[0])[1] + return { + "position": agent_state.position.tolist(), + "heading": heading, + "stop": self._env.task.is_stop_called, + } + + def get_observation_at(self, + source_position: List[float], + source_rotation: List[Union[int, np.float64]], + keep_agent_at_new_pose: bool = False): + return self._env.sim.get_observations_at( + source_position, + source_rotation, + keep_agent_at_new_pose) + + def observations_by_angles(self, angle_list: List[float]): + r'''for getting observations from desired angles + requires rad, positive represents anticlockwise''' + obs = [] + sim = self._env.sim + init_state = sim.get_agent_state() + prev_angle = 0 + left_action = HabitatSimActions.TURN_LEFT + init_amount = sim.get_agent(0).agent_config.action_space[left_action].actuation.amount # turn left + for angle in angle_list: + sim.get_agent(0).agent_config.action_space[left_action].actuation.amount = (angle-prev_angle)*180/np.pi + obs.append(sim.step(left_action)) + prev_angle = angle + sim.set_agent_state(init_state.position, init_state.rotation) + sim.get_agent(0).agent_config.action_space[left_action].actuation.amount = init_amount + return obs + + def current_dist_to_goal(self): + sim = self._env.sim + init_state = sim.get_agent_state() + init_distance = self._env.sim.geodesic_distance( + init_state.position, self._env.current_episode.goals[0].position, + ) + return init_distance + + def 
current_dist_to_refpath(self, path): + sim = self._env.sim + init_state = sim.get_agent_state() + current_pos = init_state.position + circle_dists = [] + for pos in path: + circle_dists.append( + self._env.sim.geodesic_distance(current_pos, pos) + ) + # circle_dists = np.linalg.norm(np.array(path)-current_pos, axis=1).tolist() + return circle_dists + + def get_cand_idx(self,ref_path,angles,distances,candidate_length): + episode_id = self._env.current_episode.episode_id + if episode_id != self.prev_episode_id: + self.progress = 0 + self.prev_sub_goal_pos = [0.0,0.0,0.0] + progress = self.progress + # ref_path = self.envs.current_episodes()[j].reference_path + circle_dists = self.current_dist_to_refpath(ref_path) + circle_bool = np.array(circle_dists) <= 3.0 + cand_dists_to_goal = [] + if circle_bool.sum() == 0: # no gt point within 3.0m + sub_goal_pos = self.prev_sub_goal_pos + else: + cand_idxes = np.where(circle_bool * (np.arange(0,len(ref_path))>=progress))[0] + if len(cand_idxes) == 0: + sub_goal_pos = ref_path[progress] #prev_sub_goal_pos[perm_index] + else: + compare = np.array(list(range(cand_idxes[0],cand_idxes[0]+len(cand_idxes)))) == cand_idxes + if np.all(compare): + sub_goal_idx = cand_idxes[-1] + else: + sub_goal_idx = np.where(compare==False)[0][0]-1 + sub_goal_pos = ref_path[sub_goal_idx] + self.progress = sub_goal_idx + + self.prev_sub_goal_pos = sub_goal_pos + + for k in range(len(angles)): + angle_k = angles[k] + forward_k = distances[k] + dist_k = self.cand_dist_to_subgoal(angle_k, forward_k, sub_goal_pos) + # distance to subgoal + cand_dists_to_goal.append(dist_k) + + # distance to final goal + curr_dist_to_goal = self.current_dist_to_goal() + # if within target range (which def as 3.0) + if curr_dist_to_goal < 1.5: + oracle_cand_idx = candidate_length - 1 + else: + oracle_cand_idx = np.argmin(cand_dists_to_goal) + + self.prev_episode_id = episode_id + # if curr_dist_to_goal == np.inf: + + progress100 = self.progress/len(ref_path) + return oracle_cand_idx, progress100#, sub_goal_pos + + def cand_dist_to_goal(self, angle: float, forward: float): + r'''get resulting distance to goal by executing + a candidate action''' + + sim = self._env.sim + init_state = sim.get_agent_state() + + forward_action = HabitatSimActions.MOVE_FORWARD + init_forward = sim.get_agent(0).agent_config.action_space[ + forward_action].actuation.amount + + theta = np.arctan2(init_state.rotation.imag[1], + init_state.rotation.real) + angle / 2 + rotation = np.quaternion(np.cos(theta), 0, np.sin(theta), 0) + sim.set_agent_state(init_state.position, rotation) + + ksteps = int(forward//init_forward) + for k in range(ksteps): + sim.step_without_obs(forward_action) + post_state = sim.get_agent_state() + post_distance = self._env.sim.geodesic_distance( + post_state.position, self._env.current_episode.goals[0].position, + ) + + # reset agent state + sim.set_agent_state(init_state.position, init_state.rotation) + + return post_distance + + def cand_dist_to_subgoal(self, + angle: float, forward: float, + sub_goal: Any): + r'''get resulting distance to goal by executing + a candidate action''' + + sim = self._env.sim + init_state = sim.get_agent_state() + + forward_action = HabitatSimActions.MOVE_FORWARD + init_forward = sim.get_agent(0).agent_config.action_space[ + forward_action].actuation.amount + + theta = np.arctan2(init_state.rotation.imag[1], + init_state.rotation.real) + angle / 2 + rotation = np.quaternion(np.cos(theta), 0, np.sin(theta), 0) + sim.set_agent_state(init_state.position, rotation) + + 
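`cand_dist_to_goal` and `cand_dist_to_subgoal` above rotate the agent by constructing a quaternion `(cos θ, 0, sin θ, 0)`, i.e. a rotation of `2θ` about the vertical (y) axis, while `get_agent_info` recovers the heading by applying the inverse rotation to the forward vector `[0, 0, -1]`. The sketch below reproduces that round trip with plain numpy; the helper names are illustrative and not part of the repo.

```python
import numpy as np


def quat_rotate(q, v):
    # rotate 3-vector v by unit quaternion q = (w, x, y, z)
    w, u = q[0], np.asarray(q[1:])
    return v + 2.0 * np.cross(u, np.cross(u, v) + w * v)


def heading_to_quaternion(heading):
    # y-axis (up) rotation by `heading` radians, i.e. the (cos θ, 0, sin θ, 0)
    # construction above with θ = heading / 2
    return np.array([np.cos(heading / 2.0), 0.0, np.sin(heading / 2.0), 0.0])


def quaternion_to_heading(q):
    # mirrors get_agent_info(): rotate the forward vector by the inverse
    # rotation and read the polar angle in the x / -z plane
    conj = np.array([q[0], -q[1], -q[2], -q[3]])
    hv = quat_rotate(conj, np.array([0.0, 0.0, -1.0]))
    return float(np.arctan2(hv[0], -hv[2]))


q = heading_to_quaternion(np.pi / 2)        # face 90 degrees to the left
print(round(quaternion_to_heading(q), 3))   # 1.571
```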
ksteps = int(forward//init_forward) + prev_pos = init_state.position + dis = 0. + for k in range(ksteps): + sim.step_without_obs(forward_action) + pos = sim.get_agent_state().position + dis += np.linalg.norm(prev_pos - pos) + prev_pos = pos + post_state = sim.get_agent_state() + + post_distance = self._env.sim.geodesic_distance( + post_state.position, sub_goal, + ) + dis + + # reset agent state + sim.set_agent_state(init_state.position, init_state.rotation) + + return post_distance + + def change_current_path(self, new_path: Any, collisions: Any): + '''just for recording current path in high to low''' + if self._env.current_episode.info is None: + self._env.current_episode.info = {} + if 'current_path' not in self._env.current_episode.info.keys(): + self._env.current_episode.info['current_path'] = [np.array(self._env.current_episode.start_position)] + self._env.current_episode.info['current_path'] += new_path + if 'collisions' not in self._env.current_episode.info.keys(): + self._env.current_episode.info['collisions'] = [] + self._env.current_episode.info['collisions'] += collisions + + # def draw_point(self,point,type,map): + # from scripts.draw_map_utils import drawpoint + # drawpoint(point,type,map,self._env.sim) + + def update_cur_path(self, new_path: Dict): + if self._env.current_episode.info is None: + self._env.current_episode.info = defaultdict(list) + if 'cur_path' not in self._env.current_episode.info: + self._env.current_episode.info['cur_path'] = [] + self._env.current_episode.info['cur_path'] += new_path + + def stop_cur_path(self): # not used + assert self._env.current_episode.info is not None + assert 'cur_path' in self._env.current_episode.info.keys() + self._env.current_episode.info['cur_path'][-1]['stop'] = True + + def get_pano_rgbs_observations_at(self, + source_position: List, + source_rotation: List,): + self._env.sim.set_agent_state(source_position,source_rotation) + pano_rgbs = self.sensor_suite.get_observations(self._env.sim.get_specific_sensors_observations(self.pano_rgbs_sensors)) + + return pano_rgbs + + def get_agent_state(self): + agent_state = self._env.sim.get_agent_state() + + return (agent_state.position,agent_state.rotation) + + def set_agent_state(self, position, rotation): + self._env.sim.set_agent_state(position,rotation) + + +@baseline_registry.register_env(name="VLNCEInferenceEnv") +class VLNCEInferenceEnv(habitat.RLEnv): + def __init__(self, config: Config, dataset: Optional[Dataset] = None): + super().__init__(config.TASK_CONFIG, dataset) + + def get_reward_range(self): + return (0.0, 0.0) + + def get_reward(self, observations: Observations): + return 0.0 + + def get_done(self, observations: Observations): + return self._env.episode_over + + def get_info(self, observations: Observations): + agent_state = self._env.sim.get_agent_state() + heading_vector = quaternion_rotate_vector( + agent_state.rotation.inverse(), np.array([0, 0, -1]) + ) + heading = cartesian_to_polar(-heading_vector[2], heading_vector[0])[1] + return { + "position": agent_state.position.tolist(), + "heading": heading, + "stop": self._env.task.is_stop_called, + } diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/common/recollection_dataset.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/common/recollection_dataset.py new file mode 100755 index 0000000..525902c --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/common/recollection_dataset.py @@ -0,0 +1,297 @@ +import gzip +import json +from collections import defaultdict, deque + 
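`get_cand_idx` above supervises the waypoint policy by first picking a sub-goal on the ground-truth reference path (the furthest waypoint reachable within 3 m in a contiguous run from the current progress) and then choosing the candidate closest to that sub-goal, or the STOP candidate once the agent is within 1.5 m of the final goal. The following simplified, self-contained sketch shows only that control flow: Euclidean distances stand in for the simulator's geodesic distances, and the function and argument names are illustrative.

```python
import numpy as np


def select_oracle_action(agent_pos, ref_path, progress,
                         cand_dists_to_subgoal, dist_to_goal, num_candidates):
    """Return (oracle_candidate_index, new_progress_index)."""
    dists = np.linalg.norm(np.asarray(ref_path) - np.asarray(agent_pos), axis=1)
    reachable = np.where((dists <= 3.0) & (np.arange(len(ref_path)) >= progress))[0]

    if len(reachable) == 0:
        sub_goal_idx = progress          # fall back to the previous sub-goal
    else:
        sub_goal_idx = reachable[0]      # walk forward while indices stay contiguous
        for idx in reachable[1:]:
            if idx != sub_goal_idx + 1:
                break
            sub_goal_idx = idx

    if dist_to_goal < 1.5:               # near the goal: supervise the STOP candidate
        oracle_idx = num_candidates - 1
    else:                                # otherwise steer towards the sub-goal
        oracle_idx = int(np.argmin(cand_dists_to_subgoal))
    return oracle_idx, sub_goal_idx


# toy call: three candidate waypoints plus STOP, agent 2 m from the goal
print(select_oracle_action([0.0, 0.0, 0.0],
                           [[0.5, 0.0, 0.0], [2.0, 0.0, 0.0], [6.0, 0.0, 0.0]],
                           progress=0,
                           cand_dists_to_subgoal=[1.0, 0.4, 2.5],
                           dist_to_goal=2.0,
                           num_candidates=4))   # -> (1, 1)
```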
+import numpy as np +import torch +import tqdm +from gym import Space +from habitat.config.default import Config +from habitat.sims.habitat_simulator.actions import HabitatSimActions +from habitat_baselines.common.environments import get_env_class +from habitat_baselines.common.obs_transformers import ( + apply_obs_transforms_obs_space, + get_active_obs_transforms, +) + +from habitat_extensions.task import ALL_ROLES_MASK, RxRVLNCEDatasetV1 +from vlnce_baselines.common.env_utils import construct_envs +from vlnce_baselines.common.utils import extract_instruction_tokens + + +class TeacherRecollectionDataset(torch.utils.data.IterableDataset): + def __init__(self, config: Config): + super().__init__() + self.config = config + # self._preload = [] + self._preload = deque() + self.world_size = self.config.GPU_NUMBERS + self.rank = self.config.local_rank + + assert ( + config.IL.RECOLLECT_TRAINER.preload_size >= config.IL.batch_size + ), "preload size must be greater than batch size." + self.envs = None + self._env_observations = None + + if config.IL.use_iw: + self.inflec_weights = torch.tensor( + [1.0, config.IL.inflection_weight_coef] + ) + else: + self.inflec_weights = torch.tensor([1.0, 1.0]) + + if self.config.IL.RECOLLECT_TRAINER.preload_trajectories_file: + self.config.defrost() + self.config.IL.RECOLLECT_TRAINER.trajectories_file = \ + self.config.IL.RECOLLECT_TRAINER.trajectories_file[ + :-8] + '_w' + \ + str(self.world_size) + '_r' + str(self.rank) + '.json.gz' + self.config.freeze() + with gzip.open( + config.IL.RECOLLECT_TRAINER.trajectories_file, "rt" + ) as f: + self.trajectories = json.load(f) + else: + self.trajectories = self.collect_dataset() + + self.initialize_sims() + + def initialize_sims(self): + config = self.config.clone() + config.defrost() + config.TASK_CONFIG.MEASUREMENTS = [] + config.freeze() + + self.envs = construct_envs( + config, + get_env_class(config.ENV_NAME), + episodes_allowed=list(self.trajectories.keys()), + ) + self.length = sum(self.envs.number_of_episodes) + self.obs_transforms = get_active_obs_transforms(self.config) + self._observation_space = apply_obs_transforms_obs_space( + self.envs.observation_spaces[0], self.obs_transforms + ) + + self.env_step = [0 for _ in range(self.envs.num_envs)] + self._env_observations = [[] for _ in range(self.envs.num_envs)] + + observations = self.envs.reset() + observations = extract_instruction_tokens( + observations, + self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID, + ) + for i, ep in enumerate(self.envs.current_episodes()): + path_step = self.trajectories[str(ep.episode_id)][0] + self._env_observations[i].append( + ( + observations[i], + path_step[0], # prev_action + path_step[2], # oracle_action + ) + ) + + @property + def batch_size(self): + return self.config.IL.batch_size + + @property + def observation_space(self) -> Space: + assert self.envs is not None, "Simulator must first be loaded." + assert self._observation_space is not None + return self._observation_space + + @property + def action_space(self) -> Space: + assert self.envs is not None, "Simulator must first be loaded." + return self.envs.action_spaces[0] + + def close_sims(self): + self.envs.close() + del self.envs + del self._env_observations + self.envs = None + self._env_observations = None + + def collect_dataset(self): + r"""Uses the ground truth trajectories to create a teacher forcing + datset for a given split. Loads both guide and follower episodes. 
+ """ + trajectories = defaultdict(list) + split = self.config.TASK_CONFIG.DATASET.SPLIT + + if "{role}" in self.config.IL.RECOLLECT_TRAINER.gt_file: + gt_data = {} + for role in RxRVLNCEDatasetV1.annotation_roles: + if ( + ALL_ROLES_MASK not in self.config.TASK_CONFIG.DATASET.ROLES + and role not in self.config.TASK_CONFIG.DATASET.ROLES + ): + continue + + with gzip.open( + self.config.IL.RECOLLECT_TRAINER.gt_file.format( + split=split, role=role + ), + "rt", + ) as f: + gt_data.update(json.load(f)) + else: + with gzip.open( + self.config.IL.RECOLLECT_TRAINER.gt_path.format(split=split) + ) as f: + gt_data = json.load(f) + + t = ( + tqdm.tqdm(gt_data.items(), "GT Collection") + if self.config.use_pbar + else gt_data.items() + ) + + for episode_id, trajectory in t: + if ( + self.config.IL.RECOLLECT_TRAINER.max_traj_len != -1 + and len(trajectory["actions"]) + > self.config.IL.RECOLLECT_TRAINER.max_traj_len + ) or ( + self.config.IL.RECOLLECT_TRAINER.min_traj_len != -1 + and len(trajectory["actions"]) + < self.config.IL.RECOLLECT_TRAINER.min_traj_len + ): + continue + + for i, action in enumerate(trajectory["actions"]): + prev_action = ( + trajectories[episode_id][i - 1][1] + if i + else HabitatSimActions.STOP + ) + + # [prev_action, action, oracle_action] + trajectories[episode_id].append([prev_action, action, action]) + + trajectories = dict(list(trajectories.items())[self.rank::self.world_size]) + self.config.defrost() + self.config.IL.RECOLLECT_TRAINER.trajectories_file = \ + self.config.IL.RECOLLECT_TRAINER.trajectories_file[:-8]+'_w'+ \ + str(self.world_size)+'_r'+str(self.rank) + '.json.gz' + self.config.freeze() + with gzip.open( + self.config.IL.RECOLLECT_TRAINER.trajectories_file, "wt" + ) as f: + f.write(json.dumps(trajectories)) + return trajectories + + def _load_next(self): + """ + Episode length is currently not considered. We were previously batching episodes + together with similar lengths. Not sure if we need to bring that back. 
+ """ + # self.rank = 0 + if len(self._preload): + # out = self._preload[self.rank] + # self._preload = self._preload[self.world_size:] + # return out + return self._preload.popleft() + + while ( + len(self._preload) < self.config.IL.RECOLLECT_TRAINER.preload_size + ): + current_episodes = self.envs.current_episodes() + prev_eps = current_episodes + + # get the next action for each env + actions = [ + self.trajectories[str(ep.episode_id)][self.env_step[i]][1] + for i, ep in enumerate(current_episodes) + ] + + outputs = self.envs.step(actions) + observations, _, dones, _ = [list(x) for x in zip(*outputs)] + observations = extract_instruction_tokens( + observations, + self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID, + ) + + current_episodes = self.envs.current_episodes() + + for i in range(self.envs.num_envs): + self.env_step[i] += 1 + if dones[i]: + assert len(self._env_observations[i]) == len( + self.trajectories[str(prev_eps[i].episode_id)] + ), "Collected episode does not match the step count of trajectory" + self._preload.append( + ( + [o[0] for o in self._env_observations[i]], + [o[1] for o in self._env_observations[i]], + [o[2] for o in self._env_observations[i]], + ) + ) + self._env_observations[i] = [] + self.env_step[i] = 0 + + path_step = self.trajectories[ + str(current_episodes[i].episode_id) + ][self.env_step[i]] + self._env_observations[i].append( + ( + observations[i], + path_step[0], # prev_action + path_step[2], # oracle_action + ) + ) + assert ( + len(self._env_observations[i]) + <= self.config.TASK_CONFIG.ENVIRONMENT.MAX_EPISODE_STEPS + ), "Trajectories should be no more than the maximum episode steps." + + # out = self._preload[self.rank] + # self._preload = self._preload[self.world_size:] + # return out + return self._preload.popleft() + + def __next__(self): + """Takes about 1s to once self._load_next() has finished with a batch + size of 5. For this reason, we probably don't need to use extra workers. + """ + x = self._load_next() + obs, prev_actions, oracle_actions = x + + # transpose obs + obs_t = defaultdict(list) + for k in obs[0]: + for i in range(len(obs)): + obs_t[k].append(obs[i][k]) + + obs_t[k] = np.array(obs_t[k]) + + for k, v in obs_t.items(): + obs_t[k] = torch.from_numpy(np.copy(v)) + + prev_actions = torch.from_numpy(np.copy(prev_actions)) + oracle_actions = torch.from_numpy(np.copy(oracle_actions)) + + inflections = torch.cat( + [ + torch.tensor([1], dtype=torch.long), + (oracle_actions[1:] != oracle_actions[:-1]).long(), + ] + ) + + return ( + obs_t, + prev_actions, + oracle_actions, + self.inflec_weights[inflections], + ) + + def __iter__(self): + worker_info = torch.utils.data.get_worker_info() + if worker_info is not None: + assert ( + worker_info.num_workers == 1 + ), "multiple workers not supported." + + return self diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/common/utils.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/common/utils.py new file mode 100755 index 0000000..3a4f44a --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/common/utils.py @@ -0,0 +1,70 @@ +from typing import Any, Dict, List +import torch +import torch.distributed as dist +import numpy as np +import copy +import math + +def extract_instruction_tokens( + observations: List[Dict], + instruction_sensor_uuid: str, + tokens_uuid: str = "tokens", + max_length: int = 512, + pad_id: int = 0, +) -> Dict[str, Any]: + r"""Extracts instruction tokens from an instruction sensor if the tokens + exist and are in a dict structure. 
+ """ + for i in range(len(observations)): + if ( + isinstance(observations[i][instruction_sensor_uuid], dict) + and tokens_uuid in observations[i][instruction_sensor_uuid] + ): + # observations[i][instruction_sensor_uuid] = observations[i][ + # instruction_sensor_uuid + # ]["tokens"] + token = observations[i][instruction_sensor_uuid]["tokens"][:max_length] + if len(token) < max_length: + token += [pad_id] * (max_length - len(token)) + observations[i][instruction_sensor_uuid] = token + else: + break + return observations + +def gather_list_and_concat(list_of_nums,world_size): + if not torch.is_tensor(list_of_nums): + tensor = torch.Tensor(list_of_nums).cuda() + else: + if list_of_nums.is_cuda == False: + tensor = list_of_nums.cuda() + else: + tensor = list_of_nums + gather_t = [torch.ones_like(tensor) for _ in + range(world_size)] + dist.all_gather(gather_t, tensor) + return gather_t + +def dis_to_con(path, amount=0.25): + starts = path[:-1] + ends = path[1:] + new_path = [path[0]] + for s, e in zip(starts,ends): + vec = np.array(e) - np.array(s) + ratio = amount/np.linalg.norm(vec[[0,2]]) + unit = vec*ratio + times = int(1/ratio) + for i in range(times): + if i != times - 1: + location = np.array(new_path[-1])+unit + new_path.append(location.tolist()) + new_path.append(e) + + return new_path + +def get_camera_orientations12(): + base_angle_deg = 30 + base_angle_rad = math.pi / 6 + orient_dict = {} + for k in range(1,12): + orient_dict[str(base_angle_deg*k)] = [0.0, base_angle_rad*k, 0.0] + return orient_dict \ No newline at end of file diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/config/__init__.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/config/default.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/default.py new file mode 100755 index 0000000..0033057 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/default.py @@ -0,0 +1,230 @@ +from typing import List, Optional, Union + +import habitat_baselines.config.default +from habitat.config.default import CONFIG_FILE_SEPARATOR +from habitat.config.default import Config as CN + +from habitat_extensions.config.default import ( + get_extended_config as get_task_config, +) + +# ----------------------------------------------------------------------------- +# EXPERIMENT CONFIG +# ----------------------------------------------------------------------------- +_C = CN() +_C.BASE_TASK_CONFIG_PATH = "habitat_extensions/config/vlnce_task.yaml" +_C.TASK_CONFIG = CN() # task_config will be stored as a config node +_C.TRAINER_NAME = "dagger" +_C.ENV_NAME = "VLNCEDaggerEnv" +_C.SIMULATOR_GPU_IDS = [0] +_C.VIDEO_OPTION = [] # options: "disk", "tensorboard" +_C.VIDEO_DIR = "videos/debug" +_C.TENSORBOARD_DIR = "data/tensorboard_dirs/debug" +_C.RESULTS_DIR = "data/checkpoints/pretrained/evals" + +# ----------------------------------------------------------------------------- +# EVAL CONFIG +# ----------------------------------------------------------------------------- +_C.EVAL = CN() +# The split to evaluate on +_C.EVAL.SPLIT = "val_seen" +_C.EVAL.EPISODE_COUNT = -1 +_C.EVAL.LANGUAGES = ["en-US", "en-IN"] +_C.EVAL.SAMPLE = False +_C.EVAL.SAVE_RESULTS = True +_C.EVAL.EVAL_NONLEARNING = False +_C.EVAL.NONLEARNING = CN() +_C.EVAL.NONLEARNING.AGENT = "RandomAgent" + +# ----------------------------------------------------------------------------- +# 
INFERENCE CONFIG +# ----------------------------------------------------------------------------- +_C.INFERENCE = CN() +_C.INFERENCE.SPLIT = "test" +_C.INFERENCE.LANGUAGES = ["en-US", "en-IN"] +_C.INFERENCE.SAMPLE = False +_C.INFERENCE.USE_CKPT_CONFIG = True +_C.INFERENCE.CKPT_PATH = "data/checkpoints/CMA_PM_DA_Aug.pth" +_C.INFERENCE.PREDICTIONS_FILE = "predictions.json" +_C.INFERENCE.INFERENCE_NONLEARNING = False +_C.INFERENCE.NONLEARNING = CN() +_C.INFERENCE.NONLEARNING.AGENT = "RandomAgent" +_C.INFERENCE.FORMAT = "rxr" # either 'rxr' or 'r2r' +# ----------------------------------------------------------------------------- +# IMITATION LEARNING CONFIG +# ----------------------------------------------------------------------------- +_C.IL = CN() +_C.IL.lr = 2.5e-4 +_C.IL.batch_size = 5 +_C.IL.epochs = 4 +_C.IL.use_iw = True +# inflection coefficient for RxR training set GT trajectories (guide): 1.9 +# inflection coefficient for R2R training set GT trajectories: 3.2 +_C.IL.inflection_weight_coef = 3.2 +# load an already trained model for fine tuning +_C.IL.waypoint_aug = False +_C.IL.load_from_ckpt = False +_C.IL.ckpt_to_load = "data/checkpoints/ckpt.0.pth" +# if True, loads the optimizer state, epoch, and step_id from the ckpt dict. +_C.IL.is_requeue = False +# it True, start training from the saved epoch +# ----------------------------------------------------------------------------- +# IL: RXR TRAINER CONFIG +# ----------------------------------------------------------------------------- +_C.IL.RECOLLECT_TRAINER = CN() +_C.IL.RECOLLECT_TRAINER.preload_trajectories_file = True +_C.IL.RECOLLECT_TRAINER.trajectories_file = ( + "data/trajectories_dirs/debug/trajectories.json.gz" +) +# if set to a positive int, episodes with longer paths are ignored in training +_C.IL.RECOLLECT_TRAINER.max_traj_len = -1 +# if set to a positive int, effective_batch_size must be some multiple of +# IL.batch_size. Gradient accumulation enables an arbitrarily high "effective" +# batch size. +_C.IL.RECOLLECT_TRAINER.effective_batch_size = -1 +_C.IL.RECOLLECT_TRAINER.preload_size = 30 +_C.IL.RECOLLECT_TRAINER.use_iw = True +_C.IL.RECOLLECT_TRAINER.gt_file = ( + "data/datasets/RxR_VLNCE_v0_enc_xlmr/{split}/{split}_{role}_gt.json.gz" +) +# ----------------------------------------------------------------------------- +# IL: DAGGER CONFIG +# ----------------------------------------------------------------------------- +_C.IL.DAGGER = CN() +_C.IL.DAGGER.iterations = 10 +_C.IL.DAGGER.update_size = 5000 +_C.IL.DAGGER.p = 0.75 +_C.IL.DAGGER.expert_policy_sensor = "SHORTEST_PATH_SENSOR" +_C.IL.DAGGER.expert_policy_sensor_uuid = "shortest_path_sensor" +_C.IL.DAGGER.load_space = False +# if True, load saved observation space and action space +_C.IL.DAGGER.lmdb_map_size = 1.0e12 +# if True, saves data to disk in fp16 and converts back to fp32 when loading. +_C.IL.DAGGER.lmdb_fp16 = False +# How often to commit the writes to the DB, less commits is +# better, but everything must be in memory until a commit happens/ +_C.IL.DAGGER.lmdb_commit_frequency = 500 +# If True, load precomputed features directly from lmdb_features_dir. 
+_C.IL.DAGGER.preload_lmdb_features = False +_C.IL.DAGGER.lmdb_features_dir = ( + "data/trajectories_dirs/debug/trajectories.lmdb" +) +# ----------------------------------------------------------------------------- +# RL CONFIG +# ----------------------------------------------------------------------------- +_C.RL = CN() +_C.RL.POLICY = CN() +_C.RL.POLICY.OBS_TRANSFORMS = CN() +_C.RL.POLICY.OBS_TRANSFORMS.ENABLED_TRANSFORMS = [ + "CenterCropperPerSensor", +] +_C.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR = CN() +_C.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS = [ + ("rgb", (224, 224)), + ("depth", (256, 256)), +] +_C.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR = CN() +_C.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES = [ + ("rgb", (224, 298)), + ("depth", (256, 341)), +] +# ----------------------------------------------------------------------------- +# MODELING CONFIG +# ----------------------------------------------------------------------------- +_C.MODEL = CN() +_C.MODEL.policy_name = "CMAPolicy" # or "Seq2SeqPolicy" +_C.MODEL.ablate_depth = False +_C.MODEL.ablate_rgb = False +_C.MODEL.ablate_instruction = False + +_C.MODEL.INSTRUCTION_ENCODER = CN() +_C.MODEL.INSTRUCTION_ENCODER.sensor_uuid = "instruction" +_C.MODEL.INSTRUCTION_ENCODER.vocab_size = 2504 +_C.MODEL.INSTRUCTION_ENCODER.use_pretrained_embeddings = True +_C.MODEL.INSTRUCTION_ENCODER.embedding_file = ( + "data/datasets/R2R_VLNCE_v1-2_preprocessed/embeddings.json.gz" +) +_C.MODEL.INSTRUCTION_ENCODER.dataset_vocab = ( + "data/datasets/R2R_VLNCE_v1-2_preprocessed/train/train.json.gz" +) +_C.MODEL.INSTRUCTION_ENCODER.fine_tune_embeddings = False +_C.MODEL.INSTRUCTION_ENCODER.embedding_size = 50 +_C.MODEL.INSTRUCTION_ENCODER.hidden_size = 128 +_C.MODEL.INSTRUCTION_ENCODER.rnn_type = "LSTM" +_C.MODEL.INSTRUCTION_ENCODER.final_state_only = True +_C.MODEL.INSTRUCTION_ENCODER.bidirectional = False + +_C.MODEL.spatial_output = True +_C.MODEL.RGB_ENCODER = CN() +_C.MODEL.RGB_ENCODER.backbone_type = "TorchVisionResNet50" +_C.MODEL.RGB_ENCODER.output_size = 256 + +_C.MODEL.DEPTH_ENCODER = CN() +_C.MODEL.DEPTH_ENCODER.backbone_type = "VlnResnetDepthEncoder" +_C.MODEL.DEPTH_ENCODER.output_size = 128 +# type of resnet to use +_C.MODEL.DEPTH_ENCODER.backbone = "resnet50" +# path to DDPPO resnet weights +_C.MODEL.DEPTH_ENCODER.ddppo_checkpoint = ( + "pretrained/ddppo-models/gibson-2plus-resnet50.pth" +) + +_C.MODEL.STATE_ENCODER = CN() +_C.MODEL.STATE_ENCODER.hidden_size = 512 +_C.MODEL.STATE_ENCODER.rnn_type = "GRU" + +_C.MODEL.SEQ2SEQ = CN() +_C.MODEL.SEQ2SEQ.use_prev_action = False + +_C.MODEL.PROGRESS_MONITOR = CN() +_C.MODEL.PROGRESS_MONITOR.use = False +_C.MODEL.PROGRESS_MONITOR.alpha = 1.0 # loss multiplier + + +def purge_keys(config: CN, keys: List[str]) -> None: + for k in keys: + del config[k] + config.register_deprecated_key(k) + + +def get_config( + config_paths: Optional[Union[List[str], str]] = None, + opts: Optional[list] = None, +) -> CN: + r"""Create a unified config with default values. Initialized from the + habitat_baselines default config. Overwritten by values from + `config_paths` and overwritten by options from `opts`. + Args: + config_paths: List of config paths or string that contains comma + separated list of config paths. + opts: Config options (keys, values) in a list (e.g., passed from + command line into the config. For example, `opts = ['FOO.BAR', + 0.5]`. Argument can be used for parameter sweeping or quick tests. 
+ """ + config = CN() + config.merge_from_other_cfg(habitat_baselines.config.default._C) + purge_keys(config, ["SIMULATOR_GPU_ID", "TEST_EPISODE_COUNT"]) + config.merge_from_other_cfg(_C.clone()) + + if config_paths: + if isinstance(config_paths, str): + if CONFIG_FILE_SEPARATOR in config_paths: + config_paths = config_paths.split(CONFIG_FILE_SEPARATOR) + else: + config_paths = [config_paths] + + prev_task_config = "" + for config_path in config_paths: + config.merge_from_file(config_path) + if config.BASE_TASK_CONFIG_PATH != prev_task_config: + config.TASK_CONFIG = get_task_config( + config.BASE_TASK_CONFIG_PATH + ) + prev_task_config = config.BASE_TASK_CONFIG_PATH + + if opts: + config.CMD_TRAILING_OPTS = opts + config.merge_from_list(opts) + + config.freeze() + return config diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/config/nonlearning.yaml b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/nonlearning.yaml new file mode 100755 index 0000000..2b0fb74 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/nonlearning.yaml @@ -0,0 +1,17 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/vlnce_task.yaml +EVAL: + SPLIT: val_unseen + # any num greater than the actual episode count evals every episode + EPISODE_COUNT: 10 + EVAL_NONLEARNING: True + NONLEARNING: + # RandomAgent or HandcraftedAgent + AGENT: RandomAgent + +INFERENCE: + SPLIT: val_unseen + PREDICTIONS_FILE: predictions.json + INFERENCE_NONLEARNING: True + NONLEARNING: + # RandomAgent or HandcraftedAgent + AGENT: "RandomAgent" diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma.yaml b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma.yaml new file mode 100755 index 0000000..a3e6d04 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma.yaml @@ -0,0 +1,35 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/vlnce_task.yaml +TRAINER_NAME: dagger # recollect_trainer +SIMULATOR_GPU_IDS: [0] +TORCH_GPU_ID: 0 +GPU_NUMBERS: 1 +NUM_ENVIRONMENTS: 1 +TENSORBOARD_DIR: data/tensorboard_dirs/cma +CHECKPOINT_FOLDER: data/checkpoints/cma +EVAL_CKPT_PATH_DIR: data/checkpoints/cma + +EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + EPISODE_COUNT: -1 + +IL: + epochs: 45 + batch_size: 5 + + RECOLLECT_TRAINER: + gt_file: + data/datasets/R2R_VLNCE_v1-2_preprocessed/{split}/{split}_gt.json.gz + + DAGGER: + iterations: 1 + update_size: 10819 + p: 1.0 + preload_lmdb_features: False + lmdb_features_dir: data/trajectories_dirs/cma/trajectories.lmdb + +MODEL: + policy_name: CMAPolicy + + INSTRUCTION_ENCODER: + bidirectional: True diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_aug.yaml b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_aug.yaml new file mode 100755 index 0000000..c1222fc --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_aug.yaml @@ -0,0 +1,29 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/vlnce_task_aug.yaml +SIMULATOR_GPU_ID: 0 +TORCH_GPU_ID: 0 +NUM_ENVIRONMENTS: 1 +TENSORBOARD_DIR: data/tensorboard_dirs/cma_aug +CHECKPOINT_FOLDER: data/checkpoints/cma_aug +EVAL_CKPT_PATH_DIR: data/checkpoints/cma_aug + +EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + EPISODE_COUNT: -1 + +IL: + epochs: 45 + batch_size: 5 + + DAGGER: + iterations: 1 + update_size: 157232 + p: 1.0 + preload_lmdb_features: False + lmdb_features_dir: 
data/trajectories_dirs/cma_aug/trajectories.lmdb + +MODEL: + policy_name: CMAPolicy + + INSTRUCTION_ENCODER: + bidirectional: True diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_aug_tune.yaml b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_aug_tune.yaml new file mode 100755 index 0000000..d781880 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_aug_tune.yaml @@ -0,0 +1,31 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/vlnce_task.yaml +SIMULATOR_GPU_ID: 0 +TORCH_GPU_ID: 0 +NUM_ENVIRONMENTS: 1 +TENSORBOARD_DIR: data/tensorboard_dirs/cma_aug_tune +CHECKPOINT_FOLDER: data/checkpoints/cma_aug_tune +EVAL_CKPT_PATH_DIR: data/checkpoints/cma_aug_tune + +EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + EPISODE_COUNT: -1 + +IL: + epochs: 45 + batch_size: 5 + + DAGGER: + iterations: 1 + update_size: 10819 + p: 1.0 + preload_lmdb_features: False + lmdb_features_dir: data/trajectories_dirs/cma/trajectories.lmdb + load_from_ckpt: True + ckpt_to_load: data/checkpoints/cma_aug/best_checkpoint.pth # REPLACE + +MODEL: + policy_name: CMAPolicy + + INSTRUCTION_ENCODER: + bidirectional: True diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_da.yaml b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_da.yaml new file mode 100755 index 0000000..376661a --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_da.yaml @@ -0,0 +1,31 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/vlnce_task.yaml +SIMULATOR_GPU_IDS: [0] +TORCH_GPU_ID: 0 +TORCH_GPU_IDS: [0] +GPU_NUMBERS: 1 +NUM_ENVIRONMENTS: 1 +TENSORBOARD_DIR: data/tensorboard_dirs/cma_da +CHECKPOINT_FOLDER: data/checkpoints/cma_da +EVAL_CKPT_PATH_DIR: data/checkpoints/cma_da + +EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + EPISODE_COUNT: -1 + +IL: + epochs: 4 + batch_size: 5 + + DAGGER: + iterations: 10 + update_size: 5000 + p: 0.75 + preload_lmdb_features: False + lmdb_features_dir: data/trajectories_dirs/cma_da/trajectories.lmdb + +MODEL: + policy_name: CMAPolicy + + INSTRUCTION_ENCODER: + bidirectional: True diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_da_aug_tune.yaml b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_da_aug_tune.yaml new file mode 100755 index 0000000..10fa2e2 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_da_aug_tune.yaml @@ -0,0 +1,31 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/vlnce_task.yaml +SIMULATOR_GPU_ID: 0 +TORCH_GPU_ID: 0 +NUM_ENVIRONMENTS: 1 +TENSORBOARD_DIR: data/tensorboard_dirs/cma_da_aug_tune +CHECKPOINT_FOLDER: data/checkpoints/cma_da_aug_tune +EVAL_CKPT_PATH_DIR: data/checkpoints/cma_da_aug_tune + +EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + EPISODE_COUNT: -1 + +IL: + epochs: 4 + batch_size: 5 + + DAGGER: + iterations: 10 + update_size: 5000 + p: 0.5 + preload_lmdb_features: False + lmdb_features_dir: data/trajectories_dirs/cma_da_aug_tune/trajectories.lmdb + load_from_ckpt: True + ckpt_to_load: data/checkpoints/cma_aug_tune/best_checkpoint.pth # REPLACE + +MODEL: + policy_name: CMAPolicy + + INSTRUCTION_ENCODER: + bidirectional: True diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_pm.yaml b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_pm.yaml new file mode 100755 
index 0000000..02a8cab --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_pm.yaml @@ -0,0 +1,32 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/vlnce_task.yaml +SIMULATOR_GPU_ID: 0 +TORCH_GPU_ID: 0 +NUM_ENVIRONMENTS: 1 +TENSORBOARD_DIR: data/tensorboard_dirs/cma_pm +CHECKPOINT_FOLDER: data/checkpoints/cma_pm +EVAL_CKPT_PATH_DIR: data/checkpoints/cma_pm + +EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + EPISODE_COUNT: -1 + +IL: + epochs: 45 + batch_size: 5 + + DAGGER: + iterations: 1 + update_size: 10819 + p: 1.0 + preload_lmdb_features: False + lmdb_features_dir: data/trajectories_dirs/cma/trajectories.lmdb + +MODEL: + policy_name: CMAPolicy + + INSTRUCTION_ENCODER: + bidirectional: True + + PROGRESS_MONITOR: + use: True diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_pm_aug.yaml b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_pm_aug.yaml new file mode 100755 index 0000000..cca6cea --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_pm_aug.yaml @@ -0,0 +1,32 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/vlnce_task_aug.yaml +SIMULATOR_GPU_ID: 0 +TORCH_GPU_ID: 0 +NUM_ENVIRONMENTS: 1 +TENSORBOARD_DIR: data/tensorboard_dirs/cma_pm_aug +CHECKPOINT_FOLDER: data/checkpoints/cma_pm_aug +EVAL_CKPT_PATH_DIR: data/checkpoints/cma_pm_aug + +EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + EPISODE_COUNT: -1 + +IL: + epochs: 45 + batch_size: 5 + + DAGGER: + iterations: 1 + update_size: 157232 + p: 1.0 + preload_lmdb_features: False + lmdb_features_dir: data/trajectories_dirs/cma_aug/trajectories.lmdb + +MODEL: + policy_name: CMAPolicy + + INSTRUCTION_ENCODER: + bidirectional: True + + PROGRESS_MONITOR: + use: True diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_pm_aug_tune.yaml b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_pm_aug_tune.yaml new file mode 100755 index 0000000..a4cb14c --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_pm_aug_tune.yaml @@ -0,0 +1,31 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/vlnce_task.yaml +SIMULATOR_GPU_ID: 0 +TORCH_GPU_ID: 0 +NUM_ENVIRONMENTS: 1 +TENSORBOARD_DIR: data/tensorboard_dirs/cma_pm_aug_tune +CHECKPOINT_FOLDER: data/checkpoints/cma_pm_aug_tune +EVAL_CKPT_PATH_DIR: data/checkpoints/cma_pm_aug_tune + +EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + EPISODE_COUNT: -1 + +IL: + epochs: 45 + batch_size: 5 + + DAGGER: + iterations: 1 + update_size: 10819 + p: 1.0 + preload_lmdb_features: False + lmdb_features_dir: data/trajectories_dirs/cma/trajectories.lmdb + load_from_ckpt: True + ckpt_to_load: data/checkpoints/cma_pm_aug/best_checkpoint.pth # REPLACE + +MODEL: + policy_name: CMAPolicy + + INSTRUCTION_ENCODER: + bidirectional: True diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_pm_da.yaml b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_pm_da.yaml new file mode 100755 index 0000000..5480b5c --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_pm_da.yaml @@ -0,0 +1,32 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/vlnce_task.yaml +SIMULATOR_GPU_ID: 0 +TORCH_GPU_ID: 0 +NUM_ENVIRONMENTS: 1 +TENSORBOARD_DIR: data/tensorboard_dirs/cma_pm_da +CHECKPOINT_FOLDER: data/checkpoints/cma_pm_da +EVAL_CKPT_PATH_DIR: data/checkpoints/cma_pm_da + 
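The experiment YAMLs in this directory are consumed through the `get_config` helper defined earlier in `vlnce_baselines/config/default.py`, which layers them on top of the habitat_baselines defaults and then applies trailing command-line options. A hypothetical usage sketch, assuming it is run from the `Downstream/Visual-Language-Navigation` directory with habitat and habitat_baselines installed:

```python
# Hypothetical usage of get_config (vlnce_baselines/config/default.py, added above).
# Values in `opts` override both the YAML and the built-in defaults, mirroring
# command-line trailing options.
from vlnce_baselines.config.default import get_config

config = get_config(
    "vlnce_baselines/config/r2r_configs/cma.yaml",
    opts=["IL.batch_size", 4, "EVAL.SPLIT", "val_seen"],
)
print(config.TRAINER_NAME, config.IL.batch_size, config.EVAL.SPLIT)
# -> dagger 4 val_seen (given the YAML above and the overrides)
```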
+EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + EPISODE_COUNT: -1 + +IL: + epochs: 4 + batch_size: 5 + + DAGGER: + iterations: 10 + update_size: 5000 + p: 0.75 + preload_lmdb_features: False + lmdb_features_dir: data/trajectories_dirs/cma_pm_da/trajectories.lmdb + +MODEL: + policy_name: CMAPolicy + + INSTRUCTION_ENCODER: + bidirectional: True + + PROGRESS_MONITOR: + use: True diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_pm_da_aug_tune.yaml b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_pm_da_aug_tune.yaml new file mode 100755 index 0000000..8275ff9 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_pm_da_aug_tune.yaml @@ -0,0 +1,34 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/vlnce_task.yaml +SIMULATOR_GPU_ID: 0 +TORCH_GPU_ID: 0 +NUM_ENVIRONMENTS: 1 +TENSORBOARD_DIR: data/tensorboard_dirs/cma_pm_da_aug_tune +CHECKPOINT_FOLDER: data/checkpoints/cma_pm_da_aug_tune +EVAL_CKPT_PATH_DIR: data/checkpoints/cma_pm_da_aug_tune + +EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + EPISODE_COUNT: -1 + +IL: + epochs: 4 + batch_size: 5 + + DAGGER: + iterations: 10 + update_size: 5000 + p: 0.5 + preload_lmdb_features: False + lmdb_features_dir: data/trajectories_dirs/cma_pm_da_aug_tune/trajectories.lmdb + load_from_ckpt: True + ckpt_to_load: data/checkpoints/cma_pm_aug/best_checkpoint.pth # REPLACE + +MODEL: + policy_name: CMAPolicy + + INSTRUCTION_ENCODER: + bidirectional: True + + PROGRESS_MONITOR: + use: True diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_sf.yaml b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_sf.yaml new file mode 100755 index 0000000..3ddb880 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_sf.yaml @@ -0,0 +1,28 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/vlnce_task.yaml +SIMULATOR_GPU_IDS: [0] +TORCH_GPU_ID: 0 +TORCH_GPU_IDS: [0] +GPU_NUMBERS: 1 +NUM_ENVIRONMENTS: 1 +TENSORBOARD_DIR: data/tensorboard_dirs/cma_sf +CHECKPOINT_FOLDER: data/checkpoints/cma_sf +EVAL_CKPT_PATH_DIR: data/checkpoints/cma_sf + +EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + EPISODE_COUNT: -1 + +IL: + epochs: 50 + batch_size: 8 + schedule_ratio: 0.75 + decay_time: 10 + + max_traj_len: 130 + +MODEL: + policy_name: CMAPolicyO + + INSTRUCTION_ENCODER: + bidirectional: True diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_ss.yaml b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_ss.yaml new file mode 100755 index 0000000..5eff389 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/cma_ss.yaml @@ -0,0 +1,34 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/vlnce_task.yaml +SIMULATOR_GPU_IDS: [0] +TORCH_GPU_ID: 0 +TORCH_GPU_IDS: [0] +TRAINER_NAME: ss +GPU_NUMBERS: 1 +NUM_ENVIRONMENTS: 1 +TENSORBOARD_DIR: data/tensorboard_dirs/cma_ss +CHECKPOINT_FOLDER: data/checkpoints/cma_ss +EVAL_CKPT_PATH_DIR: data/checkpoints/cma_ss + +EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + EPISODE_COUNT: -1 + +#RL: +# POLICY: +# OBS_TRANSFORMS: +# ENABLED_TRANSFORMS: [Resize] + +IL: + epochs: 50 + batch_size: 8 + schedule_ratio: 0.75 + decay_time: 10 + + max_traj_len: 130 + +MODEL: + policy_name: CMAPolicyO + + INSTRUCTION_ENCODER: + bidirectional: True diff --git 
a/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/seq2seq.yaml b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/seq2seq.yaml new file mode 100755 index 0000000..e4d3ba4 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/seq2seq.yaml @@ -0,0 +1,31 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/vlnce_task.yaml +TRAINER_NAME: dagger # recollect_trainer +SIMULATOR_GPU_ID: 0 +TORCH_GPU_ID: 0 +NUM_ENVIRONMENTS: 1 +TENSORBOARD_DIR: data/tensorboard_dirs/seq2seq +CHECKPOINT_FOLDER: data/checkpoints/seq2seq +EVAL_CKPT_PATH_DIR: data/checkpoints/seq2seq + +EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + EPISODE_COUNT: -1 + +IL: + epochs: 15 + batch_size: 5 + + RECOLLECT_TRAINER: + gt_file: + data/datasets/R2R_VLNCE_v1-2_preprocessed/{split}/{split}_gt.json.gz + + DAGGER: + iterations: 1 + update_size: 10819 + p: 1.0 + preload_lmdb_features: False + lmdb_features_dir: data/trajectories_dirs/seq2seq/trajectories.lmdb + +MODEL: + policy_name: Seq2SeqPolicy diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/seq2seq_aug.yaml b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/seq2seq_aug.yaml new file mode 100755 index 0000000..59416b0 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/seq2seq_aug.yaml @@ -0,0 +1,26 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/vlnce_task_aug.yaml +SIMULATOR_GPU_ID: 0 +TORCH_GPU_ID: 0 +NUM_ENVIRONMENTS: 1 +TENSORBOARD_DIR: data/tensorboard_dirs/seq2seq_aug +CHECKPOINT_FOLDER: data/checkpoints/seq2seq_aug +EVAL_CKPT_PATH_DIR: data/checkpoints/seq2seq_aug + +EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + EPISODE_COUNT: -1 + +IL: + epochs: 15 + batch_size: 5 + + DAGGER: + iterations: 1 + update_size: 157232 + p: 1.0 + preload_lmdb_features: False + lmdb_features_dir: data/trajectories_dirs/seq2seq_aug/trajectories.lmdb + +MODEL: + policy_name: Seq2SeqPolicy diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/seq2seq_aug_tune.yaml b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/seq2seq_aug_tune.yaml new file mode 100755 index 0000000..716a466 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/seq2seq_aug_tune.yaml @@ -0,0 +1,28 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/vlnce_task.yaml +SIMULATOR_GPU_ID: 0 +TORCH_GPU_ID: 0 +NUM_ENVIRONMENTS: 1 +TENSORBOARD_DIR: data/tensorboard_dirs/seq2seq_aug_tune +CHECKPOINT_FOLDER: data/checkpoints/seq2seq_aug_tune +EVAL_CKPT_PATH_DIR: data/checkpoints/seq2seq_aug_tune + +EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + EPISODE_COUNT: -1 + +IL: + epochs: 15 + batch_size: 5 + + DAGGER: + iterations: 1 + update_size: 10819 + p: 1.0 + preload_lmdb_features: False + lmdb_features_dir: data/trajectories_dirs/seq2seq/trajectories.lmdb + load_from_ckpt: True + ckpt_to_load: data/checkpoints/seq2seq_aug/best_checkpoint.pth # REPLACE + +MODEL: + policy_name: Seq2SeqPolicy diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/seq2seq_da.yaml b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/seq2seq_da.yaml new file mode 100755 index 0000000..27e89a9 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/seq2seq_da.yaml @@ -0,0 +1,26 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/vlnce_task.yaml +SIMULATOR_GPU_ID: 0 
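+# TORCH_GPU_ID selects the GPU used by the policy; the Habitat simulator renders on SIMULATOR_GPU_ID above.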
+TORCH_GPU_ID: 0 +NUM_ENVIRONMENTS: 1 +TENSORBOARD_DIR: data/tensorboard_dirs/seq2seq_da +CHECKPOINT_FOLDER: data/checkpoints/seq2seq_da +EVAL_CKPT_PATH_DIR: data/checkpoints/seq2seq_da + +EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + EPISODE_COUNT: -1 + +IL: + epochs: 4 + batch_size: 5 + + DAGGER: + iterations: 10 + update_size: 5000 + p: 0.75 + preload_lmdb_features: False + lmdb_features_dir: data/trajectories_dirs/seq2seq_da/trajectories.lmdb + +MODEL: + policy_name: Seq2SeqPolicy diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/seq2seq_pm.yaml b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/seq2seq_pm.yaml new file mode 100755 index 0000000..bdeceee --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/seq2seq_pm.yaml @@ -0,0 +1,29 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/vlnce_task.yaml +SIMULATOR_GPU_ID: 0 +TORCH_GPU_ID: 0 +NUM_ENVIRONMENTS: 1 +TENSORBOARD_DIR: data/tensorboard_dirs/seq2seq_pm +CHECKPOINT_FOLDER: data/checkpoints/seq2seq_pm +EVAL_CKPT_PATH_DIR: data/checkpoints/seq2seq_pm + +EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + EPISODE_COUNT: -1 + +IL: + epochs: 15 + batch_size: 5 + + DAGGER: + iterations: 1 + update_size: 10819 + p: 1.0 + preload_lmdb_features: False + lmdb_features_dir: data/trajectories_dirs/seq2seq/trajectories.lmdb + +MODEL: + policy_name: Seq2SeqPolicy + + PROGRESS_MONITOR: + use: True diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/seq2seq_pm_aug.yaml b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/seq2seq_pm_aug.yaml new file mode 100755 index 0000000..59cbe00 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/seq2seq_pm_aug.yaml @@ -0,0 +1,29 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/vlnce_task_aug.yaml +SIMULATOR_GPU_ID: 0 +TORCH_GPU_ID: 0 +NUM_ENVIRONMENTS: 1 +TENSORBOARD_DIR: data/tensorboard_dirs/seq2seq_pm_aug +CHECKPOINT_FOLDER: data/checkpoints/seq2seq_pm_aug +EVAL_CKPT_PATH_DIR: data/checkpoints/seq2seq_pm_aug + +EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + EPISODE_COUNT: -1 + +IL: + epochs: 15 + batch_size: 5 + + DAGGER: + iterations: 1 + update_size: 157232 + p: 1.0 + preload_lmdb_features: False + lmdb_features_dir: data/trajectories_dirs/seq2seq_aug/trajectories.lmdb + +MODEL: + policy_name: Seq2SeqPolicy + + PROGRESS_MONITOR: + use: True diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/seq2seq_pm_da_aug_tune.yaml b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/seq2seq_pm_da_aug_tune.yaml new file mode 100755 index 0000000..a983cc2 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/seq2seq_pm_da_aug_tune.yaml @@ -0,0 +1,31 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/vlnce_task.yaml +SIMULATOR_GPU_ID: 0 +TORCH_GPU_ID: 0 +NUM_ENVIRONMENTS: 1 +TENSORBOARD_DIR: data/tensorboard_dirs/seq2seq_pm_da_aug_tune +CHECKPOINT_FOLDER: data/checkpoints/seq2seq_pm_da_aug_tune +EVAL_CKPT_PATH_DIR: data/checkpoints/seq2seq_pm_da_aug_tune + +EVAL: + USE_CKPT_CONFIG: False + SPLIT: val_unseen + EPISODE_COUNT: -1 + +IL: + epochs: 4 + batch_size: 5 + + DAGGER: + iterations: 10 + update_size: 5000 + p: 0.75 + preload_lmdb_features: False + lmdb_features_dir: data/trajectories_dirs/seq2seq_pm_da_aug_tune/trajectories.lmdb + load_from_ckpt: True + ckpt_to_load: 
data/checkpoints/seq2seq_pm_aug/best_checkpoint.pth # REPLACE + +MODEL: + policy_name: Seq2SeqPolicy + + PROGRESS_MONITOR: + use: True diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/test_set_inference.yaml b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/test_set_inference.yaml new file mode 100755 index 0000000..6be8c01 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/config/r2r_configs/test_set_inference.yaml @@ -0,0 +1,23 @@ +BASE_TASK_CONFIG_PATH: habitat_extensions/config/vlnce_task.yaml +SIMULATOR_GPU_ID: 0 +TORCH_GPU_ID: 0 +NUM_PROCESSES: 1 + +INFERENCE: + SPLIT: test + USE_CKPT_CONFIG: False + SAMPLE: False + CKPT_PATH: data/checkpoints/CMA_PM_DA_Aug.pth + PREDICTIONS_FILE: predictions.json + +MODEL: + policy_name: CMAPolicy + + INSTRUCTION_ENCODER: + bidirectional: True + + CMA: + use: True + + PROGRESS_MONITOR: + use: True diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/models/Policy_ViewSelection_CMA.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/Policy_ViewSelection_CMA.py new file mode 100755 index 0000000..78e79ce --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/Policy_ViewSelection_CMA.py @@ -0,0 +1,490 @@ +import numpy as np +import time +import torch +import torch.nn as nn +import torch.nn.functional as F + +from gym import Space +from habitat import Config +from habitat_baselines.common.baseline_registry import baseline_registry +from habitat_baselines.rl.models.rnn_state_encoder import ( + build_rnn_state_encoder, +) +from habitat_baselines.rl.ppo.policy import Net +from habitat_baselines.utils.common import CustomFixedCategorical + +from vlnce_baselines.common.aux_losses import AuxLosses +from vlnce_baselines.models.encoders.instruction_encoder import ( + InstructionEncoder, +) +from vlnce_baselines.models.encoders.image_encoders import ( + TorchVisionResNet50, + VlnResnetDepthEncoder +) +from vlnce_baselines.models.policy import ILPolicy + +from vlnce_baselines.waypoint_pred.TRM_net import BinaryDistPredictor_TRM +from vlnce_baselines.waypoint_pred.utils import nms +from vlnce_baselines.models.utils import ( + length2mask, angle_feature, dir_angle_feature) +import math + + +@baseline_registry.register_policy +class PolicyViewSelectionCMA(ILPolicy): + def __init__( + self, + observation_space: Space, + action_space: Space, + model_config: Config, + ): + super().__init__( + CMANet( + observation_space=observation_space, + model_config=model_config, + num_actions=action_space.n, + ), + action_space.n, + ) + + @classmethod + def from_config( + cls, config: Config, observation_space: Space, action_space: Space + ): + config.defrost() + config.MODEL.TORCH_GPU_ID = config.TORCH_GPU_ID + config.freeze() + + return cls( + observation_space=observation_space, + action_space=action_space, + model_config=config.MODEL, + ) + + +class CMANet(Net): + r"""A cross-modal attention (CMA) network that contains: + Instruction encoder + Depth encoder + RGB encoder + CMA state encoder + """ + + def __init__( + self, observation_space: Space, model_config: Config, num_actions + ): + super().__init__() + self.model_config = model_config + model_config.defrost() + model_config.INSTRUCTION_ENCODER.final_state_only = False + model_config.freeze() + + device = ( + torch.device("cuda", model_config.TORCH_GPU_ID) + if torch.cuda.is_available() + else torch.device("cpu") + ) + self.device = device + + # Init the instruction encoder + 
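+ # final_state_only is forced to False above, so the encoder returns the full
+ # per-token context plus a padding mask instead of only the final hidden state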
self.instruction_encoder = InstructionEncoder( + model_config.INSTRUCTION_ENCODER + ) + + # Init the depth encoder + assert model_config.DEPTH_ENCODER.backbone_type in [ + "VlnResnetDepthEncoder" + ], "DEPTH_ENCODER.backbone_type must be VlnResnetDepthEncoder" + self.depth_encoder = VlnResnetDepthEncoder( + observation_space, + output_size=model_config.DEPTH_ENCODER.output_size, + checkpoint=model_config.DEPTH_ENCODER.ddppo_checkpoint, + backbone=model_config.DEPTH_ENCODER.backbone, + spatial_output=model_config.spatial_output, + ) + + # Init the RGB encoder + assert model_config.RGB_ENCODER.backbone_type in [ + "TorchVisionResNet152", "TorchVisionResNet50" + ], "RGB_ENCODER.backbone_type must be TorchVisionResNet152 or TorchVisionResNet50" + if model_config.RGB_ENCODER.backbone_type == "TorchVisionResNet50": + self.rgb_encoder = TorchVisionResNet50( + observation_space, + model_config.RGB_ENCODER.output_size, + device, + spatial_output=model_config.spatial_output, + ) + + hidden_size = model_config.STATE_ENCODER.hidden_size + self._hidden_size = hidden_size + + # merging visual inputs + self.rgb_linear = nn.Sequential( + nn.Linear( + 2048, + model_config.RGB_ENCODER.output_size, # 256 + ), + nn.ReLU(True), + ) + if self.depth_encoder.spatial_output: + None + else: + self.depth_linear = nn.Sequential( + nn.Linear( + 128, # 128 + model_config.DEPTH_ENCODER.output_size, # 128 + ), + nn.ReLU(True), + ) + + self.vismerge_linear = nn.Sequential( + nn.Linear( + model_config.DEPTH_ENCODER.output_size + model_config.RGB_ENCODER.output_size + model_config.VISUAL_DIM.directional, + model_config.VISUAL_DIM.vis_hidden, + ), + nn.ReLU(True), + ) + + self.enc_prev_act = nn.Sequential( + nn.Linear(model_config.VISUAL_DIM.directional, model_config.VISUAL_DIM.directional), + nn.Tanh(), + ) + + # Init the RNN state decoder + self.state_encoder = build_rnn_state_encoder( + input_size=model_config.VISUAL_DIM.vis_hidden + model_config.VISUAL_DIM.directional, + hidden_size=model_config.STATE_ENCODER.hidden_size, + rnn_type=model_config.STATE_ENCODER.rnn_type, + num_layers=1, + ) + + self.prev_state_vis_attn = SoftDotAttention( + model_config.STATE_ENCODER.hidden_size, + model_config.VISUAL_DIM.vis_hidden, + model_config.VISUAL_DIM.vis_hidden, + output_tilde=False + ) + + self.text_vis_attn = SoftDotAttention( + self.instruction_encoder.output_size, + model_config.VISUAL_DIM.vis_hidden, + model_config.VISUAL_DIM.vis_hidden, + output_tilde=False + ) + + self.state_text_attn = SoftDotAttention( + model_config.STATE_ENCODER.hidden_size, + self.instruction_encoder.output_size, + self.instruction_encoder.output_size, + output_tilde=False + ) + + self.state_vis_logits = SoftDotAttention( + model_config.STATE_ENCODER.hidden_size+model_config.VISUAL_DIM.vis_hidden+self.instruction_encoder.output_size, + model_config.VISUAL_DIM.vis_hidden, + model_config.STATE_ENCODER.hidden_size, + output_tilde=False + ) + + self.register_buffer( + "_scale", torch.tensor(1.0 / ((hidden_size // 2) ** 0.5)) + ) + + self.space_pool = nn.Sequential( + nn.AdaptiveAvgPool2d((1,1)), + nn.Flatten(start_dim=2),) + + # self.critic = nn.Sequential( + # nn.Linear(model_config.STATE_ENCODER.hidden_size, model_config.STATE_ENCODER.hidden_size), + # nn.ReLU(), + # nn.Dropout(0.5), + # nn.Linear(model_config.STATE_ENCODER.hidden_size, 1), + # ) + # self.drop = nn.Dropout(p=0.50) + # self.drop_env = nn.Dropout(p=0.40) + + self.train() + self.rgb_encoder.cnn.eval() + self.depth_encoder.eval() + # self.waypoint_predictor.eval() + + @property + def 
is_blind(self): + return self.rgb_encoder.is_blind or self.depth_encoder.is_blind + + @property # trivial argument, just for init with habitat + def output_size(self): + return 1 + + @property + def num_recurrent_layers(self): + return self.state_encoder.num_recurrent_layers + + def forward(self, mode=None, + waypoint_predictor=None, + observations=None, + instruction=None, text_mask=None, + rnn_states=None, + cand_rgb=None, cand_depth=None, + cand_direction=None, cand_mask=None, + headings=None, masks=None, + post_states=None, in_train=True): + r""" + instruction_embedding: [batch_size x INSTRUCTION_ENCODER.output_size] + depth_embedding: [batch_size x DEPTH_ENCODER.output_size] + rgb_embedding: [batch_size x RGB_ENCODER.output_size] + """ + if mode == 'language': + ctx, all_lang_masks = self.instruction_encoder(observations) + + return ctx, all_lang_masks + + elif mode == 'waypoint': + # batch_size = observations['instruction'].size(0) + batch_size = observations['rgb'].shape[0] + ''' encoding rgb/depth at all directions ----------------------------- ''' + NUM_ANGLES = 120 # 120 angles 3 degrees each + NUM_IMGS = 12 + NUM_CLASSES = 12 # 12 distances at each sector + depth_batch = torch.zeros_like(observations['depth']).repeat(NUM_IMGS, 1, 1, 1) + rgb_batch = torch.zeros_like(observations['rgb']).repeat(NUM_IMGS, 1, 1, 1) + + # reverse the order of input images to clockwise + a_count = 0 + for i, (k, v) in enumerate(observations.items()): + if 'depth' in k: # You might need to double check the keys order + for bi in range(v.size(0)): + ra_count = (NUM_IMGS - a_count) % NUM_IMGS + depth_batch[ra_count + bi*NUM_IMGS] = v[bi] + rgb_batch[ra_count + bi*NUM_IMGS] = observations[k.replace('depth','rgb')][bi] + a_count += 1 + + obs_view12 = {} + obs_view12['depth'] = depth_batch + obs_view12['rgb'] = rgb_batch + + depth_embedding = self.depth_encoder(obs_view12) # torch.Size([bs, 128, 4, 4]) + rgb_embedding = self.rgb_encoder(obs_view12) # torch.Size([bs, 2048, 7, 7]) + + ''' waypoint prediction ----------------------------- ''' + waypoint_heatmap_logits = waypoint_predictor( + rgb_embedding, depth_embedding) + + # reverse the order of images back to counter-clockwise + rgb_embed_reshape = rgb_embedding.reshape( + batch_size, NUM_IMGS, 2048, 7, 7) + depth_embed_reshape = depth_embedding.reshape( + batch_size, NUM_IMGS, 128, 4, 4) + rgb_feats = torch.cat(( + rgb_embed_reshape[:,0:1,:], + torch.flip(rgb_embed_reshape[:,1:,:], [1]), + ), dim=1) + depth_feats = torch.cat(( + depth_embed_reshape[:,0:1,:], + torch.flip(depth_embed_reshape[:,1:,:], [1]), + ), dim=1) + # way_feats = torch.cat(( + # way_feats[:,0:1,:], + # torch.flip(way_feats[:,1:,:], [1]), + # ), dim=1) + + # from heatmap to points + batch_x_norm = torch.softmax( + waypoint_heatmap_logits.reshape( + batch_size, NUM_ANGLES*NUM_CLASSES, + ), dim=1 + ) + batch_x_norm = batch_x_norm.reshape( + batch_size, NUM_ANGLES, NUM_CLASSES, + ) + batch_x_norm_wrap = torch.cat(( + batch_x_norm[:,-1:,:], + batch_x_norm, + batch_x_norm[:,:1,:]), + dim=1) + batch_output_map = nms( + batch_x_norm_wrap.unsqueeze(1), + max_predictions=5, + sigma=(7.0,5.0)) + # predicted waypoints before sampling + batch_output_map = batch_output_map.squeeze(1)[:,1:-1,:] + + candidate_lengths = ((batch_output_map!=0).sum(-1).sum(-1) + 1).tolist() + if isinstance(candidate_lengths, int): + candidate_lengths = [candidate_lengths] + max_candidate = max(candidate_lengths) # including stop + cand_mask = length2mask(candidate_lengths, device=self.device) + + if in_train: + # 
Waypoint augmentation + # parts of heatmap for sampling (fix offset first) + batch_way_heats_regional = torch.cat( + (waypoint_heatmap_logits[:,-waypoint_predictor.HEATMAP_OFFSET:,:], + waypoint_heatmap_logits[:,:-waypoint_predictor.HEATMAP_OFFSET,:], + ), dim=1) + batch_way_heats_regional = batch_way_heats_regional.reshape(batch_size, 12, 10, 12) + batch_sample_angle_idxes = [] + batch_sample_distance_idxes = [] + batch_way_log_prob = [] + for j in range(batch_size): + # angle indexes with candidates + angle_idxes = batch_output_map[j].nonzero()[:, 0] + # clockwise image indexes (same as batch_x_norm) + img_idxes = ((angle_idxes.cpu().numpy()+5) // 10) + img_idxes[img_idxes==12] = 0 + # # candidate waypoint states + # way_feats_regional = way_feats[j][img_idxes] + # heatmap regions for sampling + way_heats_regional = batch_way_heats_regional[j][img_idxes].view(img_idxes.size, -1) + way_heats_probs = F.softmax(way_heats_regional, 1) + probs_c = torch.distributions.Categorical(way_heats_probs) + way_heats_act = probs_c.sample().detach() + sample_angle_idxes = [] + sample_distance_idxes = [] + for k, way_act in enumerate(way_heats_act): + if img_idxes[k] != 0: + angle_pointer = (img_idxes[k] - 1) * 10 + 5 + else: + angle_pointer = 0 + sample_angle_idxes.append(way_act//12+angle_pointer) + sample_distance_idxes.append(way_act%12) + batch_sample_angle_idxes.append(sample_angle_idxes) + batch_sample_distance_idxes.append(sample_distance_idxes) + batch_way_log_prob.append( + probs_c.log_prob(way_heats_act)) + else: + # batch_way_log_prob = None + None + + cand_rgb = torch.zeros( + (batch_size, max_candidate, 2048, 7, 7), + dtype=torch.float32, device=self.device) + cand_depth = torch.zeros( + (batch_size, max_candidate, 128, 4, 4), + dtype=torch.float32, device=self.device) + batch_angles = [] + batch_distances = [] + batch_img_idxes = [] + for j in range(batch_size): + if in_train: + angle_idxes = torch.tensor(batch_sample_angle_idxes[j]) + distance_idxes = torch.tensor(batch_sample_distance_idxes[j]) + else: + # angle indexes with candidates + angle_idxes = batch_output_map[j].nonzero()[:, 0] + # distance indexes for candidates + distance_idxes = batch_output_map[j].nonzero()[:, 1] + # 2pi- becoz counter-clockwise is the positive direction + angle_rad = 2*math.pi-angle_idxes.float()/120*2*math.pi + batch_angles.append(angle_rad.tolist()) + batch_distances.append( + ((distance_idxes + 1)*0.25).tolist()) + # counter-clockwise image indexes + img_idxes = 12 - ((angle_idxes.cpu().numpy()+5) // 10) + img_idxes[img_idxes==12] = 0 + batch_img_idxes.append(img_idxes) + for k in range(len(img_idxes)): + cand_rgb[j][k] = rgb_feats[j][img_idxes[k]] + cand_depth[j][k] = depth_feats[j][img_idxes[k]] + cand_direction = dir_angle_feature(batch_angles).to(self.device) + + if in_train: + return cand_rgb, cand_depth, cand_direction, cand_mask, candidate_lengths, batch_angles, batch_distances #, batch_way_log_prob + else: + return cand_rgb, cand_depth, cand_direction, cand_mask, candidate_lengths, batch_angles, batch_distances + + elif mode == 'navigation': + cand_rgb_feats_pool = self.space_pool(cand_rgb) + # cand_rgb_feats_pool = self.drop_env(cand_rgb_feats_pool) + rgb_in = self.rgb_linear(cand_rgb_feats_pool) + cand_depth_feats_pool = self.space_pool(cand_depth) + # cand_depth_feats_pool = self.drop_env(cand_depth_feats_pool) + depth_in = self.depth_linear(cand_depth_feats_pool) + + vis_in = self.vismerge_linear( + torch.cat((rgb_in, depth_in, cand_direction), dim=2),) + + ''' aggregate visual features by 
agent's previous state -------------- ''' + prev_state = rnn_states[:, 0:self.state_encoder.num_recurrent_layers].squeeze(1) + vis_prev_state, _ = self.prev_state_vis_attn( + prev_state, vis_in, cand_mask) + + ''' first state encoder for new visual features ''' + prev_actions = angle_feature(headings, device=self.device) + prev_actions = self.enc_prev_act(prev_actions) + # prev_actions = self.drop(prev_actions) + + state_in = torch.cat([vis_prev_state, prev_actions], dim=1) + rnn_states_out = rnn_states.detach().clone() + ( + state, + rnn_states_out[:, 0 : self.state_encoder.num_recurrent_layers], + ) = self.state_encoder( + state_in, + rnn_states[:, 0 : self.state_encoder.num_recurrent_layers], + masks, + ) + + ''' language attention using state ''' + text_state, _ = self.state_text_attn( + state, instruction, text_mask) + + ''' visual attention using attended language ''' + vis_text_feats, _ = self.text_vis_attn( + text_state, vis_in, cand_mask) + + x = torch.cat((state, vis_text_feats, text_state), dim=1) + _, logits = self.state_vis_logits( + x, vis_in, cand_mask, output_prob=False) + + return logits, rnn_states_out + + elif mode == 'waypoint_actor': + + None + + elif mode == 'critic': + return self.critic(post_states) + + +class SoftDotAttention(nn.Module): + def __init__(self, q_dim, kv_dim, hidden_dim, output_tilde=False): + '''Initialize layer.''' + super(SoftDotAttention, self).__init__() + self.linear_q = nn.Linear(q_dim, hidden_dim, bias=True) + self.linear_kv = nn.Linear(kv_dim, hidden_dim, bias=True) + self.sm = nn.Softmax(dim=1) + + self.output_tilde = output_tilde + if output_tilde: + self.linear_out = nn.Linear(q_dim + hidden_dim, hidden_dim, bias=False) + self.tanh = nn.Tanh() + + def forward(self, q, kv, mask=None, output_prob=True): + '''Propagate h through the network. + q: (query) batch x dim + kv: (keys and values) batch x seq_len x dim + mask: batch x seq_len indices to be masked + ''' + x_q = self.linear_q(q).unsqueeze(2) # batch x dim x 1 + x_kv = self.linear_kv(kv) + + # Get attention + attn = torch.bmm(x_kv, x_q).squeeze(2) # batch x seq_len + logit = attn + + if mask is not None: + # -Inf masking prior to the softmax + attn.masked_fill_(mask, -float('inf')) + attn = self.sm(attn) # There will be a bug here, but it's actually a problem in torch source code. 
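+ # The remark above most likely refers to fully masked rows: softmax over a row
+ # that is entirely -inf yields NaNs. The candidate masks built in the 'waypoint'
+ # branch always keep at least one valid position (the count includes the stop
+ # action), so this case is not hit in normal use. Note that when
+ # output_prob=False the caller receives the raw pre-mask logits stored in `logit`.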
+ attn3 = attn.view(attn.size(0), 1, attn.size(1)) # batch x 1 x seq_len + + weighted_x_kv = torch.bmm(attn3, x_kv).squeeze(1) # batch x dim + if not output_prob: + attn = logit + if self.output_tilde: + h_tilde = torch.cat((weighted_x_kv, q), 1) + h_tilde = self.tanh(self.linear_out(h_tilde)) + return h_tilde, attn + else: + return weighted_x_kv, attn diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/models/Policy_ViewSelection_HAMT.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/Policy_ViewSelection_HAMT.py new file mode 100755 index 0000000..775ef60 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/Policy_ViewSelection_HAMT.py @@ -0,0 +1,434 @@ +from copy import deepcopy +import numpy as np +import time +import torch +import torch.nn as nn +import torch.nn.functional as F + +from gym import Space +from habitat import Config +from habitat_baselines.common.baseline_registry import baseline_registry +from habitat_baselines.rl.models.rnn_state_encoder import ( + build_rnn_state_encoder, +) +from habitat_baselines.rl.ppo.policy import Net + +from vlnce_baselines.models.hamt.vlnbert_init import get_vlnbert_models +from vlnce_baselines.common.aux_losses import AuxLosses +from vlnce_baselines.models.encoders.instruction_encoder import ( + InstructionEncoder, +) +from vlnce_baselines.models.encoders.image_encoders import ( + TorchVisionResNet50, + VlnResnetDepthEncoder, + CLIPEncoder, +) +from vlnce_baselines.models.encoders.video_encoder import VideoRGBEcnoder +from vlnce_baselines.models.policy import ILPolicy + +from vlnce_baselines.waypoint_pred.TRM_net import BinaryDistPredictor_TRM +from vlnce_baselines.waypoint_pred.utils import nms +from vlnce_baselines.models.utils import ( + angle_feature_with_ele, dir_angle_feature_with_ele, angle_feature_torch, length2mask) +import math + +@baseline_registry.register_policy +class PolicyViewSelectionHAMT(ILPolicy): + def __init__( + self, + observation_space: Space, + action_space: Space, + model_config: Config, + ): + super().__init__( + HAMT( + observation_space=observation_space, + model_config=model_config, + num_actions=action_space.n, + ), + action_space.n, + ) + + @classmethod + def from_config( + cls, config: Config, observation_space: Space, action_space: Space + ): + config.defrost() + config.MODEL.TORCH_GPU_ID = config.TORCH_GPU_ID + config.MODEL.use_critic = (config.IL.feedback == 'sample') + config.freeze() + + return cls( + observation_space=observation_space, + action_space=action_space, + model_config=config.MODEL, + ) + +class Critic(nn.Module): + def __init__(self, drop_ratio): + super(Critic, self).__init__() + self.state2value = nn.Sequential( + nn.Linear(768, 512), + nn.ReLU(), + nn.Dropout(drop_ratio), + nn.Linear(512, 1), + ) + + def forward(self, state): + return self.state2value(state).squeeze() + +class HAMT(Net): + def __init__( + self, observation_space: Space, model_config: Config, num_actions, + ): + super().__init__() + + device = ( + torch.device("cuda", model_config.TORCH_GPU_ID) + if torch.cuda.is_available() + else torch.device("cpu") + ) + self.device = device + + print('\nInitalizing the HAMT model ...') + self.vln_bert = get_vlnbert_models(config=model_config) + if model_config.task_type == 'r2r': + self.rgb_projection = nn.Linear(model_config.RGB_ENCODER.output_size, 768) + elif model_config.task_type == 'rxr': + self.rgb_projection = nn.Linear(model_config.RGB_ENCODER.output_size, 512) + # self.rgb_projection = nn.Linear(2048, 768) # for vit 768 
compability + self.drop_env = nn.Dropout(p=0.4) + if model_config.use_critic: + self.critic = Critic(drop_ratio=0.5) + + # Init the depth encoder + assert model_config.DEPTH_ENCODER.backbone_type in [ + "VlnResnetDepthEncoder" + ], "DEPTH_ENCODER.backbone_type must be VlnResnetDepthEncoder" + self.depth_encoder = VlnResnetDepthEncoder( + observation_space, + output_size=model_config.DEPTH_ENCODER.output_size, + checkpoint=model_config.DEPTH_ENCODER.ddppo_checkpoint, + backbone=model_config.DEPTH_ENCODER.backbone, + spatial_output=model_config.spatial_output, + ) + self.space_pool_depth = nn.Sequential(nn.AdaptiveAvgPool2d((1,1)), nn.Flatten(start_dim=2)) + + # Init the RGB encoder + # assert model_config.RGB_ENCODER.backbone_type in [ + # "TorchVisionResNet152", "TorchVisionResNet50" + # ], "RGB_ENCODER.backbone_type must be TorchVisionResNet152 or TorchVisionResNet50" + if model_config.RGB_ENCODER.backbone_type == "TorchVisionResNet50": + self.rgb_encoder = TorchVisionResNet50( + observation_space, + model_config.RGB_ENCODER.output_size, + device, + spatial_output=model_config.spatial_output, + ) + elif model_config.RGB_ENCODER.backbone_type == "CLIP": + self.rgb_encoder = CLIPEncoder(self.device) + elif model_config.RGB_ENCODER.backbone_type.startswith("VideoIntern"): + self.rgb_encoder = VideoRGBEcnoder( + observation_space, + model_config.RGB_ENCODER.output_size, + model_config.RGB_ENCODER.backbone_type, + self.device + ) + self.clip_encoder = CLIPEncoder(self.device) + if "Base" in model_config.RGB_ENCODER.backbone_type: + self.rgb_embedding_projection = nn.Linear(512+768, 768) + elif "Large" in model_config.RGB_ENCODER.backbone_type: + self.rgb_embedding_projection = nn.Linear(512+1024, 768) + + + self.space_pool_rgb = nn.Sequential(nn.AdaptiveAvgPool2d((1,1)), nn.Flatten(start_dim=2)) + + self.pano_img_idxes = np.arange(0, 12, dtype=np.int64) # 逆时针 + pano_angle_rad_c = (1-self.pano_img_idxes/12) * 2 * math.pi # 对应到逆时针 + self.pano_angle_fts = angle_feature_torch(torch.from_numpy(pano_angle_rad_c)) + + if model_config.progress_monitor: + self.state2 = nn.Sequential( + nn.Dropout(0.5), + nn.Linear(768,512), + nn.Tanh() + ) + self.progress_monitor = nn.Sequential( + nn.Linear(model_config.max_len + 512, 1), + nn.Sigmoid() + ) + + @property # trivial argument, just for init with habitat + def output_size(self): + return 1 + + @property + def is_blind(self): + return self.rgb_encoder.is_blind or self.depth_encoder.is_blind + + @property + def num_recurrent_layers(self): + return 1 + + def forward(self, mode=None, + waypoint_predictor=None, observations=None, in_train=True, + txt_ids=None, txt_masks=None, txt_embeds=None, + hist_rgb_fts=None, hist_depth_fts=None, hist_ang_fts=None, embeddings = None, + hist_pano_rgb_fts=None, hist_pano_depth_fts=None, hist_pano_ang_fts=None, + hist_embeds=None, hist_lens=None, ob_step=None, + ob_rgb_fts=None, ob_dep_fts=None, ob_ang_fts=None, ob_dis_fts=None, + ob_nav_types=None, ob_masks=None, return_states=False, critic_states=None, + h_t=None,language_attention=None): + + if mode == 'language': + encoded_sentence = self.vln_bert(mode, txt_ids=txt_ids, txt_masks=txt_masks) + return encoded_sentence + + elif mode == 'waypoint': + # batch_size = observations['instruction'].size(0) + batch_size = observations['rgb'].shape[0] + ''' encoding rgb/depth at all directions ----------------------------- ''' + NUM_ANGLES = 120 # 120 angles 3 degrees each + NUM_IMGS = 12 + NUM_CLASSES = 12 # 12 distances at each sector + depth_batch = 
torch.zeros_like(observations['depth']).repeat(NUM_IMGS, 1, 1, 1) + rgb_batch = torch.zeros_like(observations['rgb']).repeat(NUM_IMGS, 1, 1, 1) + + # reverse the order of input images to clockwise + a_count = 0 + for i, (k, v) in enumerate(observations.items()): + if 'depth' in k: # You might need to double check the keys order + for bi in range(v.size(0)): + ra_count = (NUM_IMGS - a_count) % NUM_IMGS + depth_batch[ra_count + bi*NUM_IMGS] = v[bi] + rgb_batch[ra_count + bi*NUM_IMGS] = observations[k.replace('depth','rgb')][bi] + a_count += 1 + obs_view12 = {} + + rgb_batch = rgb_batch/255 + + + obs_view12['depth'] = depth_batch + obs_view12['video_rgbs'] = observations['video_rgbs'] + depth_embedding = self.depth_encoder(obs_view12) # torch.Size([bs, 128, 4, 4]) + video_embedding = self.rgb_encoder(obs_view12) # torch.Size([bs, 2048, 7, 7]) + clip_embedding = self.clip_encoder({'rgb':rgb_batch}) + rgb_embedding_cated = torch.cat([video_embedding,clip_embedding],1) + rgb_embedding = self.rgb_embedding_projection(rgb_embedding_cated) + + ''' waypoint prediction ----------------------------- ''' + waypoint_heatmap_logits = waypoint_predictor( + rgb_embedding, depth_embedding) + + # reverse the order of images back to counter-clockwise + rgb_embed_reshape = rgb_embedding.reshape( + batch_size, NUM_IMGS, 768, 1, 1) + # rgb_embed_reshape = rgb_embedding.reshape( + # batch_size, NUM_IMGS, 2048, 7, 7) + depth_embed_reshape = depth_embedding.reshape( + batch_size, NUM_IMGS, 128, 4, 4) + rgb_feats = torch.cat(( + rgb_embed_reshape[:,0:1,:], + torch.flip(rgb_embed_reshape[:,1:,:], [1]), + ), dim=1) + depth_feats = torch.cat(( + depth_embed_reshape[:,0:1,:], + torch.flip(depth_embed_reshape[:,1:,:], [1]), + ), dim=1) + # way_feats = torch.cat(( + # way_feats[:,0:1,:], + # torch.flip(way_feats[:,1:,:], [1]), + # ), dim=1) + + # from heatmap to points + batch_x_norm = torch.softmax( + waypoint_heatmap_logits.reshape( + batch_size, NUM_ANGLES*NUM_CLASSES, + ), dim=1 + ) + batch_x_norm = batch_x_norm.reshape( + batch_size, NUM_ANGLES, NUM_CLASSES, + ) + batch_x_norm_wrap = torch.cat(( + batch_x_norm[:,-1:,:], + batch_x_norm, + batch_x_norm[:,:1,:]), + dim=1) + batch_output_map = nms( + batch_x_norm_wrap.unsqueeze(1), + max_predictions=5, + sigma=(7.0,5.0)) + + # predicted waypoints before sampling + batch_output_map = batch_output_map.squeeze(1)[:,1:-1,:] + + # candidate_lengths = ((batch_output_map!=0).sum(-1).sum(-1) + 1).tolist() + # if isinstance(candidate_lengths, int): + # candidate_lengths = [candidate_lengths] + # max_candidate = max(candidate_lengths) # including stop + # cand_mask = length2mask(candidate_lengths, device=self.device) + + if in_train: + # Waypoint augmentation + # parts of heatmap for sampling (fix offset first) + HEATMAP_OFFSET = 5 + batch_way_heats_regional = torch.cat( + (waypoint_heatmap_logits[:,-HEATMAP_OFFSET:,:], + waypoint_heatmap_logits[:,:-HEATMAP_OFFSET,:], + ), dim=1) + batch_way_heats_regional = batch_way_heats_regional.reshape(batch_size, 12, 10, 12) + batch_sample_angle_idxes = [] + batch_sample_distance_idxes = [] + # batch_way_log_prob = [] + for j in range(batch_size): + # angle indexes with candidates + angle_idxes = batch_output_map[j].nonzero()[:, 0] + # clockwise image indexes (same as batch_x_norm) + img_idxes = ((angle_idxes.cpu().numpy()+5) // 10) + img_idxes[img_idxes==12] = 0 + # # candidate waypoint states + # way_feats_regional = way_feats[j][img_idxes] + # heatmap regions for sampling + way_heats_regional = 
batch_way_heats_regional[j][img_idxes].view(img_idxes.size, -1) + way_heats_probs = F.softmax(way_heats_regional, 1) + probs_c = torch.distributions.Categorical(way_heats_probs) + way_heats_act = probs_c.sample().detach() + sample_angle_idxes = [] + sample_distance_idxes = [] + for k, way_act in enumerate(way_heats_act): + if img_idxes[k] != 0: + angle_pointer = (img_idxes[k] - 1) * 10 + 5 + else: + angle_pointer = 0 + sample_angle_idxes.append(way_act//12+angle_pointer) + sample_distance_idxes.append(way_act%12) + batch_sample_angle_idxes.append(sample_angle_idxes) + batch_sample_distance_idxes.append(sample_distance_idxes) + # batch_way_log_prob.append( + # probs_c.log_prob(way_heats_act)) + else: + # batch_way_log_prob = None + None + + rgb_feats = self.space_pool_rgb(rgb_feats).cpu() + depth_feats = self.space_pool_depth(depth_feats).cpu() + + # for cand + cand_rgb = [] + cand_depth = [] + cand_angle_fts = [] + cand_dis_fts = [] + cand_img_idxes = [] + cand_angles = [] + cand_distances = [] + for j in range(batch_size): + if in_train: + angle_idxes = torch.tensor(batch_sample_angle_idxes[j]) + distance_idxes = torch.tensor(batch_sample_distance_idxes[j]) + else: + angle_idxes = batch_output_map[j].nonzero()[:, 0] + distance_idxes = batch_output_map[j].nonzero()[:, 1] + # for angle & distance + angle_rad_c = angle_idxes.cpu().float()/120*2*math.pi # 顺时针 + angle_rad_cc = 2*math.pi-angle_idxes.float()/120*2*math.pi # 逆时针 + cand_angle_fts.append( angle_feature_torch(angle_rad_c) ) + cand_angles.append(angle_rad_cc.tolist()) + cand_distances.append( ((distance_idxes + 1)*0.25).tolist() ) + cand_dis_fts.append((((distance_idxes + 1)*0.25/3).repeat(4,1).T).cpu()) + # for img idxes + img_idxes = 12 - (angle_idxes.cpu().numpy()+5) // 10 # 逆时针 + img_idxes[img_idxes==12] = 0 + cand_img_idxes.append(img_idxes) + # for rgb & depth + cand_rgb.append(rgb_feats[j, img_idxes, ...]) + cand_depth.append(depth_feats[j, img_idxes, ...]) + + # for pano + pano_rgb = rgb_feats # B x 12 x 2048 + pano_depth = depth_feats # B x 12 x 128 + pano_angle_fts = deepcopy(self.pano_angle_fts) # 12 x 4 + pano_dis_fts = torch.zeros_like(pano_angle_fts) # 12 x 4 + pano_img_idxes = deepcopy(self.pano_img_idxes) # 12 + + # cand_angle_fts 顺时针 + # cand_angles 逆时针 + outputs = { + 'cand_rgb': cand_rgb, # [K x 2048] + 'cand_depth': cand_depth, # [K x 128] + 'cand_angle_fts': cand_angle_fts, # [K x 4] + 'cand_dis_fts': cand_dis_fts, # [K x 4] + 'cand_img_idxes': cand_img_idxes, # [K] + 'cand_angles': cand_angles, # [K] + 'cand_distances': cand_distances, # [K] + + 'pano_rgb': pano_rgb, # B x 12 x 2048 + 'pano_depth': pano_depth, # B x 12 x 128 + 'pano_angle_fts': pano_angle_fts, # 12 x 4 + 'pano_dis_fts': pano_dis_fts, # 12 x 4 + 'pano_img_idxes': pano_img_idxes, # 12 + } + + return outputs + + elif mode == 'navigation': + hist_embeds = torch.stack(hist_embeds, 1) + hist_masks = length2mask(hist_lens, size=hist_embeds.size(1)).logical_not() + + ob_rgb_fts = self.drop_env(ob_rgb_fts) + ob_rgb_fts = self.rgb_projection(ob_rgb_fts) + + act_logits, txt_embeds, hist_embeds, ob_embeds, lang_attention_score = self.vln_bert( + mode, txt_embeds=txt_embeds, txt_masks=txt_masks, + hist_embeds=hist_embeds, hist_masks=hist_masks, + ob_rgb_fts=ob_rgb_fts, ob_dep_fts=ob_dep_fts, ob_ang_fts=ob_ang_fts, ob_dis_fts=ob_dis_fts, + ob_nav_types=ob_nav_types, ob_masks=ob_masks) + + if return_states: + # if self.args.no_lang_ca: + # states = hist_embeds[:, 0] + # else: + # states = txt_embeds[:, 0] * hist_embeds[:, 0] # [CLS] + states = txt_embeds[:, 0] 
* hist_embeds[:, 0] + return act_logits, states, lang_attention_score + + return (act_logits, ) + + elif mode == 'history': + if hist_rgb_fts is not None: + hist_rgb_fts = self.drop_env(hist_rgb_fts) + hist_rgb_fts = self.rgb_projection(hist_rgb_fts) + if hist_pano_rgb_fts is not None: + hist_pano_rgb_fts = self.drop_env(hist_pano_rgb_fts) + hist_pano_rgb_fts = self.rgb_projection(hist_pano_rgb_fts) + if ob_step is not None: + ob_step_ids = torch.LongTensor([ob_step]).cuda() + else: + ob_step_ids = None + hist_embeds = self.vln_bert(mode, hist_rgb_fts=hist_rgb_fts, + hist_ang_fts=hist_ang_fts, hist_depth_fts=hist_depth_fts, ob_step_ids=ob_step_ids, + hist_pano_rgb_fts=hist_pano_rgb_fts, hist_pano_depth_fts=hist_pano_depth_fts, + hist_pano_ang_fts=hist_pano_ang_fts) + return hist_embeds + + elif mode == 'critic': + return self.critic(critic_states) + + elif mode == 'progress': + pm_in = torch.cat((self.state2(h_t),language_attention.sum(1)),1) + progresses = self.progress_monitor(pm_in) + return progresses + +class BertLayerNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-12): + """Construct a layernorm module in the TF style (epsilon inside the square root). + """ + super(BertLayerNorm, self).__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.bias = nn.Parameter(torch.zeros(hidden_size)) + self.variance_epsilon = eps + + def forward(self, x): + u = x.mean(-1, keepdim=True) + s = (x - u).pow(2).mean(-1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.variance_epsilon) + return self.weight * x + self.bias diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/models/__init__.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/models/encoders/image_encoders.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/encoders/image_encoders.py new file mode 100755 index 0000000..f6554f9 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/encoders/image_encoders.py @@ -0,0 +1,277 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +import torchvision.models as models +from gym import spaces +from habitat import logger +from habitat_baselines.rl.ddppo.policy import resnet +from habitat_baselines.rl.ddppo.policy.resnet_policy import ResNetEncoder +import clip +import torchvision + +class VlnResnetDepthEncoder(nn.Module): + def __init__( + self, + observation_space, + output_size=128, + checkpoint="NONE", + backbone="resnet50", + resnet_baseplanes=32, + normalize_visual_inputs=False, + trainable=False, + spatial_output: bool = False, + ): + super().__init__() + self.visual_encoder = ResNetEncoder( + spaces.Dict({"depth": observation_space.spaces["depth"]}), + baseplanes=resnet_baseplanes, + ngroups=resnet_baseplanes // 2, + make_backbone=getattr(resnet, backbone), + normalize_visual_inputs=normalize_visual_inputs, + ) + + for param in self.visual_encoder.parameters(): + param.requires_grad_(trainable) + + if checkpoint != "NONE": + ddppo_weights = torch.load(checkpoint) + + weights_dict = {} + for k, v in ddppo_weights["state_dict"].items(): + split_layer_name = k.split(".")[2:] + if split_layer_name[0] != "visual_encoder": + continue + + layer_name = ".".join(split_layer_name[1:]) + weights_dict[layer_name] = v + + del ddppo_weights + self.visual_encoder.load_state_dict(weights_dict, strict=True) + + self.spatial_output = spatial_output + + if 
not self.spatial_output: + self.output_shape = (output_size,) + # self.visual_fc = nn.Sequential( + # nn.Flatten(), + # nn.Linear( + # np.prod(self.visual_encoder.output_shape), output_size + # ), + # nn.ReLU(True), + # ) + None + else: + self.spatial_embeddings = nn.Embedding( + self.visual_encoder.output_shape[1] + * self.visual_encoder.output_shape[2], + 64, + ) + + self.output_shape = list(self.visual_encoder.output_shape) + self.output_shape[0] += self.spatial_embeddings.embedding_dim + self.output_shape = tuple(self.output_shape) + + + def forward(self, observations): + """ + Args: + observations: [BATCH, HEIGHT, WIDTH, CHANNEL] + Returns: + [BATCH, OUTPUT_SIZE] + """ + if "depth_features" in observations: + x = observations["depth_features"] + else: + x = self.visual_encoder(observations) + + if self.spatial_output: + b, c, h, w = x.size() + + spatial_features = ( + self.spatial_embeddings( + torch.arange( + 0, + self.spatial_embeddings.num_embeddings, + device=x.device, + dtype=torch.long, + ) + ) + .view(1, -1, h, w) + .expand(b, self.spatial_embeddings.embedding_dim, h, w) + ) + + return torch.cat([x, spatial_features], dim=1) + else: + # return self.visual_fc(x) + return x + + +class TorchVisionResNet50(nn.Module): + r""" + Takes in observations and produces an embedding of the rgb component. + + Args: + observation_space: The observation_space of the agent + output_size: The size of the embedding vector + device: torch.device + """ + + def __init__( + self, + observation_space, + output_size, + device, + spatial_output: bool = False, + ): + super().__init__() + self.device = device + self.resnet_layer_size = 2048 + linear_layer_input_size = 0 + if "rgb" in observation_space.spaces: + self._n_input_rgb = observation_space.spaces["rgb"].shape[2] + obs_size_0 = observation_space.spaces["rgb"].shape[0] + obs_size_1 = observation_space.spaces["rgb"].shape[1] + if obs_size_0 != 224 or obs_size_1 != 224: + logger.warn( + "TorchVisionResNet50: observation size is not conformant to expected ResNet input size [3x224x224]" + ) + linear_layer_input_size += self.resnet_layer_size + else: + self._n_input_rgb = 0 + + if self.is_blind: + self.cnn = nn.Sequential() + return + + rgb_resnet = models.resnet50(pretrained=True) + rgb_modules = list(rgb_resnet.children())[:-2] + self.cnn = torch.nn.Sequential(*rgb_modules) + + # disable gradients for resnet, params frozen + for param in self.cnn.parameters(): + param.requires_grad_(False) + self.cnn.eval() + + self.spatial_output = spatial_output + + if not self.spatial_output: + self.output_shape = (output_size,) + # self.fc = nn.Linear(linear_layer_input_size, output_size) + # self.activation = nn.ReLU() + None + else: + class SpatialAvgPool(nn.Module): + def forward(self, x): + x = F.adaptive_avg_pool2d(x, (4, 4)) + + return x + self.cnn.avgpool = SpatialAvgPool() + self.cnn.fc = nn.Sequential() + self.spatial_embeddings = nn.Embedding(4 * 4, 64) + self.output_shape = ( + self.resnet_layer_size + self.spatial_embeddings.embedding_dim, + 4, + 4, + ) + + # self.layer_extract = self.cnn._modules.get("avgpool") + + from torchvision import transforms + self.rgb_transform = torch.nn.Sequential( + # transforms.ConvertImageDtype(torch.float), + transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), + ) + + @property + def is_blind(self): + return self._n_input_rgb == 0 + + def forward(self, observations): + r"""Sends RGB observation through the TorchVision ResNet50 pre-trained + on ImageNet. 
Sends through fully connected layer, activates, and + returns final embedding. + """ + + def resnet_forward(observation): + # resnet_output = torch.zeros( + # 1, dtype=torch.float32, device=observation.device + # ) + # def hook(m, i, o): + # resnet_output.set_(o) + + # output: [BATCH x RESNET_DIM] + # h = self.layer_extract.register_forward_hook(hook) + resnet_output = self.cnn(observation) + # h.remove() + return resnet_output + + if "rgb_features" in observations: + resnet_output = observations["rgb_features"] + else: + # permute tensor to dimension [BATCH x CHANNEL x HEIGHT x WIDTH] + rgb_observations = observations["rgb"].permute(0, 3, 1, 2) + + rgb_observations = self.rgb_transform(rgb_observations) + # rgb_observations = rgb_observations / 255.0 # normalize RGB + + resnet_output = resnet_forward(rgb_observations.contiguous()) + + if self.spatial_output: + b, c, h, w = resnet_output.size() + + spatial_features = ( + self.spatial_embeddings( + torch.arange( + 0, + self.spatial_embeddings.num_embeddings, + device=resnet_output.device, + dtype=torch.long, + ) + ) + .view(1, -1, h, w) + .expand(b, self.spatial_embeddings.embedding_dim, h, w) + ) + + return torch.cat([resnet_output, spatial_features], dim=1)#.to(self.device) + else: + # return self.activation( + # self.fc(torch.flatten(resnet_output, 1)) + # ) # [BATCH x OUTPUT_DIM] + return resnet_output + + +class CLIPEncoder(nn.Module): + r""" + Takes in observations and produces an embedding of the rgb component. + Args: + observation_space: The observation_space of the agent + output_size: The size of the embedding vector + device: torch.device + """ + + def __init__( + self, device, + ): + super().__init__() + self.model, _ = clip.load("ViT-B/32", device=device) + for param in self.model.parameters(): + param.requires_grad_(False) + self.model.eval() + + from torchvision import transforms + self.rgb_transform = torch.nn.Sequential( + # transforms.ConvertImageDtype(torch.float), + transforms.Normalize([0.48145466, 0.4578275, 0.40821073], [0.26862954, 0.26130258, 0.27577711]), + ) + + def forward(self, observations): + r"""Sends RGB observation through the TorchVision ResNet50 pre-trained + on ImageNet. Sends through fully connected layer, activates, and + returns final embedding. + """ + rgb_observations = observations["rgb"].permute(0, 3, 1, 2) + rgb_observations = self.rgb_transform(rgb_observations) + output = self.model.encode_image(rgb_observations.contiguous()) + + return output.float() \ No newline at end of file diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/models/encoders/instruction_encoder.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/encoders/instruction_encoder.py new file mode 100755 index 0000000..8101f53 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/encoders/instruction_encoder.py @@ -0,0 +1,101 @@ +import gzip +import json + +import torch +import torch.nn as nn +from habitat import Config + + +class InstructionEncoder(nn.Module): + def __init__(self, config: Config): + r"""An encoder that uses RNN to encode an instruction. Returns + the final hidden state after processing the instruction sequence. + + Args: + config: must have + embedding_size: The dimension of each embedding vector + hidden_size: The hidden (output) size + rnn_type: The RNN cell type. 
Must be GRU or LSTM + final_state_only: Whether or not to return just the final state + """ + super().__init__() + + self.config = config + + # lang_drop_ratio = 0.50 + # self.drop = nn.Dropout(p=lang_drop_ratio) + + rnn = nn.GRU if self.config.rnn_type == "GRU" else nn.LSTM + self.encoder_rnn = rnn( + input_size=config.embedding_size, + hidden_size=config.hidden_size, + bidirectional=config.bidirectional, + ) + + if config.sensor_uuid == "instruction": + if self.config.use_pretrained_embeddings: + self.embedding_layer = nn.Embedding.from_pretrained( + embeddings=self._load_embeddings(), + freeze=not self.config.fine_tune_embeddings, + ) + else: # each embedding initialized to sampled Gaussian + self.embedding_layer = nn.Embedding( + num_embeddings=config.vocab_size, + embedding_dim=config.embedding_size, + padding_idx=0, + ) + + @property + def output_size(self): + return self.config.hidden_size * (1 + int(self.config.bidirectional)) + + def _load_embeddings(self): + """Loads word embeddings from a pretrained embeddings file. + PAD: index 0. [0.0, ... 0.0] + UNK: index 1. mean of all R2R word embeddings: [mean_0, ..., mean_n] + why UNK is averaged: https://bit.ly/3u3hkYg + Returns: + embeddings tensor of size [num_words x embedding_dim] + """ + with gzip.open(self.config.embedding_file, "rt") as f: + embeddings = torch.tensor(json.load(f)) + return embeddings + + def forward(self, observations): + """ + Tensor sizes after computation: + instruction: [batch_size x seq_length] + lengths: [batch_size] + hidden_state: [batch_size x hidden_size] + """ + + if self.config.sensor_uuid == "instruction": + instruction = observations["instruction"].long() + lengths = (instruction != 0.0).long().sum(dim=1) + instruction = self.embedding_layer(instruction) + # instruction = self.drop(instruction) + else: + instruction = observations["rxr_instruction"] + + lengths = (instruction != 0.0).long().sum(dim=2) + lengths = (lengths != 0.0).long().sum(dim=1) + + packed_seq = nn.utils.rnn.pack_padded_sequence( + instruction, lengths.cpu(), batch_first=True, enforce_sorted=False + ) + output, final_state = self.encoder_rnn(packed_seq) + + if self.config.rnn_type == "LSTM": + final_state = final_state[0] + + if self.config.final_state_only: # default False + return final_state.squeeze(0) + else: + ctx = nn.utils.rnn.pad_packed_sequence(output, + batch_first=True)[0].permute(0, 2, 1) + all_lang_masks = (ctx == 0.0).all(dim=1) + ctx = ctx.permute(0, 2, 1) + + # ctx = self.drop(ctx) + + return ctx, all_lang_masks diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/models/encoders/video_encoder.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/encoders/video_encoder.py new file mode 100644 index 0000000..aa57a82 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/encoders/video_encoder.py @@ -0,0 +1,189 @@ +from vlnce_baselines.models.videomae import volume_transforms, video_transforms, modeling_finetune, utils +from vlnce_baselines.models.videomae.get_args import get_args +import torch +import torch.nn as nn +import torch.nn.functional as F +from timm import create_model + +from habitat import logger + +from collections import OrderedDict + +class VideoRGBEcnoder(nn.Module): + r""" + Takes in observations and produces an embedding of the rgb component. 
+ + Args: + observation_space: The observation_space of the agent + output_size: The size of the embedding vector + device: torch.device + """ + + def __init__( + self, + observation_space, + output_size, + model_name, + device, + spatial_output: bool = False, + ): + super().__init__() + self.device = device + + args, ds_init = get_args() + + if "Large" in model_name: + args.model = 'vit_large_patch16_224' + args.finetune = 'pretrained/VideoMAE/vit_l_hybrid_pt_800e.pth' + elif "Base" in model_name: + args.model = 'vit_base_patch16_224' + args.finetune = 'pretrained/VideoMAE/vit_b_hybrid_pt_800e.pth' + + model = create_model( + args.model, + pretrained=False, + num_classes=args.nb_classes, + all_frames=args.num_frames * args.num_segments, + tubelet_size=args.tubelet_size, + drop_rate=args.drop, + drop_path_rate=args.drop_path, + attn_drop_rate=args.attn_drop_rate, + drop_block_rate=None, + use_mean_pooling=args.use_mean_pooling, + init_scale=args.init_scale, + ) + + + checkpoint = torch.load(args.finetune, map_location='cpu') + print("Load ckpt from %s" % args.finetune) + checkpoint_model = None + for model_key in args.model_key.split('|'): + if model_key in checkpoint: + checkpoint_model = checkpoint[model_key] + print("Load state_dict by model_key = %s" % model_key) + break + if checkpoint_model is None: + checkpoint_model = checkpoint + state_dict = model.state_dict() + for k in ['head.weight', 'head.bias']: + if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape: + print(f"Removing key {k} from pretrained checkpoint") + del checkpoint_model[k] + + all_keys = list(checkpoint_model.keys()) + new_dict = OrderedDict() + for key in all_keys: + if key.startswith('backbone.'): + new_dict[key[9:]] = checkpoint_model[key] + elif key.startswith('encoder.'): + new_dict[key[8:]] = checkpoint_model[key] + else: + new_dict[key] = checkpoint_model[key] + checkpoint_model = new_dict + + # interpolate position embedding + if 'pos_embed' in checkpoint_model: + pos_embed_checkpoint = checkpoint_model['pos_embed'] + embedding_size = pos_embed_checkpoint.shape[-1] # channel dim + num_patches = model.patch_embed.num_patches # + num_extra_tokens = model.pos_embed.shape[-2] - num_patches # 0/1 + + # height (== width) for the checkpoint position embedding + orig_size = int(((pos_embed_checkpoint.shape[-2] - num_extra_tokens)//(args.num_frames // model.patch_embed.tubelet_size)) ** 0.5) + # height (== width) for the new position embedding + new_size = int((num_patches // (args.num_frames // model.patch_embed.tubelet_size) )** 0.5) + # class_token and dist_token are kept unchanged + if orig_size != new_size: + print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size)) + extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] + # only the position tokens are interpolated + pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] + # B, L, C -> BT, H, W, C -> BT, C, H, W + pos_tokens = pos_tokens.reshape(-1, args.num_frames // model.patch_embed.tubelet_size, orig_size, orig_size, embedding_size) + pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) + pos_tokens = torch.nn.functional.interpolate( + pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False) + # BT, C, H, W -> BT, H, W, C -> B, T, H, W, C + pos_tokens = pos_tokens.permute(0, 2, 3, 1).reshape(-1, args.num_frames // model.patch_embed.tubelet_size, new_size, new_size, embedding_size) + pos_tokens = pos_tokens.flatten(1, 3) # B, L, C + 
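+ # re-attach the untouched class/extra tokens in front of the interpolated
+ # spatial position tokens before writing them back into the checkpoint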
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) + checkpoint_model['pos_embed'] = new_pos_embed + + utils.load_state_dict(model, checkpoint_model, prefix=args.model_prefix) + + self.model = model + + # disable gradients for resnet, params frozen + for param in self.model.parameters(): + param.requires_grad = False + self.model.eval() + + self.resnet_layer_size = 768 + self.spatial_output = spatial_output + + if not self.spatial_output: + self.output_shape = (output_size,) + # self.fc = nn.Linear(768, output_size) + # self.activation = nn.ReLU() + else: + + class SpatialAvgPool(nn.Module): + def forward(self, x): + x = F.adaptive_avg_pool2d(x, (4, 4)) + + return x + + self.model.avgpool = SpatialAvgPool() + self.model.fc = nn.Sequential() + + self.spatial_embeddings = nn.Embedding(4 * 4, 64) + + self.output_shape = ( + self.resnet_layer_size + self.spatial_embeddings.embedding_dim, + 4, + 4, + ) + + self.data_transform = video_transforms.Compose([ + video_transforms.Resize(args.short_side_size, interpolation='bilinear'), + video_transforms.CenterCrop(size=(args.input_size, args.input_size)), + volume_transforms.ClipToTensor(), + video_transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + ]) + + @property + def is_blind(self): + return self._n_input_rgb == 0 + + def forward(self, observations): + r"""Sends RGB observation through the TorchVision ResNet50 pre-trained + on ImageNet. Sends through fully connected layer, activates, and + returns final embedding. + """ + + # permute tensor to dimension [BATCH x CHANNEL x HEIGHT x WIDTH] + # rgb_observations = observations["rgb"].permute(0, 3, 1, 2) + video_rgb_batch = torch.vstack([self.data_transform(obs[k])[None,...] for obs in observations["video_rgbs"] for k in obs.keys()]).cuda() + + if self.spatial_output: + features = self.model.get_spatial_features(video_rgb_batch) + features = self.model.avgpool(features) + b, c, h, w = features.size() + + spatial_features = ( + self.spatial_embeddings( + torch.arange( + 0, + self.spatial_embeddings.num_embeddings, + device=features.device, + dtype=torch.long, + ) + ) + .view(1, -1, h, w) + .expand(b, self.spatial_embeddings.embedding_dim, h, w) + ) + + return torch.cat([features, spatial_features], dim=1)#.to(self.device) + else: + return self.model.forward_features(video_rgb_batch) \ No newline at end of file diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/models/hamt/base_il_trainer.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/hamt/base_il_trainer.py new file mode 100644 index 0000000..a0b959a --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/hamt/base_il_trainer.py @@ -0,0 +1,1186 @@ +import json +import jsonlines +import os +import sys +import time +import glob +import warnings +from collections import defaultdict +from typing import Dict, List + +import torch +import torch.nn.functional as F +from torch.nn.parallel import DistributedDataParallel as DDP +import torch.distributed as distr +import torch.multiprocessing as mp +import gzip +import math +from copy import deepcopy + +import tqdm +from gym import Space +from habitat import Config, logger +from habitat.utils.visualizations.utils import append_text_to_image +from habitat_baselines.common.base_il_trainer import BaseILTrainer +from habitat_baselines.common.baseline_registry import baseline_registry +from habitat_baselines.common.environments import get_env_class +from habitat_baselines.common.obs_transformers import ( + 
apply_obs_transforms_batch, + apply_obs_transforms_obs_space, + get_active_obs_transforms, +) +from habitat_extensions.measures import Position +from habitat_baselines.common.tensorboard_utils import TensorboardWriter +from habitat_baselines.utils.common import batch_obs, generate_video +from habitat_baselines.utils.common import ( + get_checkpoint_id, + poll_checkpoint_folder, +) + +from habitat_extensions.utils import observations_to_image +from vlnce_baselines.common.aux_losses import AuxLosses +from vlnce_baselines.common.env_utils import ( + construct_envs_auto_reset_false, + construct_envs, + is_slurm_batch_job, +) +from vlnce_baselines.common.utils import * + +from habitat_extensions.measures import NDTW +from fastdtw import fastdtw + +from ..utils import get_camera_orientations12 +from ..utils import ( + length2mask, dir_angle_feature, dir_angle_feature_with_ele, +) + +with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=FutureWarning) + import tensorflow as tf # noqa: F401 + + +class BaseVLNCETrainer(BaseILTrainer): + r"""A base trainer for VLN-CE imitation learning.""" + supported_tasks: List[str] = ["VLN-v0"] + + def __init__(self, config=None): + super().__init__(config) + self.policy = None + self.device = ( + torch.device("cuda", self.config.TORCH_GPU_ID) + if torch.cuda.is_available() + else torch.device("cpu") + ) + self.obs_transforms = [] + self.start_epoch = 0 + self.step_id = 0 + + def _initialize_policy( + self, + config: Config, + load_from_ckpt: bool, + observation_space: Space, + action_space: Space, + ) -> None: + policy = baseline_registry.get_policy(self.config.MODEL.policy_name) + self.policy = policy.from_config( + config=config, + observation_space=observation_space, + action_space=action_space, + ) + ''' initialize the waypoint predictor here ''' + from vlnce_baselines.waypoint_pred.TRM_net import BinaryDistPredictor_TRM + self.waypoint_predictor = BinaryDistPredictor_TRM(device=self.device) + self.waypoint_predictor.load_state_dict( + torch.load( + 'pretrained/wp_pred/waypoint_predictor', + map_location = torch.device('cpu'), + )['predictor']['state_dict'] + ) + for param in self.waypoint_predictor.parameters(): + param.requires_grad_(False) + + self.policy.to(self.device) + self.waypoint_predictor.to(self.device) + self.num_recurrent_layers = self.policy.net.num_recurrent_layers + + if self.config.GPU_NUMBERS > 1: + print('Using', self.config.GPU_NUMBERS,'GPU!') + self.policy.net = DDP(self.policy.net.to(self.device), device_ids=[self.device], + output_device=self.device, find_unused_parameters=True, broadcast_buffers=False) + # self.waypoint_predictor = DDP(self.waypoint_predictor.to(self.device), device_ids=[self.device], + # output_device=self.device, find_unused_parameters=True, broadcast_buffers=False) + + self.optimizer = torch.optim.AdamW( + self.policy.parameters(), lr=self.config.IL.lr, + ) + + if load_from_ckpt: + ckpt_path = config.IL.ckpt_to_load + ckpt_dict = self.load_checkpoint(ckpt_path, map_location="cpu") + + if 'module' in list(ckpt_dict['state_dict'].keys())[0] and self.config.GPU_NUMBERS == 1: + self.policy.net = torch.nn.DataParallel(self.policy.net.to(self.device), + device_ids=[self.device], output_device=self.device) + self.policy.load_state_dict(ckpt_dict["state_dict"]) + self.policy.net = self.policy.net.module + self.waypoint_predictor = torch.nn.DataParallel(self.waypoint_predictor.to(self.device), + device_ids=[self.device], output_device=self.device) + # 
self.waypoint_predictor.load_state_dict(ckpt_dict["waypoint_predictor_state_dict"]) + # self.waypoint_predictor = self.waypoint_predictor.module + else: + self.policy.load_state_dict(ckpt_dict["state_dict"]) + # self.waypoint_predictor.load_state_dict(ckpt_dict["waypoint_predictor_state_dict"]) + if config.IL.is_requeue: + self.optimizer.load_state_dict(ckpt_dict["optim_state"]) + self.start_epoch = ckpt_dict["epoch"] + 1 + self.step_id = ckpt_dict["step_id"] + logger.info(f"Loaded weights from checkpoint: {ckpt_path}") + + self.waypoint_predictor.eval() + + params = sum(param.numel() for param in self.policy.parameters()) + params_t = sum( + p.numel() for p in self.policy.parameters() if p.requires_grad + ) + logger.info(f"Agent parameters: {params/1e6} MB. Trainable: {params_t/1e6} MB") + logger.info("Finished setting up policy.") + + # def save_checkpoint(self, file_name) -> None: + # r"""Save checkpoint with specified name. + + # Args: + # file_name: file name for checkpoint + + # Returns: + # None + # """ + # checkpoint = { + # "state_dict": self.policy.state_dict(), + # "config": self.config, + # } + # torch.save( + # checkpoint, os.path.join(self.config.CHECKPOINT_FOLDER, file_name) + # ) + + def load_checkpoint(self, checkpoint_path, *args, **kwargs) -> Dict: + return torch.load(checkpoint_path, *args, **kwargs) + + # def _update_agent( + # self, + # observations, + # prev_actions, + # not_done_masks, + # corrected_actions, + # weights, + # step_grad: bool = True, + # loss_accumulation_scalar: int = 1, + # ): + # T, N = corrected_actions.size() + + # recurrent_hidden_states = torch.zeros( + # N, + # self.num_recurrent_layers, + # self.config.MODEL.STATE_ENCODER.hidden_size, + # device=self.device, + # ) + # AuxLosses.clear() + # # observations['rgb'] = observations['rgb'][0:2] + # # observations['depth'] = observations['depth'][0:2] + # # observations['rxr_instruction'] = observations['rxr_instruction'][0:2] + # # not_done_masks = not_done_masks[0:2] + # # prev_actions = prev_actions[0:2] + + # distribution = self.policy.build_distribution( + # observations, recurrent_hidden_states, prev_actions, not_done_masks) + + # logits = distribution.logits + # logits = logits.view(T, N, -1) + + # action_loss = F.cross_entropy( + # logits.permute(0, 2, 1), corrected_actions, reduction="none" + # ) + # action_loss = ((weights * action_loss).sum(0) / weights.sum(0)).mean() + + # aux_mask = (weights > 0).view(-1) + # aux_loss = AuxLosses.reduce(aux_mask) + + # loss = action_loss + aux_loss + # loss = loss / loss_accumulation_scalar + # loss.backward() + + # if step_grad: + # self.optimizer.step() + # self.optimizer.zero_grad() + + # # if isinstance(aux_loss, torch.Tensor): + # # aux_loss = aux_loss.item() + # # return loss.item(), action_loss.item(), aux_loss + + # return loss, action_loss, aux_loss + + @staticmethod + def _pause_envs( + envs_to_pause, + envs, + recurrent_hidden_states, + not_done_masks, + prev_actions, + batch, + rgb_frames=None, + # positions=None + ): + # pausing envs with no new episode + if len(envs_to_pause) > 0: + state_index = list(range(envs.num_envs)) + for idx in reversed(envs_to_pause): + state_index.pop(idx) + envs.pause_at(idx) + # positions.pop(idx) + + # indexing along the batch dimensions + recurrent_hidden_states = recurrent_hidden_states[state_index] + not_done_masks = not_done_masks[state_index] + prev_actions = prev_actions[state_index] + + for k, v in batch.items(): + batch[k] = v[state_index] + + if rgb_frames is not None: + rgb_frames = [rgb_frames[i] for 
i in state_index] + + return ( + envs, + recurrent_hidden_states, + not_done_masks, + prev_actions, + batch, + rgb_frames, + # positions + ) + + def _eval_checkpoint( + self, + checkpoint_path: str, + writer: TensorboardWriter, + checkpoint_index: int = 0, + ) -> None: + r"""Evaluates a single checkpoint. + + Args: + checkpoint_path: path of checkpoint + writer: tensorboard writer object + checkpoint_index: index of the current checkpoint + + Returns: + None + """ + if self.local_rank < 1: + logger.info(f"checkpoint_path: {checkpoint_path}") + + if self.config.EVAL.USE_CKPT_CONFIG: + config = self._setup_eval_config( + self.load_checkpoint(checkpoint_path, map_location="cpu")[ + "config" + ] + ) + else: + config = self.config.clone() + config.defrost() + # config.TASK_CONFIG.DATASET.SPLIT = config.EVAL.SPLIT + # config.TASK_CONFIG.DATASET.ROLES = ["guide"] + # config.TASK_CONFIG.DATASET.LANGUAGES = config.EVAL.LANGUAGES + # config.TASK_CONFIG.TASK.NDTW.SPLIT = config.EVAL.SPLIT + # config.TASK_CONFIG.TASK.SDTW.SPLIT = config.EVAL.SPLIT + config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False + config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = ( + -1 + ) + config.IL.ckpt_to_load = checkpoint_path + if len(config.VIDEO_OPTION) > 0: + config.defrost() + config.TASK_CONFIG.TASK.MEASUREMENTS.append("TOP_DOWN_MAP_VLNCE") + config.TASK_CONFIG.TASK.MEASUREMENTS.append("COLLISIONS") + config.freeze() + + if config.EVAL.SAVE_RESULTS: + fname = os.path.join( + config.RESULTS_DIR, + f"stats_ckpt_{checkpoint_index}_{config.TASK_CONFIG.DATASET.SPLIT}.json", + ) + if os.path.exists(fname): + print("skipping -- evaluation exists.") + return + + envs = construct_envs( + config, get_env_class(config.ENV_NAME), + auto_reset_done=False, + episodes_allowed=self.traj # split by rank + ) + + dataset_length = sum(envs.number_of_episodes) + print('local rank:', self.local_rank, '|', 'dataset length:', dataset_length) + + obs_transforms = get_active_obs_transforms(config) + observation_space = apply_obs_transforms_obs_space( + envs.observation_spaces[0], obs_transforms + ) + self._initialize_policy( + config, + load_from_ckpt=True, + observation_space=observation_space, + action_space=envs.action_spaces[0], + ) + self.policy.eval() + self.waypoint_predictor.eval() + + observations = envs.reset() + observations = extract_instruction_tokens( + observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID + ) + batch = batch_obs(observations, self.device) + batch = apply_obs_transforms_batch(batch, obs_transforms) + + if 'CMA' in self.config.MODEL.policy_name: + rnn_states = torch.zeros( + envs.num_envs, + self.num_recurrent_layers, + config.MODEL.STATE_ENCODER.hidden_size, + device=self.device, + ) + elif 'VLNBERT' in self.config.MODEL.policy_name: + h_t = torch.zeros( + envs.num_envs, 768, + device=self.device, + ) + language_features = torch.zeros( + envs.num_envs, 80, 768, + device=self.device, + ) + + not_done_masks = torch.zeros( + envs.num_envs, 1, dtype=torch.uint8, device=self.device + ) + + stats_episodes = {} + + rgb_frames = [[] for _ in range(envs.num_envs)] + if len(config.VIDEO_OPTION) > 0: + os.makedirs(config.VIDEO_DIR, exist_ok=True) + + if config.EVAL.EPISODE_COUNT == -1: + episodes_to_eval = sum(envs.number_of_episodes) + else: + episodes_to_eval = min( + config.EVAL.EPISODE_COUNT, sum(envs.number_of_episodes) + ) + + pbar = tqdm.tqdm(total=episodes_to_eval) if config.use_pbar else None + log_str = ( + f"[Ckpt: {checkpoint_index}]" + " [Episodes evaluated: 
{evaluated}/{total}]" + " [Time elapsed (s): {time}]" + ) + start_time = time.time() + + # number = 0 + total_weight = 0. + ml_loss = 0. + bpositions = [[] for _ in range(envs.num_envs)] + while envs.num_envs > 0 and len(stats_episodes) < episodes_to_eval: + current_episodes = envs.current_episodes() + positions = []; headings = [] + for ob_i in range(len(current_episodes)): + agent_state_i = envs.call_at(ob_i, "get_agent_info", {}) + positions.append(agent_state_i['position']) + headings.append(agent_state_i['heading']) + + with torch.no_grad(): + if 'CMA' in self.config.MODEL.policy_name: + # instructions + instruction_embedding, all_lang_masks = self.policy.net( + mode = "language", + observations = batch, + ) + + # candidate waypoints prediction + cand_rgb, cand_depth, \ + cand_direction, cand_mask, candidate_lengths, \ + batch_angles, batch_distances = self.policy.net( + mode = "waypoint", + waypoint_predictor = self.waypoint_predictor, + observations = batch, + in_train = False, + ) + # navigation action logits + logits, rnn_states = self.policy.net( + mode = 'navigation', + observations = batch, + instruction = instruction_embedding, + text_mask = all_lang_masks, + rnn_states = rnn_states, + headings = headings, + cand_rgb = cand_rgb, + cand_depth = cand_depth, + cand_direction = cand_direction, + cand_mask = cand_mask, + masks = not_done_masks, + ) + logits = logits.masked_fill_(cand_mask, -float('inf')) + + elif 'VLNBERT' in self.config.MODEL.policy_name: + if 'R2R' in self.config.TASK_CONFIG.DATASET.DATA_PATH: + lang_idx_tokens = batch['instruction'] + padding_idx = 0 + lang_masks = (lang_idx_tokens != padding_idx) + lang_lengths = lang_masks.sum(1) + lang_token_type_ids = torch.zeros_like(lang_masks, + dtype=torch.long, device=self.device) + h_t_flag = h_t.sum(1)==0.0 + h_t_init, language_features = self.policy.net( + mode='language', + lang_idx_tokens=lang_idx_tokens, + lang_masks=lang_masks) + elif 'RxR' in self.config.TASK_CONFIG.DATASET.DATA_PATH: + to_be_masked = ((torch.abs(batch['rxr_instruction']) == 0)*1.).mean(-1) + lang_masks = torch.ones_like(to_be_masked) - to_be_masked + # lang_lengths = all_lang_masks.sum(1) + h_t_flag = h_t.sum(1)==0.0 + h_t_init, language_features = self.policy.net( + mode='language', + observations=batch, + lang_masks=lang_masks, + ) + else: + raise NotImplementedError + h_t[h_t_flag] = h_t_init[h_t_flag] + language_features = torch.cat( + (h_t.unsqueeze(1), language_features[:,1:,:]), dim=1) + # candidate waypoints prediction + cand_rgb, cand_depth, \ + cand_direction, cand_mask, candidate_lengths, \ + batch_angles, batch_distances = self.policy.net( + mode = "waypoint", + waypoint_predictor = self.waypoint_predictor, + observations = batch, + in_train = False, + ) + # navigation action logits + logits, h_t = self.policy.net( + mode = 'navigation', + observations=batch, + lang_masks=lang_masks, + lang_feats=language_features, + # lang_token_type_ids=lang_token_type_ids, + headings=headings, + cand_rgb = cand_rgb, + cand_depth = cand_depth, + cand_direction = cand_direction, + cand_mask = cand_mask, + masks = not_done_masks, + ) + logits = logits.masked_fill_(cand_mask, -float('inf')) + + # high-to-low actions in environments + actions = logits.argmax(dim=-1, keepdim=True) + env_actions = [] + for j in range(logits.size(0)): + if actions[j].item() == candidate_lengths[j]-1: + env_actions.append({'action': + {'action': 0, 'action_args':{}}}) + else: + env_actions.append({'action': + {'action': 4, # HIGHTOLOW + 'action_args':{ + 'angle': 
batch_angles[j][actions[j].item()], + 'distance': batch_distances[j][actions[j].item()], + }}}) + + outputs = envs.step(env_actions) + observations, _, dones, infos = [list(x) for x in zip(*outputs)] + for j, ob in enumerate(observations): + if env_actions[j]['action']['action'] == 0: + continue + else: + envs.call_at(j, + 'change_current_path', # to update and record low-level path + {'new_path': ob.pop('positions'), + 'collisions': ob.pop('collisions')} + ) + + not_done_masks = torch.tensor( + [[0] if done else [1] for done in dones], + dtype=torch.uint8, device=self.device) + + # reset envs and observations if necessary + for i in range(envs.num_envs): + if len(config.VIDEO_OPTION) > 0: + frame = observations_to_image(observations[i], infos[i]) + frame = append_text_to_image( + frame, current_episodes[i].instruction.instruction_text + ) + rgb_frames[i].append(frame) + + if not dones[i]: + continue + + # ep done, calculate metrics + info = infos[i] + metric = {} + metric['steps_taken'] = info['steps_taken'] + ep_id = str(envs.current_episodes()[i].episode_id) + gt_path = np.array(self.gt_data[ep_id]['locations']).astype(np.float) + if 'current_path' in envs.current_episodes()[i].info.keys(): + positions_ = np.array(envs.current_episodes()[i].info['current_path']).astype(np.float) + collisions_ = np.array(envs.current_episodes()[i].info['collisions']) + assert collisions_.shape[0] == positions_.shape[0] - 1 + else: + positions_ = np.array(dis_to_con(np.array(info['position']['position']))).astype(np.float) + distance = np.array(info['position']['distance']).astype(np.float) + metric['distance_to_goal'] = distance[-1] + metric['success'] = 1. if distance[-1] <= 3. and env_actions[i]['action']['action'] == 0 else 0. + metric['oracle_success'] = 1. if (distance <= 3.).any() else 0. + metric['path_length'] = np.linalg.norm(positions_[1:] - positions_[:-1],axis=1).sum() + try: + metric['collisions'] = collisions_.mean() + except: + metric['collisions'] = 0 + pass + + gt_length = distance[0] + metric['spl'] = metric['success']*gt_length/max(gt_length,metric['path_length']) + + act_con_path = positions_ + gt_con_path = np.array(dis_to_con(gt_path)).astype(np.float) + dtw_distance = fastdtw(act_con_path, gt_con_path, dist=NDTW.euclidean_distance)[0] + nDTW = np.exp(-dtw_distance / (len(gt_con_path) * config.TASK_CONFIG.TASK.SUCCESS_DISTANCE)) + + metric['ndtw'] = nDTW + stats_episodes[current_episodes[i].episode_id] = metric + + observations[i] = envs.reset_at(i)[0] # envs[i] change to next episode + + if 'CMA' in self.config.MODEL.policy_name: + rnn_states[i] *= 0. + elif 'VLNBERT' in self.config.MODEL.policy_name: + h_t[i] *= 0. 
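+ # rnn_states (CMA) / h_t (VLNBERT) for env i were zeroed above so the next
+ # episode assigned to this env slot starts from a clean recurrent state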
+ + if config.use_pbar: + pbar.update() + else: + logger.info( + log_str.format( + evaluated=len(stats_episodes), + total=episodes_to_eval, + time=round(time.time() - start_time), + ) + ) + + if len(config.VIDEO_OPTION) > 0: + generate_video( + video_option=config.VIDEO_OPTION, + video_dir=config.VIDEO_DIR, + images=rgb_frames[i], + episode_id=current_episodes[i].episode_id, + checkpoint_idx=checkpoint_index, + metrics={ + "spl": stats_episodes[ + current_episodes[i].episode_id + ]["spl"] + }, + tb_writer=writer, + fps=1, + ) + + # del stats_episodes[current_episodes[i].episode_id][ + # "top_down_map_vlnce" + # ] + # del stats_episodes[current_episodes[i].episode_id][ + # "collisions" + # ] + rgb_frames[i] = [] + + observations = extract_instruction_tokens( + observations, + self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID, + ) + batch = batch_obs(observations, self.device) + batch = apply_obs_transforms_batch(batch, obs_transforms) + + envs_to_pause = [] + next_episodes = envs.current_episodes() + + for i in range(envs.num_envs): + if next_episodes[i].episode_id in stats_episodes: # 出现了重复的ep,表示这个模拟器中的episode已经全部过了一遍 + envs_to_pause.append(i) + + if 'VLNBERT' in self.config.MODEL.policy_name: + rnn_states = h_t + + headings = torch.tensor(headings) + ( + envs, + rnn_states, + not_done_masks, + headings, # prev_actions + batch, + rgb_frames, + # positions + ) = self._pause_envs( + envs_to_pause, + envs, + rnn_states, + not_done_masks, + headings, # prev_actions + batch, + rgb_frames, + # positions + ) + headings = headings.tolist() + if 'VLNBERT' in self.config.MODEL.policy_name: + h_t = rnn_states + + envs.close() + if config.use_pbar: + pbar.close() + if self.world_size > 1: + distr.barrier() + aggregated_stats = {} + num_episodes = len(stats_episodes) + # print('rank', self.local_rank, 'evaluated',num_episodes, 'episodes') + for stat_key in next(iter(stats_episodes.values())).keys(): + aggregated_stats[stat_key] = ( + sum(v[stat_key] for v in stats_episodes.values()) + / num_episodes + ) + # print(self.local_rank, aggregated_stats) + total = torch.tensor(num_episodes).cuda() + if self.world_size > 1: + dist.reduce(total,dst=0) + total = total.item() + + if self.world_size > 1: + logger.info( + f"rank {self.local_rank}'s {num_episodes}-episode results: {aggregated_stats}") + for k,v in aggregated_stats.items(): + v = torch.tensor(v*num_episodes).cuda() + # print(self.local_rank, k+':', v.item(), num_episodes, 'before reduce') + cat_v = gather_list_and_concat(v,self.world_size) + # print(self.local_rank, k+':', cat_v, num_episodes, 'after_reduce') + v = (sum(cat_v)/total).item() + # print(self.local_rank, k+':', v, num_episodes, 'after divide total') + aggregated_stats[k] = v + + split = config.TASK_CONFIG.DATASET.SPLIT + fname = os.path.join( + config.RESULTS_DIR, + f"stats_ep_ckpt_{checkpoint_index}_{split}_r{self.local_rank}_w{self.world_size}.json", + ) + with open(fname, "w") as f: + json.dump(stats_episodes, f, indent=4) + + if self.local_rank < 1: + if config.EVAL.SAVE_RESULTS: + fname = os.path.join( + config.RESULTS_DIR, + f"stats_ckpt_{checkpoint_index}_{split}.json", + ) + with open(fname, "w") as f: + json.dump(aggregated_stats, f, indent=4) + + logger.info(f"Episodes evaluated: {total}") + checkpoint_num = checkpoint_index + 1 + for k, v in aggregated_stats.items(): + logger.info(f"Average episode {k}: {v:.6f}") + writer.add_scalar(f"eval_{k}/{split}", v, checkpoint_num) + + def collect_val_traj(self): + from habitat_extensions.task import ALL_ROLES_MASK, RxRVLNCEDatasetV1 
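+ # load the ground-truth reference paths for the current split (per-role gt
+ # files for RxR, the NDTW gt file otherwise); the episode ids are then strided
+ # by local_rank / GPU_NUMBERS so each rank evaluates a disjoint subset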
+ trajectories = defaultdict(list) + split = self.config.TASK_CONFIG.DATASET.SPLIT + + if 'rxr' in self.config.BASE_TASK_CONFIG_PATH: + if "{role}" in self.config.IL.RECOLLECT_TRAINER.gt_file: + gt_data = {} + for role in RxRVLNCEDatasetV1.annotation_roles: + if ( + ALL_ROLES_MASK not in self.config.TASK_CONFIG.DATASET.ROLES + and role not in self.config.TASK_CONFIG.DATASET.ROLES + ): + continue + + with gzip.open( + self.config.IL.RECOLLECT_TRAINER.gt_file.format( + split=split, role=role + ), + "rt", + ) as f: + gt_data.update(json.load(f)) + else: + with gzip.open( + self.config.IL.RECOLLECT_TRAINER.gt_path.format( + split=split) + ) as f: + gt_data = json.load(f) + else: + with gzip.open( + self.config.TASK_CONFIG.TASK.NDTW.GT_PATH.format(split=split) + ) as f: + gt_data = json.load(f) + + self.gt_data = gt_data + + trajectories = gt_data + self.trajectories = gt_data + trajectories = list(trajectories.keys())[self.config.local_rank::self.config.GPU_NUMBERS] + + return trajectories + + def eval(self) -> None: + r"""Main method of trainer evaluation. Calls _eval_checkpoint() that + is specified in Trainer class that inherits from BaseRLTrainer + or BaseILTrainer + + Returns: + None + """ + self.device = ( + torch.device("cuda", self.config.TORCH_GPU_ID) + if torch.cuda.is_available() + else torch.device("cpu") + ) + + if "tensorboard" in self.config.VIDEO_OPTION: + assert ( + len(self.config.TENSORBOARD_DIR) > 0 + ), "Must specify a tensorboard directory for video display" + os.makedirs(self.config.TENSORBOARD_DIR, exist_ok=True) + if "disk" in self.config.VIDEO_OPTION: + assert ( + len(self.config.VIDEO_DIR) > 0 + ), "Must specify a directory for storing videos on disk" + + world_size = self.config.GPU_NUMBERS + self.world_size = world_size + self.local_rank = self.config.local_rank + + self.config.defrost() + # split = self.config.TASK_CONFIG.DATASET.SPLIT + # self.config.TASK_CONFIG.TASK.NDTW.SPLIT = split + # self.config.TASK_CONFIG.TASK.SDTW.SPLIT = split + self.config.TASK_CONFIG.DATASET.ROLES = ["guide"] + self.config.TASK_CONFIG.TASK.MEASUREMENTS = ['POSITION', 'STEPS_TAKEN'] + self.config.SIMULATOR_GPU_IDS = [self.config.SIMULATOR_GPU_IDS[self.config.local_rank]] + + if 'HIGHTOLOW' in self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS: + idx = self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS.index('HIGHTOLOW') + self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS[idx] = 'HIGHTOLOWEVAL' + self.config.TASK_CONFIG.DATASET.LANGUAGES = self.config.EVAL.LANGUAGES + self.config.TASK_CONFIG.DATASET.SPLIT = self.config.EVAL.SPLIT + self.config.TASK_CONFIG.TASK.NDTW.SPLIT = self.config.EVAL.SPLIT + self.config.TASK_CONFIG.TASK.SDTW.SPLIT = self.config.EVAL.SPLIT + self.config.use_pbar = not is_slurm_batch_job() + # if 'rxr' in self.config.BASE_TASK_CONFIG_PATH: + # self.config.EVAL.trajectories_file = \ + # self.config.EVAL.trajectories_file[:-8] + '_w' + \ + # str(self.world_size) + '_r' + str(self.local_rank) + '.json.gz' + + # if choosing image + resize_config = self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES + crop_config = self.config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS + config = self.config.TASK_CONFIG + camera_orientations = get_camera_orientations12() + + # sensor_uuids = [] + for sensor_type in ["RGB", "DEPTH"]: + resizer_size = dict(resize_config)[sensor_type.lower()] + cropper_size = dict(crop_config)[sensor_type.lower()] + sensor = getattr(config.SIMULATOR, f"{sensor_type}_SENSOR") + + for action, orient in camera_orientations.items(): + 
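+ # one extra RGB/DEPTH sensor is registered per orientation returned by
+ # get_camera_orientations12, together with matching resize/crop transforms,
+ # so the agent observes a panorama at evaluation time
+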
camera_template = f"{sensor_type}_{action}" + camera_config = deepcopy(sensor) + camera_config.ORIENTATION = camera_orientations[action] + camera_config.UUID = camera_template.lower() + # sensor_uuids.append(camera_config.UUID) + setattr(config.SIMULATOR, camera_template, camera_config) + config.SIMULATOR.AGENT_0.SENSORS.append(camera_template) + resize_config.append((camera_template.lower(), resizer_size)) + crop_config.append((camera_template.lower(), cropper_size)) + + self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES = resize_config + self.config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS = crop_config + self.config.TASK_CONFIG = config + self.config.SENSORS = config.SIMULATOR.AGENT_0.SENSORS + + self.config.freeze() + # self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False + # self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = ( + # -1 + # ) + torch.cuda.set_device(self.device) + if world_size > 1: + distr.init_process_group(backend='nccl', init_method='env://') + self.device = self.config.TORCH_GPU_IDS[self.local_rank] + torch.cuda.set_device(self.device) + self.config.defrost() + self.config.TORCH_GPU_ID = self.config.TORCH_GPU_IDS[self.local_rank] + self.config.freeze() + # + # if 'rxr' in self.config.BASE_TASK_CONFIG_PATH: + self.traj = self.collect_val_traj() + with TensorboardWriter( + self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs + ) as writer: + if os.path.isfile(self.config.EVAL.CKPT_PATH_DIR): + # evaluate singe checkpoint + proposed_index = get_checkpoint_id( + self.config.EVAL.CKPT_PATH_DIR + ) + if proposed_index is not None: + ckpt_idx = proposed_index + else: + ckpt_idx = 0 + self._eval_checkpoint( + self.config.EVAL.CKPT_PATH_DIR, + writer, + checkpoint_index=ckpt_idx, + ) + else: + # evaluate multiple checkpoints in order + # prev_ckpt_ind = -1 #TODO eval start index + evaluated = [] + while True: + current_ckpt = None + while current_ckpt is None: + checkpoint_folder = self.config.EVAL_CKPT_PATH_DIR + if not self.config.CEPH_IO: + models_paths = [p for p in filter(os.path.isfile, glob.glob(checkpoint_folder + "/*")) if p not in evaluated] + else: + models_paths = [os.path.join(self.config.CEPH_URL,p) for p in self.client.list(self.config.CEPH_URL) if os.path.join(self.config.CEPH_URL,p) not in evaluated] + if len(models_paths) > 0: + models_paths.sort(key=self._get_iter) + current_ckpt = models_paths[0] + prev_ckpt_ind = current_ckpt.split('.')[-2] + else: + current_ckpt = None + time.sleep(2) # sleep for 2 secs before polling again + # time.sleep(10) + if self.local_rank < 1: + logger.info(f"=======current_ckpt: {current_ckpt}=======") + # prev_ckpt_ind += 1 + self._eval_checkpoint( + checkpoint_path=current_ckpt, + writer=writer, + checkpoint_index=prev_ckpt_ind, + ) + evaluated.append(current_ckpt) + + def inference(self) -> None: + r"""Runs inference on a single checkpoint, creating a path predictions file.""" + checkpoint_path = self.config.INFERENCE.CKPT_PATH + logger.info(f"checkpoint_path: {checkpoint_path}") + + self.config.defrost() + self.config.TASK_CONFIG.DATASET.SPLIT = self.config.INFERENCE.SPLIT + self.config.TASK_CONFIG.DATASET.ROLES = ["guide"] + self.config.TASK_CONFIG.DATASET.LANGUAGES = self.config.INFERENCE.LANGUAGES + self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False + self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = ( + -1 + ) + self.config.IL.ckpt_to_load = self.config.INFERENCE.CKPT_PATH + 
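+ # measurements are cleared for inference (no scoring happens here) and only
+ # the instruction sensors are kept; the predicted paths are written to
+ # INFERENCE.PREDICTIONS_FILE at the end of this method
+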
self.config.TASK_CONFIG.TASK.MEASUREMENTS = [] + self.config.TASK_CONFIG.TASK.SENSORS = [ + s for s in self.config.TASK_CONFIG.TASK.SENSORS if "INSTRUCTION" in s + ] + ########### Additional Config ########### + self.config.SIMULATOR_GPU_IDS = [self.config.SIMULATOR_GPU_IDS[self.config.local_rank]] + if 'HIGHTOLOW' in self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS: + idx = self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS.index('HIGHTOLOW') + self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS[idx] = 'HIGHTOLOWINFERENCE' + # if choosing image + resize_config = self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES + crop_config = self.config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS + config = self.config.TASK_CONFIG + camera_orientations = get_camera_orientations12() + for sensor_type in ["RGB", "DEPTH"]: + resizer_size = dict(resize_config)[sensor_type.lower()] + cropper_size = dict(crop_config)[sensor_type.lower()] + sensor = getattr(config.SIMULATOR, f"{sensor_type}_SENSOR") + + for action, orient in camera_orientations.items(): + camera_template = f"{sensor_type}_{action}" + camera_config = deepcopy(sensor) + camera_config.ORIENTATION = camera_orientations[action] + camera_config.UUID = camera_template.lower() + setattr(config.SIMULATOR, camera_template, camera_config) + config.SIMULATOR.AGENT_0.SENSORS.append(camera_template) + resize_config.append((camera_template.lower(), resizer_size)) + crop_config.append((camera_template.lower(), cropper_size)) + + self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES = resize_config + self.config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS = crop_config + self.config.TASK_CONFIG = config + self.config.SENSORS = config.SIMULATOR.AGENT_0.SENSORS + # self.config.ENV_NAME = "VLNCEInferenceEnv" #TODO is this necessary? 
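+ # the panoramic camera setup above mirrors eval(); freeze the config before
+ # the environments are constructed from it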
+ self.config.freeze() + + if self.config.INFERENCE.USE_CKPT_CONFIG: + config = self._setup_eval_config( + self.load_checkpoint(checkpoint_path, map_location="cpu")[ + "config" + ] + ) + else: + config = self.config.clone() + config.defrost() + config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False + config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = ( + -1 + ) + config.IL.ckpt_to_load = checkpoint_path + config.freeze() + + eps = self.collect_val_traj() + envs = construct_envs( + config, get_env_class(config.ENV_NAME), + auto_reset_done=False, + episodes_allowed=eps[:10] if sys.gettrace() else None # for debug, ep subset + ) + + obs_transforms = get_active_obs_transforms(config) + observation_space = apply_obs_transforms_obs_space( + envs.observation_spaces[0], obs_transforms + ) + + self._initialize_policy( + config, + load_from_ckpt=True, + observation_space=observation_space, + action_space=envs.action_spaces[0], + ) + self.policy.eval() + self.waypoint_predictor.eval() + + observations = envs.reset() + observations = extract_instruction_tokens( + observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID + ) + batch = batch_obs(observations, self.device) + batch = apply_obs_transforms_batch(batch, obs_transforms) + + if 'CMA' in self.config.MODEL.policy_name: + rnn_states = torch.zeros( + envs.num_envs, + self.num_recurrent_layers, + config.MODEL.STATE_ENCODER.hidden_size, + device=self.device, + ) + elif 'VLNBERT' in self.config.MODEL.policy_name: + h_t = torch.zeros( + envs.num_envs, 768, + device=self.device, + ) + language_features = torch.zeros( + envs.num_envs, 80, 768, + device=self.device, + ) + not_done_masks = torch.zeros( + envs.num_envs, 1, dtype=torch.uint8, device=self.device + ) + + episode_predictions = defaultdict(list) + # episode ID --> instruction ID for rxr predictions format + instruction_ids: Dict[str, int] = {} + + # populate episode_predictions with the starting state + current_episodes = envs.current_episodes() + for i in range(envs.num_envs): + episode_predictions[current_episodes[i].episode_id].append( + envs.call_at(i, "get_agent_info", {}) + ) + if config.INFERENCE.FORMAT == "rxr": + ep_id = current_episodes[i].episode_id + k = current_episodes[i].instruction.instruction_id + instruction_ids[ep_id] = int(k) + + with tqdm.tqdm( + total=sum(envs.count_episodes()), + desc=f"[inference:{self.config.INFERENCE.SPLIT}]", + ) as pbar: + while envs.num_envs > 0: + current_episodes = envs.current_episodes() + positions = []; headings = [] + for i in range(envs.num_envs): + agent_state_i = envs.call_at(i,"get_agent_info", {}) + positions.append(agent_state_i['position']) + headings.append(agent_state_i['heading']) + + with torch.no_grad(): + if 'CMA' in self.config.MODEL.policy_name: + # instructions + instruction_embedding, all_lang_masks = self.policy.net( + mode = "language", + observations = batch, + ) + + # candidate waypoints prediction + cand_rgb, cand_depth, \ + cand_direction, cand_mask, candidate_lengths, \ + batch_angles, batch_distances = self.policy.net( + mode = "waypoint", + waypoint_predictor = self.waypoint_predictor, + observations = batch, + in_train = False, + ) + # navigation action logits + logits, rnn_states = self.policy.net( + mode = 'navigation', + observations = batch, + instruction = instruction_embedding, + text_mask = all_lang_masks, + rnn_states = rnn_states, + headings = headings, + cand_rgb = cand_rgb, + cand_depth = cand_depth, + cand_direction = cand_direction, + cand_mask = cand_mask, + masks 
= not_done_masks, + ) + logits = logits.masked_fill_(cand_mask, -float('inf')) + + # high-to-low actions in environments + actions = logits.argmax(dim=-1, keepdim=True) + env_actions = [] + for j in range(logits.size(0)): + if actions[j].item() == candidate_lengths[j]-1: + env_actions.append({'action': + {'action': 0, 'action_args':{}}}) + else: + env_actions.append({'action': + {'action': 4, # HIGHTOLOW + 'action_args':{ + 'angle': batch_angles[j][actions[j].item()], + 'distance': batch_distances[j][actions[j].item()], + }}}) + + outputs = envs.step(env_actions) + observations, _, dones, infos = [list(x) for x in zip(*outputs)] + for i, ob in enumerate(observations): + if env_actions[i]['action']['action'] == 0: + continue + else: + envs.call_at( + i, 'update_cur_path', {'new_path': ob.pop('cur_path')} + ) # to update and record low-level path + + not_done_masks = torch.tensor( + [[0] if done else [1] for done in dones], + dtype=torch.uint8, + device=self.device, + ) + + # reset envs and observations if necessary + for i in range(envs.num_envs): + if not dones[i]: + continue + + ep_id = envs.current_episodes()[i].episode_id + if 'cur_path' in envs.current_episodes()[i].info: + episode_predictions[ep_id] += envs.current_episodes()[i].info['cur_path'] + episode_predictions[ep_id][-1]['stop'] = True + # assert len(episode_predictions[ep_id]) <= 500 + + observations[i] = envs.reset_at(i)[0] + if 'CMA' in self.config.MODEL.policy_name: + rnn_states[i] *= 0. + elif 'VLNBERT' in self.config.MODEL.policy_name: + h_t[i] *= 0. + # prev_actions[i] = torch.zeros(1, dtype=torch.long) + pbar.update() + + observations = extract_instruction_tokens( + observations, + self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID, + ) + batch = batch_obs(observations, self.device) + batch = apply_obs_transforms_batch(batch, obs_transforms) + + envs_to_pause = [] + next_episodes = envs.current_episodes() + for i in range(envs.num_envs): + if not dones[i]: + continue + + if next_episodes[i].episode_id in episode_predictions: + envs_to_pause.append(i) + else: + episode_predictions[next_episodes[i].episode_id].append( + envs.call_at(i, "get_agent_info", {}) + ) + if config.INFERENCE.FORMAT == "rxr": + ep_id = next_episodes[i].episode_id + k = next_episodes[i].instruction.instruction_id + instruction_ids[ep_id] = int(k) + # number += 1 + + headings = torch.tensor(headings) + ( + envs, + rnn_states, + not_done_masks, + headings, # prev_actions + batch, + rgb_frames, + # positions + ) = self._pause_envs( + envs_to_pause, + envs, + rnn_states, + not_done_masks, + headings, # prev_actions + batch, + # rgb_frames, + # positions + ) + headings = headings.tolist() + if 'VLNBERT' in self.config.MODEL.policy_name: + h_t = rnn_states + + envs.close() + + if config.INFERENCE.FORMAT == "r2r": + with open(config.INFERENCE.PREDICTIONS_FILE, "w") as f: + json.dump(episode_predictions, f, indent=2) + + logger.info( + f"Predictions saved to: {config.INFERENCE.PREDICTIONS_FILE}" + ) + else: # use 'rxr' format for rxr-habitat leaderboard + predictions_out = [] + + for k,v in episode_predictions.items(): + + # save only positions that changed + path = [v[0]["position"]] + for p in v[1:]: + if path[-1] != p["position"]: + path.append(p["position"]) + + predictions_out.append( + { + "instruction_id": instruction_ids[k], + "path": path, + } + ) + + predictions_out.sort(key=lambda x: x["instruction_id"]) + with jsonlines.open( + config.INFERENCE.PREDICTIONS_FILE, mode="w" + ) as writer: + writer.write_all(predictions_out) + + logger.info( + 
f"Predictions saved to: {config.INFERENCE.PREDICTIONS_FILE}" + ) diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/models/hamt/vilmodel_cmt.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/hamt/vilmodel_cmt.py new file mode 100644 index 0000000..40df697 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/hamt/vilmodel_cmt.py @@ -0,0 +1,753 @@ +import json +import logging +import math +import os +import sys +from io import open +from typing import Callable, List, Tuple +import numpy as np +import copy + +import torch +from torch import nn +from torch import Tensor, device, dtype + +from transformers import BertPreTrainedModel + +logger = logging.getLogger(__name__) + +BertLayerNorm = torch.nn.LayerNorm + + +def gelu(x): + """Implementation of the gelu activation function. + For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): + 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) + Also see https://arxiv.org/abs/1606.08415 + """ + return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) + + +def swish(x): + return x * torch.sigmoid(x) + + +ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish} + + + +class BertEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type embeddings. + """ + def __init__(self, config): + super(BertEmbeddings, self).__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, input_ids, token_type_ids=None, position_ids=None): + seq_length = input_ids.size(1) + if position_ids is None: + position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) + position_ids = position_ids.unsqueeze(0).expand_as(input_ids) + if token_type_ids is None: + token_type_ids = torch.zeros_like(input_ids) + + words_embeddings = self.word_embeddings(input_ids) + position_embeddings = self.position_embeddings(position_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = words_embeddings + position_embeddings + token_type_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class BertSelfAttention(nn.Module): + def __init__(self, config): + super(BertSelfAttention, self).__init__() + if config.hidden_size % config.num_attention_heads != 0: + raise ValueError( + "The hidden size (%d) is not a multiple of the number of attention " + "heads (%d)" % (config.hidden_size, config.num_attention_heads)) + self.output_attentions = config.output_attentions + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = 
nn.Dropout(config.attention_probs_dropout_prob) + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward(self, hidden_states, attention_mask, head_mask=None): + mixed_query_layer = self.query(hidden_states) + mixed_key_layer = self.key(hidden_states) + mixed_value_layer = self.value(hidden_states) + + query_layer = self.transpose_for_scores(mixed_query_layer) + key_layer = self.transpose_for_scores(mixed_key_layer) + value_layer = self.transpose_for_scores(mixed_value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + # Apply the attention mask is (precomputed for all layers in BertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.Softmax(dim=-1)(attention_scores) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + + # recurrent vlnbert use attention scores + outputs = (context_layer, attention_scores) if self.output_attentions else (context_layer,) + return outputs + + +class BertSelfOutput(nn.Module): + def __init__(self, config): + super(BertSelfOutput, self).__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertAttention(nn.Module): + def __init__(self, config): + super(BertAttention, self).__init__() + self.self = BertSelfAttention(config) + self.output = BertSelfOutput(config) + + def forward(self, input_tensor, attention_mask, head_mask=None): + self_outputs = self.self(input_tensor, attention_mask, head_mask) + attention_output = self.output(self_outputs[0], input_tensor) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +class BertIntermediate(nn.Module): + def __init__(self, config): + super(BertIntermediate, self).__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class BertOutput(nn.Module): + def __init__(self, config): + super(BertOutput, self).__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + 
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertLayer(nn.Module): + def __init__(self, config): + super(BertLayer, self).__init__() + self.attention = BertAttention(config) + self.intermediate = BertIntermediate(config) + self.output = BertOutput(config) + + def forward(self, hidden_states, attention_mask, head_mask=None): + attention_outputs = self.attention(hidden_states, attention_mask, head_mask) + attention_output = attention_outputs[0] + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them + return outputs + + +class BertEncoder(nn.Module): + def __init__(self, config): + super(BertEncoder, self).__init__() + self.output_attentions = config.output_attentions + self.output_hidden_states = config.output_hidden_states + self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)]) + + def forward(self, hidden_states, attention_mask, head_mask=None): + all_hidden_states = () + all_attentions = () + for i, layer_module in enumerate(self.layer): + if self.output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_outputs = layer_module(hidden_states, attention_mask, + None if head_mask is None else head_mask[i]) + hidden_states = layer_outputs[0] + + if self.output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + # Add last layer + if self.output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + outputs = (hidden_states,) + if self.output_hidden_states: + outputs = outputs + (all_hidden_states,) + if self.output_attentions: + outputs = outputs + (all_attentions,) + return outputs # last-layer hidden state, (all hidden states), (all attentions) + + +class BertPooler(nn.Module): + def __init__(self, config): + super(BertPooler, self).__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states): + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. 
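+ # (the first token is the [CLS] token); the pooled state is passed through
+ # a dense layer and a tanh activation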
+ first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class BertPredictionHeadTransform(nn.Module): + def __init__(self, config): + super(BertPredictionHeadTransform, self).__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if isinstance(config.hidden_act, str): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +class BertLMPredictionHead(nn.Module): + def __init__(self, config): + super(BertLMPredictionHead, self).__init__() + self.transform = BertPredictionHeadTransform(config) + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. + self.decoder = nn.Linear(config.hidden_size, + config.vocab_size, + bias=False) + + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + self.bias + return hidden_states + + +class BertOnlyMLMHead(nn.Module): + def __init__(self, config): + super(BertOnlyMLMHead, self).__init__() + self.predictions = BertLMPredictionHead(config) + + def forward(self, sequence_output): + prediction_scores = self.predictions(sequence_output) + return prediction_scores + +class BertOutAttention(nn.Module): + def __init__(self, config, ctx_dim=None): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0: + raise ValueError( + "The hidden size (%d) is not a multiple of the number of attention " + "heads (%d)" % (config.hidden_size, config.num_attention_heads)) + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + if ctx_dim is None: + ctx_dim =config.hidden_size + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(ctx_dim, self.all_head_size) + self.value = nn.Linear(ctx_dim, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward(self, hidden_states, context, attention_mask=None): + mixed_query_layer = self.query(hidden_states) + mixed_key_layer = self.key(context) + mixed_value_layer = self.value(context) + + query_layer = self.transpose_for_scores(mixed_query_layer) + key_layer = self.transpose_for_scores(mixed_key_layer) + value_layer = self.transpose_for_scores(mixed_value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + # Apply the attention mask is (precomputed for all layers in BertModel forward() function) + if attention_mask is not None: + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. 
+ attention_probs = nn.Softmax(dim=-1)(attention_scores) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs) + + context_layer = torch.matmul(attention_probs, value_layer) + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + return context_layer, attention_probs + +class BertXAttention(nn.Module): + def __init__(self, config, ctx_dim=None): + super().__init__() + self.att = BertOutAttention(config, ctx_dim=ctx_dim) + self.output = BertSelfOutput(config) + + def forward(self, input_tensor, ctx_tensor, ctx_att_mask=None): + output, attention_scores = self.att(input_tensor, ctx_tensor, ctx_att_mask) + attention_output = self.output(output, input_tensor) + return attention_output, attention_scores + +class LXRTXLayer(nn.Module): + def __init__(self, config): + super().__init__() + + self.no_lang_ca = config.no_lang_ca # do not update language embeds + + # Lang self-att and FFN layer + self.lang_self_att = BertAttention(config) + self.lang_inter = BertIntermediate(config) + self.lang_output = BertOutput(config) + + # Visn self-att and FFN layer + self.visn_self_att = BertAttention(config) + self.visn_inter = BertIntermediate(config) + self.visn_output = BertOutput(config) + + # The cross attention layer + self.visual_attention = BertXAttention(config) + + def cross_att(self, lang_input, lang_attention_mask, visn_input, visn_attention_mask): + # Cross Attention + if self.no_lang_ca: + lang_att_output = lang_input + else: + lang_att_output, _ = self.visual_attention(lang_input, visn_input, ctx_att_mask=visn_attention_mask) + visn_att_output, cross_attention_score = self.visual_attention(visn_input, lang_input, ctx_att_mask=lang_attention_mask) + return lang_att_output, visn_att_output, cross_attention_score + + def self_att(self, lang_input, lang_attention_mask, visn_input, visn_attention_mask): + # Self Attention + if self.no_lang_ca: + lang_att_output = (lang_input, ) + else: + lang_att_output = self.lang_self_att(lang_input, lang_attention_mask) + + visn_att_output = self.visn_self_att(visn_input, visn_attention_mask) + return lang_att_output, visn_att_output + + def output_fc(self, lang_input, visn_input): + # FC layers + if not self.no_lang_ca: + lang_inter_output = self.lang_inter(lang_input) + visn_inter_output = self.visn_inter(visn_input) + + # Layer output + if self.no_lang_ca: + lang_output = lang_input + else: + lang_output = self.lang_output(lang_inter_output, lang_input) + visn_output = self.visn_output(visn_inter_output, visn_input) + return lang_output, visn_output + + def forward(self, lang_feats, lang_attention_mask, + visn_feats, visn_attention_mask): + lang_att_output = lang_feats + visn_att_output = visn_feats + + lang_att_output, visn_att_output, cross_attention_score = self.cross_att(lang_att_output, lang_attention_mask, + visn_att_output, visn_attention_mask) + lang_attention_score = cross_attention_score[:,:,0,:] + lang_att_output, visn_att_output = self.self_att(lang_att_output, lang_attention_mask, + visn_att_output, visn_attention_mask) + lang_output, visn_output = self.output_fc(lang_att_output[0], visn_att_output[0]) + + return lang_output, visn_output, lang_attention_score + +class LxmertEncoder(nn.Module): + def __init__(self, config): + super().__init__() + + 
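+ # layer counts are taken from the config: num_l_layers language-only BERT
+ # layers (stored as self.layer so pretrained BERT weights load by name),
+ # optional r_layers / h_layers for the observation and history streams, and
+ # num_x_layers cross-modal LXRT layers that fuse text with history + vision
+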
self.num_l_layers = config.num_l_layers + self.num_r_layers = config.num_r_layers + self.num_h_layers = config.num_h_layers + self.num_x_layers = config.num_x_layers + self.update_lang_bert = config.update_lang_bert + + # Using self.layer instead of self.l_layers to support loading BERT weights. + self.layer = nn.ModuleList( + [BertLayer(config) for _ in range(self.num_l_layers)] + ) + if not self.update_lang_bert: + for name, param in self.layer.named_parameters(): + param.requires_grad_(False) + + self.h_layers = nn.ModuleList( + [BertLayer(config) for _ in range(self.num_h_layers)] + ) if self.num_h_layers > 0 else None + self.r_layers = nn.ModuleList( + [BertLayer(config) for _ in range(self.num_r_layers)] + ) if self.num_r_layers > 0 else None + self.x_layers = nn.ModuleList( + [LXRTXLayer(config) for _ in range(self.num_x_layers)] + ) + + def forward(self, txt_embeds, extended_txt_masks, hist_embeds, + extended_hist_masks, img_embeds=None, extended_img_masks=None): + # text encoding + for layer_module in self.layer: + temp_output = layer_module(txt_embeds, extended_txt_masks) + txt_embeds = temp_output[0] + + # if not self.update_lang_bert: + # txt_embeds = txt_embeds.detach() + + # image encoding + if img_embeds is not None: + if self.r_layers is not None: + for layer_module in self.r_layers: + temp_output = layer_module(img_embeds, extended_img_masks) + img_embeds = temp_output[0] + + # history encoding + if self.h_layers is not None: + for layer_module in self.h_layers: + temp_output = layer_module(hist_embeds, extended_hist_masks) + hist_embeds = temp_output[0] + hist_max_len = hist_embeds.size(1) + + # cross-modal encoding + if img_embeds is None: + hist_img_embeds = hist_embeds + extended_hist_img_masks = extended_hist_masks + else: + hist_img_embeds = torch.cat([hist_embeds, img_embeds], 1) + extended_hist_img_masks = torch.cat([extended_hist_masks, extended_img_masks], -1) + + for layer_module in self.x_layers: + txt_embeds, hist_img_embeds = layer_module( + txt_embeds, extended_txt_masks, + hist_img_embeds, extended_hist_img_masks) + + hist_embeds = hist_img_embeds[:, :hist_max_len] + if img_embeds is not None: + img_embeds = hist_img_embeds[:, hist_max_len:] + return txt_embeds, hist_embeds, img_embeds + + + +class ImageEmbeddings(nn.Module): + def __init__(self, config): + super().__init__() + self.img_linear = nn.Linear(config.image_feat_size, config.hidden_size) + self.img_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12) + self.dep_linear = nn.Linear(config.depth_feat_size, config.hidden_size) + self.dep_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12) + self.ang_linear = nn.Linear(config.angle_feat_size, config.hidden_size) + self.ang_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12) + self.dis_linear = nn.Linear(config.angle_feat_size, config.hidden_size) + self.dis_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12) + # 0: non-navigable, 1: navigable, 2: stop + self.nav_type_embedding = nn.Embedding(3, config.hidden_size) + + # tf naming convention for layer norm + self.layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, rgb_feat, dep_feat, ang_feat, dis_feat, type_embeddings, nav_types=None): + transformed_im = self.img_layer_norm(self.img_linear(rgb_feat)) + transformed_dep = self.dep_layer_norm(self.dep_linear(dep_feat)) + transformed_ang = self.ang_layer_norm(self.ang_linear(ang_feat)) + transformed_dis = 
self.dis_layer_norm(self.dis_linear(dis_feat)) + embeddings = transformed_im + transformed_dep + transformed_ang + transformed_dis + type_embeddings + if nav_types is not None: + nav_embeddings = self.nav_type_embedding(nav_types) + embeddings = embeddings + nav_embeddings + embeddings = self.layer_norm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + +class HistoryEmbeddings(nn.Module): + def __init__(self, config): + super().__init__() + self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) + + self.img_linear = nn.Linear(config.image_feat_size, config.hidden_size) + self.img_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12) + # self.dep_linear = nn.Linear(config.depth_feat_size, config.hidden_size) + # self.dep_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12) + self.ang_linear = nn.Linear(config.angle_feat_size, config.hidden_size) + self.ang_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12) + + self.position_embeddings = nn.Embedding(config.max_action_steps, config.hidden_size) + # special type embedding for history + self.type_embedding = nn.Embedding(1, config.hidden_size) + + # tf naming convention for layer norm + self.layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + self.hist_enc_pano = config.hist_enc_pano + if config.hist_enc_pano: + self.pano_img_linear = nn.Linear(config.image_feat_size, config.hidden_size) + self.pano_img_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12) + # self.pano_dep_linear = nn.Linear(config.depth_feat_size, config.hidden_size) + # self.pano_dep_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12) + self.pano_ang_linear = nn.Linear(config.angle_feat_size, config.hidden_size) + self.pano_ang_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12) + pano_enc_config = copy.copy(config) + pano_enc_config.num_hidden_layers = config.num_h_pano_layers + self.pano_encoder = BertEncoder(pano_enc_config) + else: + self.pano_encoder = None + + def forward(self, img_feats, dep_feats, ang_feats, pos_ids, + pano_img_feats=None, pano_dep_feats=None, pano_ang_feats=None): + '''Args: + - img_feats: (batch_size, dim_feat) + - pos_ids: (batch_size, ) + - pano_img_feats: (batch_size, pano_len, dim_feat) + ''' + device = next(iter(self.parameters())).device + if img_feats is not None: + batch_size = img_feats.size(0) + else: + batch_size = 1 + + type_ids = torch.zeros((batch_size, )).long().to(device) + type_embeddings = self.type_embedding(type_ids) + + if img_feats is None: + cls_embeddings = self.dropout(self.layer_norm( + self.cls_token.expand(batch_size, -1, -1)[:, 0] + type_embeddings)) + return cls_embeddings + + # history embedding per step + embeddings = self.img_layer_norm(self.img_linear(img_feats)) + \ + self.ang_layer_norm(self.ang_linear(ang_feats)) + \ + self.position_embeddings(pos_ids) + \ + type_embeddings + + if self.pano_encoder is not None: + pano_embeddings = self.pano_img_layer_norm(self.pano_img_linear(pano_img_feats)) + \ + self.pano_ang_layer_norm(self.pano_ang_linear(pano_ang_feats)) + pano_embeddings = self.dropout(pano_embeddings) + # TODO: mask is always True + batch_size, pano_len, _ = pano_img_feats.size() + extended_pano_masks = torch.zeros(batch_size, pano_len).float().to(device).unsqueeze(1).unsqueeze(2) + pano_embeddings = self.pano_encoder(pano_embeddings, extended_pano_masks)[0] + pano_embeddings = torch.mean(pano_embeddings, 1) + + embeddings = embeddings + pano_embeddings # 
history既包含了orientation方位的img,也包含了pano的context + + embeddings = self.layer_norm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class NextActionPrediction(nn.Module): + def __init__(self, hidden_size, dropout_rate): + super().__init__() + self.net = nn.Sequential(nn.Linear(hidden_size, hidden_size), + nn.ReLU(), + BertLayerNorm(hidden_size, eps=1e-12), + nn.Dropout(dropout_rate), + nn.Linear(hidden_size, 1)) + + def forward(self, x): + return self.net(x) + + +class NavCMT(BertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.embeddings = BertEmbeddings(config) + self.img_embeddings = ImageEmbeddings(config) + + self.hist_embeddings = HistoryEmbeddings(config) + + self.encoder = LxmertEncoder(config) + + self.next_action = NextActionPrediction(config.hidden_size, config.pred_head_dropout_prob) + + self.init_weights() + + if self.config.fix_lang_embedding: + for name, param in self.embeddings.named_parameters(): + if 'token_type_embeddings' not in name: + param.requires_grad_(False) + if self.config.fix_hist_embedding: + for parma in self.hist_embeddings.parameters(): + parma.requires_grad_(False) + if self.config.fix_obs_embedding: + for parma in self.img_embeddings.parameters(): + parma.requires_grad_(False) + + + def forward(self, mode, txt_ids=None, txt_embeds=None, txt_masks=None, + hist_rgb_fts=None, hist_depth_fts=None, hist_ang_fts=None, + hist_pano_rgb_fts=None, hist_pano_depth_fts=None, hist_pano_ang_fts=None, + hist_embeds=None, ob_step_ids=None, hist_masks=None, + ob_rgb_fts=None, ob_dep_fts=None, ob_ang_fts=None, ob_dis_fts=None, ob_nav_types=None, + ob_masks=None): + + # text embedding + if mode == 'language': + ''' LXMERT language branch (in VLN only perform this at initialization) ''' + extended_txt_masks = txt_masks.unsqueeze(1).unsqueeze(2) + extended_txt_masks = extended_txt_masks.to(dtype=self.dtype) + extended_txt_masks = (1.0 - extended_txt_masks) * -10000.0 + + txt_token_type_ids = torch.zeros_like(txt_ids) + txt_embeds = self.embeddings(txt_ids, token_type_ids=txt_token_type_ids) + for layer_module in self.encoder.layer: + temp_output = layer_module(txt_embeds, extended_txt_masks) + txt_embeds = temp_output[0] + # if self.config.fix_lang_embedding: + # txt_embeds = txt_embeds.detach() + if self.config.no_lang_ca: # run self-attn layers for lang + all_txt_embeds = [txt_embeds] + for layer_module in self.encoder.x_layers: + lang_att_output = layer_module.lang_self_att(txt_embeds, extended_txt_masks)[0] + lang_inter_output = layer_module.lang_inter(lang_att_output) + lang_output = layer_module.lang_output(lang_inter_output, lang_att_output) + all_txt_embeds.append(lang_output) + return all_txt_embeds + return txt_embeds + + # history embedding per step + if mode == 'history': + hist_embeds = self.hist_embeddings(hist_rgb_fts, hist_depth_fts, hist_ang_fts, ob_step_ids, + pano_img_feats=hist_pano_rgb_fts, pano_dep_feats=hist_pano_depth_fts, pano_ang_feats=hist_pano_ang_fts) + # if self.config.fix_hist_embedding: + # hist_embeds = hist_embeds.detach() + return hist_embeds + + # cross-modal encoding per step + elif mode == 'navigation': + ''' LXMERT visual branch''' + # history embedding + extended_hist_masks = hist_masks.unsqueeze(1).unsqueeze(2) + extended_hist_masks = extended_hist_masks.to(dtype=self.dtype) + extended_hist_masks = (1.0 - extended_hist_masks) * -10000.0 + + # if self.encoder.h_layers is not None: + # for layer_module in self.encoder.h_layers: + # temp_output = layer_module(hist_embeds, 
extended_hist_masks) + # hist_embeds = temp_output[0] + + # image embedding + extended_ob_masks = ob_masks.unsqueeze(1).unsqueeze(2) + extended_ob_masks = extended_ob_masks.to(dtype=self.dtype) + extended_ob_masks = (1.0 - extended_ob_masks) * -10000.0 + + ob_token_type_ids = torch.ones(ob_rgb_fts.size(0), ob_rgb_fts.size(1), dtype=torch.long, device=self.device) + ob_embeds = self.img_embeddings(ob_rgb_fts, ob_dep_fts, ob_ang_fts, ob_dis_fts, + self.embeddings.token_type_embeddings(ob_token_type_ids), + nav_types=ob_nav_types) + # if self.encoder.r_layers is not None: + # for layer_module in self.encoder.r_layers: + # temp_output = layer_module(ob_embeds, extended_ob_masks) + # ob_embeds = temp_output[0] + # if self.config.fix_obs_embedding: + # ob_embeds = ob_embeds.detach() + + # multi-modal encoding + hist_max_len = hist_embeds.size(1) + hist_ob_embeds = torch.cat([hist_embeds, ob_embeds], 1) + extended_hist_ob_masks = torch.cat([extended_hist_masks, extended_ob_masks], -1) + + extended_txt_masks = txt_masks.unsqueeze(1).unsqueeze(2) + extended_txt_masks = extended_txt_masks.to(dtype=self.dtype) + extended_txt_masks = (1.0 - extended_txt_masks) * -10000.0 + + if self.config.no_lang_ca: + all_txt_embeds = txt_embeds + for l, layer_module in enumerate(self.encoder.x_layers): + if self.config.no_lang_ca: + txt_embeds = all_txt_embeds[l] + txt_embeds, hist_ob_embeds, lang_attention_score = layer_module( + txt_embeds, extended_txt_masks, + hist_ob_embeds, extended_hist_ob_masks, + ) + + hist_embeds = hist_ob_embeds[:, :hist_max_len] + ob_embeds = hist_ob_embeds[:, hist_max_len:] + + # TODO + if self.config.no_lang_ca: + act_logits = self.next_action(ob_embeds).squeeze(-1) + else: + if self.config.act_pred_token == 'ob_txt': + act_logits = self.next_action(ob_embeds * txt_embeds[:, :1]).squeeze(-1) + elif self.config.act_pred_token == 'ob': + act_logits = self.next_action(ob_embeds).squeeze(-1) + elif self.config.act_pred_token == 'ob_hist': + act_logits = self.next_action(ob_embeds * hist_embeds[:, :1]).squeeze(-1) + elif self.config.act_pred_token == 'ob_txt_hist': + act_logits = self.next_action(ob_embeds * (txt_embeds[:, :1] + hist_embeds[:, :1])).squeeze(-1) + act_logits.masked_fill_(ob_nav_types==0, -float('inf')) + # ob_nav_type的形状: 1,1,1,1,2,0,0,0,0,0 + + return act_logits, txt_embeds, hist_embeds, ob_embeds, lang_attention_score + diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/models/hamt/vlnbert_init.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/hamt/vlnbert_init.py new file mode 100644 index 0000000..1bb7882 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/hamt/vlnbert_init.py @@ -0,0 +1,75 @@ +import torch + + +def get_tokenizer(args): + from transformers import AutoTokenizer + if args.dataset == 'rxr' or args.tokenizer == 'xlm': + cfg_name = 'xlm-roberta-base' + else: + cfg_name = 'bert-base-uncased' + tokenizer = AutoTokenizer.from_pretrained(cfg_name) + return tokenizer + +def get_vlnbert_models(config=None): + + from transformers import PretrainedConfig + from vlnce_baselines.models.hamt.vilmodel_cmt import NavCMT + + model_class = NavCMT + + model_name_or_path = config.pretrained_path + new_ckpt_weights = {} + if model_name_or_path is not None: + ckpt_weights = torch.load(model_name_or_path, map_location='cpu') + for k, v in ckpt_weights.items(): + if k.startswith('module'): + new_ckpt_weights[k[7:]] = v + else: + # add next_action in weights + if k.startswith('next_action'): + k = 'bert.' 
+ k + new_ckpt_weights[k] = v + + if config.task_type == 'r2r': + cfg_name = 'pretrained/Prevalent/bert-base-uncased' + elif config.task_type == 'rxr': + cfg_name = 'pretrained/xlm-roberta-base' + vis_config = PretrainedConfig.from_pretrained(cfg_name) + + if config.task_type == 'r2r': + vis_config.image_feat_size = 768 + vis_config.max_action_steps = 50 + elif config.task_type == 'rxr': + vis_config.type_vocab_size = 2 + vis_config.image_feat_size = 512 + vis_config.max_action_steps = 100 + + # vis_config.image_feat_size = 768 + vis_config.depth_feat_size = 128 + vis_config.angle_feat_size = 4 + vis_config.num_l_layers = 9 + vis_config.num_r_layers = 0 + vis_config.num_h_layers = 0 + vis_config.num_x_layers = 4 + vis_config.hist_enc_pano = True + vis_config.num_h_pano_layers = 2 + + vis_config.fix_lang_embedding = config.fix_lang_embedding + vis_config.fix_hist_embedding = config.fix_hist_embedding + vis_config.fix_obs_embedding = config.fix_obs_embedding + + vis_config.update_lang_bert = not vis_config.fix_lang_embedding + vis_config.output_attentions = True + vis_config.pred_head_dropout_prob = 0.1 + + vis_config.no_lang_ca = False + vis_config.act_pred_token = 'ob_txt' + # vis_config.max_action_steps = 50 + # vis_config.max_action_steps = 100 + + visual_model = model_class.from_pretrained( + pretrained_model_name_or_path=None, + config=vis_config, + state_dict=new_ckpt_weights) + + return visual_model diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/models/policy.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/policy.py new file mode 100755 index 0000000..beadb56 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/policy.py @@ -0,0 +1,92 @@ +import abc +from typing import Any + +from habitat_baselines.rl.ppo.policy import Policy +from habitat_baselines.utils.common import ( + CategoricalNet, + CustomFixedCategorical, +) +from torch.distributions import Categorical + + +class ILPolicy(Policy, metaclass=abc.ABCMeta): + def __init__(self, net, dim_actions): + r"""Defines an imitation learning policy as having functions act() and + build_distribution(). 
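+        `net` is expected to map (observations, rnn_hidden_states,
+        prev_actions, masks) to features and updated hidden states.
+        get_value() and evaluate_actions() intentionally raise
+        NotImplementedError: the policy is trained by imitation rather
+        than with an RL value head.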
+ """ + super(Policy, self).__init__() + self.net = net + self.dim_actions = dim_actions + + # self.action_distribution = CategoricalNet( + # self.net.output_size, self.dim_actions + # ) + + def forward(self, *x): + raise NotImplementedError + + def act( + self, + observations, + rnn_hidden_states, + prev_actions, + masks, + deterministic=False, + ): + + print('need to revise for CMA and VLNBERT') + import pdb; pdb.set_trace() + + features, rnn_hidden_states = self.net( + observations, rnn_hidden_states, prev_actions, masks + ) + distribution = self.action_distribution(features) + + # if distribution.logit + if deterministic: + action = distribution.mode() + else: + action = distribution.sample() + + return action, rnn_hidden_states + + def get_value(self, *args: Any, **kwargs: Any): + raise NotImplementedError + + def evaluate_actions(self, *args: Any, **kwargs: Any): + raise NotImplementedError + + def build_distribution( + self, observations, rnn_hidden_states, prev_actions, masks + ) -> CustomFixedCategorical: + features, rnn_hidden_states = self.net( + observations, rnn_hidden_states, prev_actions, masks + ) + return self.action_distribution(features) + + def act2( + self, + observations, + rnn_hidden_states, + prev_actions, + masks, + deterministic=False, + ): + + print('need to revise for CMA and VLNBERT') + import pdb; pdb.set_trace() + + feature_rgb, feature_depth, rnn_hidden_states = self.net( + observations, rnn_hidden_states, prev_actions, masks + ) + distribution_rgb = self.action_distribution(feature_rgb) + distribution_depth = self.action_distribution(feature_depth) + + probs = (distribution_rgb.probs + distribution_depth.probs)/2 + # if distribution.logit + if deterministic: + action = probs.argmax(dim=-1, keepdim=True) + else: + action = Categorical(probs).sample().unsqueeze(-1) + + return action, rnn_hidden_states diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/models/utils.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/utils.py new file mode 100755 index 0000000..46d797a --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/utils.py @@ -0,0 +1,82 @@ +import math +from turtle import heading +import torch + + +def angle_feature(headings, device=None): + # twopi = math.pi * 2 + # heading = (heading + twopi) % twopi # From 0 ~ 2pi + # It will be the same + heading_enc = torch.zeros(len(headings), 64, dtype=torch.float32) + + for i, head in enumerate(headings): + heading_enc[i] = torch.tensor( + [math.sin(head), math.cos(head)] * (64 // 2)) + + return heading_enc.to(device) + +def dir_angle_feature(angle_list, device=None): + feature_dim = 64 + batch_size = len(angle_list) + max_leng = max([len(k) for k in angle_list]) + 1 # +1 for stop + heading_enc = torch.zeros( + batch_size, max_leng, feature_dim, dtype=torch.float32) + + for i in range(batch_size): + for j, angle_rad in enumerate(angle_list[i]): + heading_enc[i][j] = torch.tensor( + [math.sin(angle_rad), + math.cos(angle_rad)] * (feature_dim // 2)) + + return heading_enc + + +def angle_feature_with_ele(headings, device=None): + # twopi = math.pi * 2 + # heading = (heading + twopi) % twopi # From 0 ~ 2pi + # It will be the same + heading_enc = torch.zeros(len(headings), 128, dtype=torch.float32) + + for i, head in enumerate(headings): + heading_enc[i] = torch.tensor( + [ + math.sin(head), math.cos(head), + math.sin(0.0), math.cos(0.0), # elevation + ] * (128 // 4)) + + return heading_enc.to(device) + +def angle_feature_torch(headings: torch.Tensor): + return 
torch.stack( + [ + torch.sin(headings), + torch.cos(headings), + torch.sin(torch.zeros_like(headings)), + torch.cos(torch.zeros_like(headings)) + ] + ).float().T + +def dir_angle_feature_with_ele(angle_list, device=None): + feature_dim = 128 + batch_size = len(angle_list) + max_leng = max([len(k) for k in angle_list]) + 1 # +1 for stop + heading_enc = torch.zeros( + batch_size, max_leng, feature_dim, dtype=torch.float32) + + for i in range(batch_size): + for j, angle_rad in enumerate(angle_list[i]): + heading_enc[i][j] = torch.tensor( + [ + math.sin(angle_rad), math.cos(angle_rad), + math.sin(0.0), math.cos(0.0), # elevation + ] * (128 // 4)) + + return heading_enc + + +def length2mask(length, size=None): + batch_size = len(length) + size = int(max(length)) if size is None else size + mask = (torch.arange(size, dtype=torch.int64).unsqueeze(0).repeat(batch_size, 1) + > (torch.LongTensor(length) - 1).unsqueeze(1)).cuda() + return mask \ No newline at end of file diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/functional.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/functional.py new file mode 100644 index 0000000..8e12e28 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/functional.py @@ -0,0 +1,89 @@ +import numbers +import cv2 +import numpy as np +import PIL +import torch + + +def _is_tensor_clip(clip): + return torch.is_tensor(clip) and clip.ndimension() == 4 + + +def crop_clip(clip, min_h, min_w, h, w): + if isinstance(clip[0], np.ndarray): + cropped = [img[min_h:min_h + h, min_w:min_w + w, :] for img in clip] + + elif isinstance(clip[0], PIL.Image.Image): + cropped = [ + img.crop((min_w, min_h, min_w + w, min_h + h)) for img in clip + ] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + return cropped + + +def resize_clip(clip, size, interpolation='bilinear'): + if isinstance(clip[0], np.ndarray): + if isinstance(size, numbers.Number): + im_h, im_w, im_c = clip[0].shape + # Min spatial dim already matches minimal size + if (im_w <= im_h and im_w == size) or (im_h <= im_w + and im_h == size): + return clip + new_h, new_w = get_resize_sizes(im_h, im_w, size) + size = (new_w, new_h) + else: + size = size[0], size[1] + if interpolation == 'bilinear': + np_inter = cv2.INTER_LINEAR + else: + np_inter = cv2.INTER_NEAREST + scaled = [ + cv2.resize(img, size, interpolation=np_inter) for img in clip + ] + elif isinstance(clip[0], PIL.Image.Image): + if isinstance(size, numbers.Number): + im_w, im_h = clip[0].size + # Min spatial dim already matches minimal size + if (im_w <= im_h and im_w == size) or (im_h <= im_w + and im_h == size): + return clip + new_h, new_w = get_resize_sizes(im_h, im_w, size) + size = (new_w, new_h) + else: + size = size[1], size[0] + if interpolation == 'bilinear': + pil_inter = PIL.Image.BILINEAR + else: + pil_inter = PIL.Image.NEAREST + scaled = [img.resize(size, pil_inter) for img in clip] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + return scaled + + +def get_resize_sizes(im_h, im_w, size): + if im_w < im_h: + ow = size + oh = int(size * im_h / im_w) + else: + oh = size + ow = int(size * im_w / im_h) + return oh, ow + + +def normalize(clip, mean, std, inplace=False): + if not _is_tensor_clip(clip): + raise TypeError('tensor is not a torch clip.') + + if not inplace: + clip = clip.clone() + + dtype = clip.dtype + mean = 
torch.as_tensor(mean, dtype=dtype, device=clip.device) + std = torch.as_tensor(std, dtype=dtype, device=clip.device) + clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None]) + + return clip diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/get_args.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/get_args.py new file mode 100644 index 0000000..c58a3b9 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/get_args.py @@ -0,0 +1,203 @@ +import argparse + +def get_args(): + parser = argparse.ArgumentParser('VideoMAE fine-tuning and evaluation script for video classification', add_help=False) + parser.add_argument('--batch_size', default=64, type=int) + parser.add_argument('--epochs', default=30, type=int) + parser.add_argument('--update_freq', default=1, type=int) + parser.add_argument('--save_ckpt_freq', default=100, type=int) + + # Model parameters + parser.add_argument('--model', default='vit_base_patch16_224', type=str, metavar='MODEL', + help='Name of model to train') + parser.add_argument('--tubelet_size', type=int, default= 2) + parser.add_argument('--input_size', default=224, type=int, + help='videos input size') + + parser.add_argument('--drop', type=float, default=0.0, metavar='PCT', + help='Dropout rate (default: 0.)') + parser.add_argument('--attn_drop_rate', type=float, default=0.0, metavar='PCT', + help='Attention dropout rate (default: 0.)') + parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT', + help='Drop path rate (default: 0.1)') + + parser.add_argument('--disable_eval_during_finetuning', action='store_true', default=False) + parser.add_argument('--model_ema', action='store_true', default=False) + parser.add_argument('--model_ema_decay', type=float, default=0.9999, help='') + parser.add_argument('--model_ema_force_cpu', action='store_true', default=False, help='') + + # Optimizer parameters + parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER', + help='Optimizer (default: "adamw"') + parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON', + help='Optimizer Epsilon (default: 1e-8)') + parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA', + help='Optimizer Betas (default: None, use opt default)') + parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM', + help='Clip gradient norm (default: None, no clipping)') + parser.add_argument('--momentum', type=float, default=0.9, metavar='M', + help='SGD momentum (default: 0.9)') + parser.add_argument('--weight_decay', type=float, default=0.05, + help='weight decay (default: 0.05)') + parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the + weight decay. 
We use a cosine schedule for WD and using a larger decay by + the end of training improves performance for ViTs.""") + + parser.add_argument('--lr', type=float, default=1e-3, metavar='LR', + help='learning rate (default: 1e-3)') + parser.add_argument('--layer_decay', type=float, default=0.75) + + parser.add_argument('--warmup_lr', type=float, default=1e-6, metavar='LR', + help='warmup learning rate (default: 1e-6)') + parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR', + help='lower lr bound for cyclic schedulers that hit 0 (1e-5)') + + parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N', + help='epochs to warmup LR, if scheduler supports') + parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N', + help='num of steps to warmup LR, will overload warmup_epochs if set > 0') + + # Augmentation parameters + parser.add_argument('--color_jitter', type=float, default=0.4, metavar='PCT', + help='Color jitter factor (default: 0.4)') + parser.add_argument('--num_sample', type=int, default=2, + help='Repeated_aug (default: 2)') + parser.add_argument('--aa', type=str, default='rand-m7-n4-mstd0.5-inc1', metavar='NAME', + help='Use AutoAugment policy. "v0" or "original". " + "(default: rand-m7-n4-mstd0.5-inc1)'), + parser.add_argument('--smoothing', type=float, default=0.1, + help='Label smoothing (default: 0.1)') + parser.add_argument('--train_interpolation', type=str, default='bicubic', + help='Training interpolation (random, bilinear, bicubic default: "bicubic")') + + # Evaluation parameters + parser.add_argument('--crop_pct', type=float, default=None) + parser.add_argument('--short_side_size', type=int, default=224) + parser.add_argument('--test_num_segment', type=int, default=5) + parser.add_argument('--test_num_crop', type=int, default=3) + + # Random Erase params + parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT', + help='Random erase prob (default: 0.25)') + parser.add_argument('--remode', type=str, default='pixel', + help='Random erase mode (default: "pixel")') + parser.add_argument('--recount', type=int, default=1, + help='Random erase count (default: 1)') + parser.add_argument('--resplit', action='store_true', default=False, + help='Do not random erase first (clean) augmentation split') + + # Mixup params + parser.add_argument('--mixup', type=float, default=0.8, + help='mixup alpha, mixup enabled if > 0.') + parser.add_argument('--cutmix', type=float, default=1.0, + help='cutmix alpha, cutmix enabled if > 0.') + parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None, + help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)') + parser.add_argument('--mixup_prob', type=float, default=1.0, + help='Probability of performing mixup or cutmix when either/both is enabled') + parser.add_argument('--mixup_switch_prob', type=float, default=0.5, + help='Probability of switching to cutmix when both mixup and cutmix enabled') + parser.add_argument('--mixup_mode', type=str, default='batch', + help='How to apply mixup/cutmix params. 
Per "batch", "pair", or "elem"') + + # Finetuning params + parser.add_argument('--finetune', default='', help='finetune from checkpoint') + parser.add_argument('--model_key', default='model|module', type=str) + parser.add_argument('--model_prefix', default='', type=str) + parser.add_argument('--init_scale', default=0.001, type=float) + parser.add_argument('--use_mean_pooling', action='store_true') + parser.set_defaults(use_mean_pooling=True) + parser.add_argument('--use_cls', action='store_false', dest='use_mean_pooling') + + # Dataset parameters + parser.add_argument('--data_path', default='/path/to/list_kinetics-400', type=str, + help='dataset path') + parser.add_argument('--eval_data_path', default=None, type=str, + help='dataset path for evaluation') + parser.add_argument('--nb_classes', default=400, type=int, + help='number of the classification types') + parser.add_argument('--imagenet_default_mean_and_std', default=True, action='store_true') + parser.add_argument('--num_segments', type=int, default= 1) + parser.add_argument('--num_frames', type=int, default= 16) + parser.add_argument('--sampling_rate', type=int, default= 4) + parser.add_argument('--data_set', default='Kinetics-400', choices=['Kinetics-400', 'SSV2', 'UCF101', 'HMDB51','image_folder'], + type=str, help='dataset') + parser.add_argument('--output_dir', default='', + help='path where to save, empty for no saving') + parser.add_argument('--log_dir', default=None, + help='path where to tensorboard log') + parser.add_argument('--device', default='cuda', + help='device to use for training / testing') + parser.add_argument('--seed', default=0, type=int) + parser.add_argument('--resume', default='', + help='resume from checkpoint') + parser.add_argument('--auto_resume', action='store_true') + parser.add_argument('--no_auto_resume', action='store_false', dest='auto_resume') + parser.set_defaults(auto_resume=True) + + parser.add_argument('--save_ckpt', action='store_true') + parser.add_argument('--no_save_ckpt', action='store_false', dest='save_ckpt') + parser.set_defaults(save_ckpt=True) + + parser.add_argument('--start_epoch', default=0, type=int, metavar='N', + help='start epoch') + parser.add_argument('--eval', action='store_true', + help='Perform evaluation only') + parser.add_argument('--dist_eval', action='store_true', default=False, + help='Enabling distributed evaluation') + parser.add_argument('--num_workers', default=10, type=int) + parser.add_argument('--pin_mem', action='store_true', + help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') + parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem') + parser.set_defaults(pin_mem=True) + + # distributed training parameters + parser.add_argument('--world_size', default=1, type=int, + help='number of distributed processes') + parser.add_argument('--local_rank', default=-1, type=int) + parser.add_argument('--dist_on_itp', action='store_true') + parser.add_argument('--dist_url', default='env://', + help='url used to set up distributed training') + + parser.add_argument('--enable_deepspeed', action='store_true', default=False) + + parser.add_argument( + "--exp_name", + type=str, + default="test", + required=True, + help="experiment id that matches to exp-id in Notion log", + ) + parser.add_argument( + "--run-type", + choices=["train", "eval", "inference"], + required=True, + help="run type of the experiment (train, eval, inference)", + ) + parser.add_argument( + "--exp-config", + type=str, + required=True, + help="path to 
config yaml containing info about experiment", + ) + parser.add_argument( + "opts", + default=None, + nargs=argparse.REMAINDER, + help="Modify config options from command line", + ) + + known_args, _ = parser.parse_known_args() + + if known_args.enable_deepspeed: + try: + import deepspeed + from deepspeed import DeepSpeedConfig + parser = deepspeed.add_config_arguments(parser) + ds_init = deepspeed.initialize + except: + print("Please 'pip install deepspeed'") + exit(0) + else: + ds_init = None + + return parser.parse_args(), ds_init \ No newline at end of file diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/modeling_finetune.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/modeling_finetune.py new file mode 100644 index 0000000..31ba689 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/modeling_finetune.py @@ -0,0 +1,344 @@ +from functools import partial +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from timm.models.layers import drop_path, to_2tuple, trunc_normal_ +from timm.models.registry import register_model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 400, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', + 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), + **kwargs + } + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + """ + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + def extra_repr(self) -> str: + return 'p={}'.format(self.drop_prob) + + +class Mlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + # x = self.drop(x) + # commit this for the orignal BERT implement + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + def __init__( + self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., + proj_drop=0., attn_head_dim=None): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + if attn_head_dim is not None: + head_dim = attn_head_dim + all_head_dim = head_dim * self.num_heads + self.scale = qk_scale or head_dim ** -0.5 + + self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) + self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) + else: + self.q_bias = None + self.v_bias = None + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(all_head_dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv_bias = None + if self.q_bias is not None: + qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias)) + # qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape(B, N, 3, self.num_heads, 
-1).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm, + attn_head_dim=None): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, + attn_drop=attn_drop, proj_drop=drop, attn_head_dim=attn_head_dim) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + if init_values > 0: + self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True) + self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True) + else: + self.gamma_1, self.gamma_2 = None, None + + def forward(self, x): + if self.gamma_1 is None: + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + else: + x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x))) + x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) + return x + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, num_frames=16, tubelet_size=2): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + self.tubelet_size = int(tubelet_size) + num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) * (num_frames // self.tubelet_size) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + self.proj = nn.Conv3d(in_channels=in_chans, out_channels=embed_dim, + kernel_size = (self.tubelet_size, patch_size[0],patch_size[1]), + stride=(self.tubelet_size, patch_size[0], patch_size[1])) + + def forward(self, x, **kwargs): + B, C, T, H, W = x.shape + # FIXME look at relaxing size constraints + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." 
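+        # Tubelet embedding: the Conv3d maps (B, C, T, H, W) to
+        # (B, embed_dim, T // tubelet_size, H // patch, W // patch), which is
+        # then flattened and transposed to (B, N, embed_dim). With the
+        # defaults used here (224x224 frames, patch 16, tubelet 2, 16 frames)
+        # that gives N = 8 * 14 * 14 = 1568 tokens.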
+ x = self.proj(x).flatten(2).transpose(1, 2) + return x + +# sin-cos position encoding +# https://github.com/jadore801120/attention-is-all-you-need-pytorch/blob/master/transformer/Models.py#L31 +def get_sinusoid_encoding_table(n_position, d_hid): + ''' Sinusoid position encoding table ''' + # TODO: make it with torch instead of numpy + def get_position_angle_vec(position): + return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)] + + sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)]) + sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i + sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 + + return torch.FloatTensor(sinusoid_table).unsqueeze(0) + + +class VisionTransformer(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + def __init__(self, + img_size=224, + patch_size=16, + in_chans=3, + num_classes=1000, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer=nn.LayerNorm, + init_values=0., + use_learnable_pos_emb=False, + init_scale=0., + all_frames=16, + tubelet_size=2, + use_mean_pooling=True): + super().__init__() + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + self.tubelet_size = tubelet_size + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, num_frames=all_frames, tubelet_size=self.tubelet_size) + num_patches = self.patch_embed.num_patches + + if use_learnable_pos_emb: + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + else: + # sine-cosine positional embeddings is on the way + self.pos_embed = get_sinusoid_encoding_table(num_patches, embed_dim) + + self.pos_drop = nn.Dropout(p=drop_rate) + + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + init_values=init_values) + for i in range(depth)]) + self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim) + self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + if use_learnable_pos_emb: + trunc_normal_(self.pos_embed, std=.02) + + trunc_normal_(self.head.weight, std=.02) + self.apply(self._init_weights) + + self.head.weight.data.mul_(init_scale) + self.head.bias.data.mul_(init_scale) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def get_num_layers(self): + return len(self.blocks) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + B, _, _ = x.size() + + if 
self.pos_embed is not None: + x = x + self.pos_embed.expand(B, -1, -1).type_as(x).to(x.device).clone().detach() + x = self.pos_drop(x) + + for blk in self.blocks: + x = blk(x) + + x = self.norm(x) + if self.fc_norm is not None: + return self.fc_norm(x.mean(1)) + else: + return x[:, 0] + + def get_spatial_features(self, x): + x = self.patch_embed(x) + B, _, _ = x.size() + + if self.pos_embed is not None: + x = x + self.pos_embed.expand(B, -1, -1).type_as(x).to(x.device).clone().detach() + x = self.pos_drop(x) + + for blk in self.blocks: + x = blk(x) + + x = self.norm(x) + x = x.transpose(1,2).reshape(-1,768,8,14,14).mean(2) + + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + +@register_model +def vit_small_patch16_224(pretrained=False, **kwargs): + model = VisionTransformer( + patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model.default_cfg = _cfg() + return model + +@register_model +def vit_base_patch16_224(pretrained=False, **kwargs): + model = VisionTransformer( + patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_base_patch16_384(pretrained=False, **kwargs): + model = VisionTransformer( + img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_large_patch16_224(pretrained=False, **kwargs): + model = VisionTransformer( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_large_patch16_384(pretrained=False, **kwargs): + model = VisionTransformer( + img_size=384, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_large_patch16_512(pretrained=False, **kwargs): + model = VisionTransformer( + img_size=512, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model.default_cfg = _cfg() + return model diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/rand_augment.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/rand_augment.py new file mode 100644 index 0000000..37c57d1 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/rand_augment.py @@ -0,0 +1,531 @@ +""" +This implementation is based on +https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/auto_augment.py +pulished under an Apache License 2.0. + +COMMENT FROM ORIGINAL: +AutoAugment, RandAugment, and AugMix for PyTorch +This code implements the searched ImageNet policies with various tweaks and +improvements and does not include any of the search code. 
AA and RA +Implementation adapted from: + https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py +AugMix adapted from: + https://github.com/google-research/augmix +Papers: + AutoAugment: Learning Augmentation Policies from Data + https://arxiv.org/abs/1805.09501 + Learning Data Augmentation Strategies for Object Detection + https://arxiv.org/abs/1906.11172 + RandAugment: Practical automated data augmentation... + https://arxiv.org/abs/1909.13719 + AugMix: A Simple Data Processing Method to Improve Robustness and + Uncertainty https://arxiv.org/abs/1912.02781 + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import math +import numpy as np +import random +import re +import PIL +from PIL import Image, ImageEnhance, ImageOps + +_PIL_VER = tuple([int(x) for x in PIL.__version__.split(".")[:2]]) + +_FILL = (128, 128, 128) + +# This signifies the max integer that the controller RNN could predict for the +# augmentation scheme. +_MAX_LEVEL = 10.0 + +_HPARAMS_DEFAULT = { + "translate_const": 250, + "img_mean": _FILL, +} + +_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC) + + +def _interpolation(kwargs): + interpolation = kwargs.pop("resample", Image.BILINEAR) + if isinstance(interpolation, (list, tuple)): + return random.choice(interpolation) + else: + return interpolation + + +def _check_args_tf(kwargs): + if "fillcolor" in kwargs and _PIL_VER < (5, 0): + kwargs.pop("fillcolor") + kwargs["resample"] = _interpolation(kwargs) + + +def shear_x(img, factor, **kwargs): + _check_args_tf(kwargs) + return img.transform( + img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs + ) + + +def shear_y(img, factor, **kwargs): + _check_args_tf(kwargs) + return img.transform( + img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs + ) + + +def translate_x_rel(img, pct, **kwargs): + pixels = pct * img.size[0] + _check_args_tf(kwargs) + return img.transform( + img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs + ) + + +def translate_y_rel(img, pct, **kwargs): + pixels = pct * img.size[1] + _check_args_tf(kwargs) + return img.transform( + img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs + ) + + +def translate_x_abs(img, pixels, **kwargs): + _check_args_tf(kwargs) + return img.transform( + img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs + ) + + +def translate_y_abs(img, pixels, **kwargs): + _check_args_tf(kwargs) + return img.transform( + img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs + ) + + +def rotate(img, degrees, **kwargs): + _check_args_tf(kwargs) + if _PIL_VER >= (5, 2): + return img.rotate(degrees, **kwargs) + elif _PIL_VER >= (5, 0): + w, h = img.size + post_trans = (0, 0) + rotn_center = (w / 2.0, h / 2.0) + angle = -math.radians(degrees) + matrix = [ + round(math.cos(angle), 15), + round(math.sin(angle), 15), + 0.0, + round(-math.sin(angle), 15), + round(math.cos(angle), 15), + 0.0, + ] + + def transform(x, y, matrix): + (a, b, c, d, e, f) = matrix + return a * x + b * y + c, d * x + e * y + f + + matrix[2], matrix[5] = transform( + -rotn_center[0] - post_trans[0], + -rotn_center[1] - post_trans[1], + matrix, + ) + matrix[2] += rotn_center[0] + matrix[5] += rotn_center[1] + return img.transform(img.size, Image.AFFINE, matrix, **kwargs) + else: + return img.rotate(degrees, resample=kwargs["resample"]) + + +def auto_contrast(img, **__): + return ImageOps.autocontrast(img) + + +def invert(img, **__): + return ImageOps.invert(img) + + +def equalize(img, **__): + return ImageOps.equalize(img) + + +def solarize(img, 
thresh, **__): + return ImageOps.solarize(img, thresh) + + +def solarize_add(img, add, thresh=128, **__): + lut = [] + for i in range(256): + if i < thresh: + lut.append(min(255, i + add)) + else: + lut.append(i) + if img.mode in ("L", "RGB"): + if img.mode == "RGB" and len(lut) == 256: + lut = lut + lut + lut + return img.point(lut) + else: + return img + + +def posterize(img, bits_to_keep, **__): + if bits_to_keep >= 8: + return img + return ImageOps.posterize(img, bits_to_keep) + + +def contrast(img, factor, **__): + return ImageEnhance.Contrast(img).enhance(factor) + + +def color(img, factor, **__): + return ImageEnhance.Color(img).enhance(factor) + + +def brightness(img, factor, **__): + return ImageEnhance.Brightness(img).enhance(factor) + + +def sharpness(img, factor, **__): + return ImageEnhance.Sharpness(img).enhance(factor) + + +def _randomly_negate(v): + """With 50% prob, negate the value""" + return -v if random.random() > 0.5 else v + + +def _rotate_level_to_arg(level, _hparams): + # range [-30, 30] + level = (level / _MAX_LEVEL) * 30.0 + level = _randomly_negate(level) + return (level,) + + +def _enhance_level_to_arg(level, _hparams): + # range [0.1, 1.9] + return ((level / _MAX_LEVEL) * 1.8 + 0.1,) + + +def _enhance_increasing_level_to_arg(level, _hparams): + # the 'no change' level is 1.0, moving away from that towards 0. or 2.0 increases the enhancement blend + # range [0.1, 1.9] + level = (level / _MAX_LEVEL) * 0.9 + level = 1.0 + _randomly_negate(level) + return (level,) + + +def _shear_level_to_arg(level, _hparams): + # range [-0.3, 0.3] + level = (level / _MAX_LEVEL) * 0.3 + level = _randomly_negate(level) + return (level,) + + +def _translate_abs_level_to_arg(level, hparams): + translate_const = hparams["translate_const"] + level = (level / _MAX_LEVEL) * float(translate_const) + level = _randomly_negate(level) + return (level,) + + +def _translate_rel_level_to_arg(level, hparams): + # default range [-0.45, 0.45] + translate_pct = hparams.get("translate_pct", 0.45) + level = (level / _MAX_LEVEL) * translate_pct + level = _randomly_negate(level) + return (level,) + + +def _posterize_level_to_arg(level, _hparams): + # As per Tensorflow TPU EfficientNet impl + # range [0, 4], 'keep 0 up to 4 MSB of original image' + # intensity/severity of augmentation decreases with level + return (int((level / _MAX_LEVEL) * 4),) + + +def _posterize_increasing_level_to_arg(level, hparams): + # As per Tensorflow models research and UDA impl + # range [4, 0], 'keep 4 down to 0 MSB of original image', + # intensity/severity of augmentation increases with level + return (4 - _posterize_level_to_arg(level, hparams)[0],) + + +def _posterize_original_level_to_arg(level, _hparams): + # As per original AutoAugment paper description + # range [4, 8], 'keep 4 up to 8 MSB of image' + # intensity/severity of augmentation decreases with level + return (int((level / _MAX_LEVEL) * 4) + 4,) + + +def _solarize_level_to_arg(level, _hparams): + # range [0, 256] + # intensity/severity of augmentation decreases with level + return (int((level / _MAX_LEVEL) * 256),) + + +def _solarize_increasing_level_to_arg(level, _hparams): + # range [0, 256] + # intensity/severity of augmentation increases with level + return (256 - _solarize_level_to_arg(level, _hparams)[0],) + + +def _solarize_add_level_to_arg(level, _hparams): + # range [0, 110] + return (int((level / _MAX_LEVEL) * 110),) + + +LEVEL_TO_ARG = { + "AutoContrast": None, + "Equalize": None, + "Invert": None, + "Rotate": _rotate_level_to_arg, + # There are 
several variations of the posterize level scaling in various Tensorflow/Google repositories/papers + "Posterize": _posterize_level_to_arg, + "PosterizeIncreasing": _posterize_increasing_level_to_arg, + "PosterizeOriginal": _posterize_original_level_to_arg, + "Solarize": _solarize_level_to_arg, + "SolarizeIncreasing": _solarize_increasing_level_to_arg, + "SolarizeAdd": _solarize_add_level_to_arg, + "Color": _enhance_level_to_arg, + "ColorIncreasing": _enhance_increasing_level_to_arg, + "Contrast": _enhance_level_to_arg, + "ContrastIncreasing": _enhance_increasing_level_to_arg, + "Brightness": _enhance_level_to_arg, + "BrightnessIncreasing": _enhance_increasing_level_to_arg, + "Sharpness": _enhance_level_to_arg, + "SharpnessIncreasing": _enhance_increasing_level_to_arg, + "ShearX": _shear_level_to_arg, + "ShearY": _shear_level_to_arg, + "TranslateX": _translate_abs_level_to_arg, + "TranslateY": _translate_abs_level_to_arg, + "TranslateXRel": _translate_rel_level_to_arg, + "TranslateYRel": _translate_rel_level_to_arg, +} + + +NAME_TO_OP = { + "AutoContrast": auto_contrast, + "Equalize": equalize, + "Invert": invert, + "Rotate": rotate, + "Posterize": posterize, + "PosterizeIncreasing": posterize, + "PosterizeOriginal": posterize, + "Solarize": solarize, + "SolarizeIncreasing": solarize, + "SolarizeAdd": solarize_add, + "Color": color, + "ColorIncreasing": color, + "Contrast": contrast, + "ContrastIncreasing": contrast, + "Brightness": brightness, + "BrightnessIncreasing": brightness, + "Sharpness": sharpness, + "SharpnessIncreasing": sharpness, + "ShearX": shear_x, + "ShearY": shear_y, + "TranslateX": translate_x_abs, + "TranslateY": translate_y_abs, + "TranslateXRel": translate_x_rel, + "TranslateYRel": translate_y_rel, +} + + +class AugmentOp: + """ + Apply for video. + """ + + def __init__(self, name, prob=0.5, magnitude=10, hparams=None): + hparams = hparams or _HPARAMS_DEFAULT + self.aug_fn = NAME_TO_OP[name] + self.level_fn = LEVEL_TO_ARG[name] + self.prob = prob + self.magnitude = magnitude + self.hparams = hparams.copy() + self.kwargs = { + "fillcolor": hparams["img_mean"] + if "img_mean" in hparams + else _FILL, + "resample": hparams["interpolation"] + if "interpolation" in hparams + else _RANDOM_INTERPOLATION, + } + + # If magnitude_std is > 0, we introduce some randomness + # in the usually fixed policy and sample magnitude from a normal distribution + # with mean `magnitude` and std-dev of `magnitude_std`. + # NOTE This is my own hack, being tested, not in papers or reference impls. 
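+        # "magnitude_std" reaches hparams via the "mstd" token parsed in
+        # rand_augment_transform() below; e.g. the default policy string
+        # 'rand-m7-n4-mstd0.5-inc1' from get_args.py yields a std of 0.5.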
+ self.magnitude_std = self.hparams.get("magnitude_std", 0) + + def __call__(self, img_list): + if self.prob < 1.0 and random.random() > self.prob: + return img_list + magnitude = self.magnitude + if self.magnitude_std and self.magnitude_std > 0: + magnitude = random.gauss(magnitude, self.magnitude_std) + magnitude = min(_MAX_LEVEL, max(0, magnitude)) # clip to valid range + level_args = ( + self.level_fn(magnitude, self.hparams) + if self.level_fn is not None + else () + ) + + if isinstance(img_list, list): + return [ + self.aug_fn(img, *level_args, **self.kwargs) for img in img_list + ] + else: + return self.aug_fn(img_list, *level_args, **self.kwargs) + + +_RAND_TRANSFORMS = [ + "AutoContrast", + "Equalize", + "Invert", + "Rotate", + "Posterize", + "Solarize", + "SolarizeAdd", + "Color", + "Contrast", + "Brightness", + "Sharpness", + "ShearX", + "ShearY", + "TranslateXRel", + "TranslateYRel", +] + + +_RAND_INCREASING_TRANSFORMS = [ + "AutoContrast", + "Equalize", + "Invert", + "Rotate", + "PosterizeIncreasing", + "SolarizeIncreasing", + "SolarizeAdd", + "ColorIncreasing", + "ContrastIncreasing", + "BrightnessIncreasing", + "SharpnessIncreasing", + "ShearX", + "ShearY", + "TranslateXRel", + "TranslateYRel", +] + + +# These experimental weights are based loosely on the relative improvements mentioned in paper. +# They may not result in increased performance, but could likely be tuned to so. +_RAND_CHOICE_WEIGHTS_0 = { + "Rotate": 0.3, + "ShearX": 0.2, + "ShearY": 0.2, + "TranslateXRel": 0.1, + "TranslateYRel": 0.1, + "Color": 0.025, + "Sharpness": 0.025, + "AutoContrast": 0.025, + "Solarize": 0.005, + "SolarizeAdd": 0.005, + "Contrast": 0.005, + "Brightness": 0.005, + "Equalize": 0.005, + "Posterize": 0, + "Invert": 0, +} + + +def _select_rand_weights(weight_idx=0, transforms=None): + transforms = transforms or _RAND_TRANSFORMS + assert weight_idx == 0 # only one set of weights currently + rand_weights = _RAND_CHOICE_WEIGHTS_0 + probs = [rand_weights[k] for k in transforms] + probs /= np.sum(probs) + return probs + + +def rand_augment_ops(magnitude=10, hparams=None, transforms=None): + hparams = hparams or _HPARAMS_DEFAULT + transforms = transforms or _RAND_TRANSFORMS + return [ + AugmentOp(name, prob=0.5, magnitude=magnitude, hparams=hparams) + for name in transforms + ] + + +class RandAugment: + def __init__(self, ops, num_layers=2, choice_weights=None): + self.ops = ops + self.num_layers = num_layers + self.choice_weights = choice_weights + + def __call__(self, img): + # no replacement when using weighted choice + ops = np.random.choice( + self.ops, + self.num_layers, + replace=self.choice_weights is None, + p=self.choice_weights, + ) + for op in ops: + img = op(img) + return img + + +def rand_augment_transform(config_str, hparams): + """ + RandAugment: Practical automated data augmentation... - https://arxiv.org/abs/1909.13719 + + Create a RandAugment transform + :param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by + dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). 
The remaining + sections, not order sepecific determine + 'm' - integer magnitude of rand augment + 'n' - integer num layers (number of transform ops selected per image) + 'w' - integer probabiliy weight index (index of a set of weights to influence choice of op) + 'mstd' - float std deviation of magnitude noise applied + 'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0) + Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5 + 'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default magnitude of 10 and num_layers 2 + :param hparams: Other hparams (kwargs) for the RandAugmentation scheme + :return: A PyTorch compatible Transform + """ + magnitude = _MAX_LEVEL # default to _MAX_LEVEL for magnitude (currently 10) + num_layers = 2 # default to 2 ops per image + weight_idx = None # default to no probability weights for op choice + transforms = _RAND_TRANSFORMS + config = config_str.split("-") + assert config[0] == "rand" + config = config[1:] + for c in config: + cs = re.split(r"(\d.*)", c) + if len(cs) < 2: + continue + key, val = cs[:2] + if key == "mstd": + # noise param injected via hparams for now + hparams.setdefault("magnitude_std", float(val)) + elif key == "inc": + if bool(val): + transforms = _RAND_INCREASING_TRANSFORMS + elif key == "m": + magnitude = int(val) + elif key == "n": + num_layers = int(val) + elif key == "w": + weight_idx = int(val) + else: + assert NotImplementedError + ra_ops = rand_augment_ops( + magnitude=magnitude, hparams=hparams, transforms=transforms + ) + choice_weights = ( + None if weight_idx is None else _select_rand_weights(weight_idx) + ) + return RandAugment(ra_ops, num_layers, choice_weights=choice_weights) diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/random_erasing.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/random_erasing.py new file mode 100644 index 0000000..b46547b --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/random_erasing.py @@ -0,0 +1,173 @@ +""" +This implementation is based on +https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/random_erasing.py +pulished under an Apache License 2.0. +""" +import math +import random +import torch + + +def _get_pixels( + per_pixel, rand_color, patch_size, dtype=torch.float32, device="cuda" +): + # NOTE I've seen CUDA illegal memory access errors being caused by the normal_() + # paths, flip the order so normal is run on CPU if this becomes a problem + # Issue has been fixed in master https://github.com/pytorch/pytorch/issues/19508 + if per_pixel: + return torch.empty(patch_size, dtype=dtype, device=device).normal_() + elif rand_color: + return torch.empty( + (patch_size[0], 1, 1), dtype=dtype, device=device + ).normal_() + else: + return torch.zeros((patch_size[0], 1, 1), dtype=dtype, device=device) + + +class RandomErasing: + """Randomly selects a rectangle region in an image and erases its pixels. + 'Random Erasing Data Augmentation' by Zhong et al. + See https://arxiv.org/pdf/1708.04896.pdf + This variant of RandomErasing is intended to be applied to either a batch + or single image tensor after it has been normalized by dataset mean and std. + Args: + probability: Probability that the Random Erasing operation will be performed. + min_area: Minimum percentage of erased area wrt input image area. + max_area: Maximum percentage of erased area wrt input image area. 
+ min_aspect: Minimum aspect ratio of erased area. + mode: pixel color mode, one of 'const', 'rand', or 'pixel' + 'const' - erase block is constant color of 0 for all channels + 'rand' - erase block is same per-channel random (normal) color + 'pixel' - erase block is per-pixel random (normal) color + max_count: maximum number of erasing blocks per image, area per box is scaled by count. + per-image count is randomly chosen between 1 and this value. + """ + + def __init__( + self, + probability=0.5, + min_area=0.02, + max_area=1 / 3, + min_aspect=0.3, + max_aspect=None, + mode="const", + min_count=1, + max_count=None, + num_splits=0, + device="cuda", + cube=True, + ): + self.probability = probability + self.min_area = min_area + self.max_area = max_area + max_aspect = max_aspect or 1 / min_aspect + self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect)) + self.min_count = min_count + self.max_count = max_count or min_count + self.num_splits = num_splits + mode = mode.lower() + self.rand_color = False + self.per_pixel = False + self.cube = cube + if mode == "rand": + self.rand_color = True # per block random normal + elif mode == "pixel": + self.per_pixel = True # per pixel random normal + else: + assert not mode or mode == "const" + self.device = device + + def _erase(self, img, chan, img_h, img_w, dtype): + if random.random() > self.probability: + return + area = img_h * img_w + count = ( + self.min_count + if self.min_count == self.max_count + else random.randint(self.min_count, self.max_count) + ) + for _ in range(count): + for _ in range(10): + target_area = ( + random.uniform(self.min_area, self.max_area) * area / count + ) + aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio)) + h = int(round(math.sqrt(target_area * aspect_ratio))) + w = int(round(math.sqrt(target_area / aspect_ratio))) + if w < img_w and h < img_h: + top = random.randint(0, img_h - h) + left = random.randint(0, img_w - w) + img[:, top : top + h, left : left + w] = _get_pixels( + self.per_pixel, + self.rand_color, + (chan, h, w), + dtype=dtype, + device=self.device, + ) + break + + def _erase_cube( + self, + img, + batch_start, + batch_size, + chan, + img_h, + img_w, + dtype, + ): + if random.random() > self.probability: + return + area = img_h * img_w + count = ( + self.min_count + if self.min_count == self.max_count + else random.randint(self.min_count, self.max_count) + ) + for _ in range(count): + for _ in range(100): + target_area = ( + random.uniform(self.min_area, self.max_area) * area / count + ) + aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio)) + h = int(round(math.sqrt(target_area * aspect_ratio))) + w = int(round(math.sqrt(target_area / aspect_ratio))) + if w < img_w and h < img_h: + top = random.randint(0, img_h - h) + left = random.randint(0, img_w - w) + for i in range(batch_start, batch_size): + img_instance = img[i] + img_instance[ + :, top : top + h, left : left + w + ] = _get_pixels( + self.per_pixel, + self.rand_color, + (chan, h, w), + dtype=dtype, + device=self.device, + ) + break + + def __call__(self, input): + if len(input.size()) == 3: + self._erase(input, *input.size(), input.dtype) + else: + batch_size, chan, img_h, img_w = input.size() + # skip first slice of batch if num_splits is set (for clean portion of samples) + batch_start = ( + batch_size // self.num_splits if self.num_splits > 1 else 0 + ) + if self.cube: + self._erase_cube( + input, + batch_start, + batch_size, + chan, + img_h, + img_w, + input.dtype, + ) + else: + for i in 
range(batch_start, batch_size): + self._erase(input[i], chan, img_h, img_w, input.dtype) + return input diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/utils.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/utils.py new file mode 100644 index 0000000..66ca90d --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/utils.py @@ -0,0 +1,535 @@ +import io +import os +import math +import time +import json +from collections import defaultdict, deque +import datetime +import numpy as np +from timm.utils import get_state_dict +from torch.utils.data._utils.collate import default_collate +from pathlib import Path +import subprocess +import torch +import torch.distributed as dist +from torch._six import inf +import random + +from tensorboardX import SummaryWriter + + +class SmoothedValue(object): + """Track a series of values and provide access to smoothed values over a + window or the global series average. + """ + + def __init__(self, window_size=20, fmt=None): + if fmt is None: + fmt = "{median:.4f} ({global_avg:.4f})" + self.deque = deque(maxlen=window_size) + self.total = 0.0 + self.count = 0 + self.fmt = fmt + + def update(self, value, n=1): + self.deque.append(value) + self.count += n + self.total += value * n + + def synchronize_between_processes(self): + """ + Warning: does not synchronize the deque! + """ + if not is_dist_avail_and_initialized(): + return + t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') + dist.barrier() + dist.all_reduce(t) + t = t.tolist() + self.count = int(t[0]) + self.total = t[1] + + @property + def median(self): + d = torch.tensor(list(self.deque)) + return d.median().item() + + @property + def avg(self): + d = torch.tensor(list(self.deque), dtype=torch.float32) + return d.mean().item() + + @property + def global_avg(self): + return self.total / self.count + + @property + def max(self): + return max(self.deque) + + @property + def value(self): + return self.deque[-1] + + def __str__(self): + return self.fmt.format( + median=self.median, + avg=self.avg, + global_avg=self.global_avg, + max=self.max, + value=self.value) + + +class MetricLogger(object): + def __init__(self, delimiter="\t"): + self.meters = defaultdict(SmoothedValue) + self.delimiter = delimiter + + def update(self, **kwargs): + for k, v in kwargs.items(): + if v is None: + continue + if isinstance(v, torch.Tensor): + v = v.item() + assert isinstance(v, (float, int)) + self.meters[k].update(v) + + def __getattr__(self, attr): + if attr in self.meters: + return self.meters[attr] + if attr in self.__dict__: + return self.__dict__[attr] + raise AttributeError("'{}' object has no attribute '{}'".format( + type(self).__name__, attr)) + + def __str__(self): + loss_str = [] + for name, meter in self.meters.items(): + loss_str.append( + "{}: {}".format(name, str(meter)) + ) + return self.delimiter.join(loss_str) + + def synchronize_between_processes(self): + for meter in self.meters.values(): + meter.synchronize_between_processes() + + def add_meter(self, name, meter): + self.meters[name] = meter + + def log_every(self, iterable, print_freq, header=None): + i = 0 + if not header: + header = '' + start_time = time.time() + end = time.time() + iter_time = SmoothedValue(fmt='{avg:.4f}') + data_time = SmoothedValue(fmt='{avg:.4f}') + space_fmt = ':' + str(len(str(len(iterable)))) + 'd' + log_msg = [ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}', + '{meters}', + 'time: {time}', + 
'data: {data}' + ] + if torch.cuda.is_available(): + log_msg.append('max mem: {memory:.0f}') + log_msg = self.delimiter.join(log_msg) + MB = 1024.0 * 1024.0 + for obj in iterable: + data_time.update(time.time() - end) + yield obj + iter_time.update(time.time() - end) + if i % print_freq == 0 or i == len(iterable) - 1: + eta_seconds = iter_time.global_avg * (len(iterable) - i) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + if torch.cuda.is_available(): + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time), + memory=torch.cuda.max_memory_allocated() / MB)) + else: + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time))) + i += 1 + end = time.time() + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('{} Total time: {} ({:.4f} s / it)'.format( + header, total_time_str, total_time / len(iterable))) + + +class TensorboardLogger(object): + def __init__(self, log_dir): + self.writer = SummaryWriter(logdir=log_dir) + self.step = 0 + + def set_step(self, step=None): + if step is not None: + self.step = step + else: + self.step += 1 + + def update(self, head='scalar', step=None, **kwargs): + for k, v in kwargs.items(): + if v is None: + continue + if isinstance(v, torch.Tensor): + v = v.item() + assert isinstance(v, (float, int)) + self.writer.add_scalar(head + "/" + k, v, self.step if step is None else step) + + def flush(self): + self.writer.flush() + +def seed_worker(worker_id): + worker_seed = torch.initial_seed() % 2**32 + np.random.seed(worker_seed) + random.seed(worker_seed) + +def _load_checkpoint_for_ema(model_ema, checkpoint): + """ + Workaround for ModelEma._load_checkpoint to accept an already-loaded object + """ + mem_file = io.BytesIO() + torch.save(checkpoint, mem_file) + mem_file.seek(0) + model_ema._load_checkpoint(mem_file) + + +def setup_for_distributed(is_master): + """ + This function disables printing when not in master process + """ + import builtins as __builtin__ + builtin_print = __builtin__.print + + def print(*args, **kwargs): + force = kwargs.pop('force', False) + if is_master or force: + builtin_print(*args, **kwargs) + + __builtin__.print = print + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True + + +def get_world_size(): + if not is_dist_avail_and_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not is_dist_avail_and_initialized(): + return 0 + return dist.get_rank() + + +def is_main_process(): + return get_rank() == 0 + + +def save_on_master(*args, **kwargs): + if is_main_process(): + torch.save(*args, **kwargs) + + +def init_distributed_mode(args): + if args.dist_on_itp: + args.rank = int(os.environ['OMPI_COMM_WORLD_RANK']) + args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE']) + args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK']) + args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT']) + os.environ['LOCAL_RANK'] = str(args.gpu) + os.environ['RANK'] = str(args.rank) + os.environ['WORLD_SIZE'] = str(args.world_size) + elif 'SLURM_PROCID' in os.environ: + args.rank = int(os.environ['SLURM_PROCID']) + args.gpu = int(os.environ['SLURM_LOCALID']) + args.world_size = int(os.environ['SLURM_NTASKS']) + os.environ['RANK'] = str(args.rank) + os.environ['LOCAL_RANK'] = str(args.gpu) + 
os.environ['WORLD_SIZE'] = str(args.world_size) + + node_list = os.environ['SLURM_NODELIST'] + addr = subprocess.getoutput( + f'scontrol show hostname {node_list} | head -n1') + if 'MASTER_ADDR' not in os.environ: + os.environ['MASTER_ADDR'] = addr + elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: + args.rank = int(os.environ["RANK"]) + args.world_size = int(os.environ['WORLD_SIZE']) + args.gpu = int(os.environ['LOCAL_RANK']) + else: + print('Not using distributed mode') + args.distributed = False + return + + args.distributed = True + + torch.cuda.set_device(args.gpu) + args.dist_backend = 'nccl' + print('| distributed init (rank {}): {}, gpu {}'.format( + args.rank, args.dist_url, args.gpu), flush=True) + torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, + world_size=args.world_size, rank=args.rank) + torch.distributed.barrier() + # assert torch.distributed.is_initialized() + setup_for_distributed(args.rank == 0) + + +def load_state_dict(model, state_dict, prefix='', ignore_missing="relative_position_index"): + missing_keys = [] + unexpected_keys = [] + error_msgs = [] + metadata = getattr(state_dict, '_metadata', None) + state_dict = state_dict.copy() + if metadata is not None: + state_dict._metadata = metadata + + def load(module, prefix=''): + local_metadata = {} if metadata is None else metadata.get( + prefix[:-1], {}) + module._load_from_state_dict( + state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) + for name, child in module._modules.items(): + if child is not None: + load(child, prefix + name + '.') + + load(model, prefix=prefix) + + warn_missing_keys = [] + ignore_missing_keys = [] + for key in missing_keys: + keep_flag = True + for ignore_key in ignore_missing.split('|'): + if ignore_key in key: + keep_flag = False + break + if keep_flag: + warn_missing_keys.append(key) + else: + ignore_missing_keys.append(key) + + missing_keys = warn_missing_keys + + if len(missing_keys) > 0: + print("Weights of {} not initialized from pretrained model: {}".format( + model.__class__.__name__, missing_keys)) + if len(unexpected_keys) > 0: + print("Weights from pretrained model not used in {}: {}".format( + model.__class__.__name__, unexpected_keys)) + if len(ignore_missing_keys) > 0: + print("Ignored weights of {} not initialized from pretrained model: {}".format( + model.__class__.__name__, ignore_missing_keys)) + if len(error_msgs) > 0: + print('\n'.join(error_msgs)) + + +class NativeScalerWithGradNormCount: + state_dict_key = "amp_scaler" + + def __init__(self): + self._scaler = torch.cuda.amp.GradScaler() + + def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True): + self._scaler.scale(loss).backward(create_graph=create_graph) + if update_grad: + if clip_grad is not None: + assert parameters is not None + self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place + norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad) + else: + self._scaler.unscale_(optimizer) + norm = get_grad_norm_(parameters) + self._scaler.step(optimizer) + self._scaler.update() + else: + norm = None + return norm + + def state_dict(self): + return self._scaler.state_dict() + + def load_state_dict(self, state_dict): + self._scaler.load_state_dict(state_dict) + + +def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor: + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + parameters = [p for p in parameters 
if p.grad is not None] + norm_type = float(norm_type) + if len(parameters) == 0: + return torch.tensor(0.) + device = parameters[0].grad.device + if norm_type == inf: + total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters) + else: + total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type) + return total_norm + + +def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0, + start_warmup_value=0, warmup_steps=-1): + warmup_schedule = np.array([]) + warmup_iters = warmup_epochs * niter_per_ep + if warmup_steps > 0: + warmup_iters = warmup_steps + print("Set warmup steps = %d" % warmup_iters) + if warmup_epochs > 0: + warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters) + + iters = np.arange(epochs * niter_per_ep - warmup_iters) + schedule = np.array( + [final_value + 0.5 * (base_value - final_value) * (1 + math.cos(math.pi * i / (len(iters)))) for i in iters]) + + schedule = np.concatenate((warmup_schedule, schedule)) + + assert len(schedule) == epochs * niter_per_ep + return schedule + + +def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_ema=None): + output_dir = Path(args.output_dir) + epoch_name = str(epoch) + if loss_scaler is not None: + checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch_name)] + for checkpoint_path in checkpoint_paths: + to_save = { + 'model': model_without_ddp.state_dict(), + 'optimizer': optimizer.state_dict(), + 'epoch': epoch, + 'scaler': loss_scaler.state_dict(), + 'args': args, + } + + if model_ema is not None: + to_save['model_ema'] = get_state_dict(model_ema) + + save_on_master(to_save, checkpoint_path) + else: + client_state = {'epoch': epoch} + if model_ema is not None: + client_state['model_ema'] = get_state_dict(model_ema) + model.save_checkpoint(save_dir=args.output_dir, tag="checkpoint-%s" % epoch_name, client_state=client_state) + + +def auto_load_model(args, model, model_without_ddp, optimizer, loss_scaler, model_ema=None): + output_dir = Path(args.output_dir) + if loss_scaler is not None: + # torch.amp + if args.auto_resume and len(args.resume) == 0: + import glob + all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*.pth')) + latest_ckpt = -1 + for ckpt in all_checkpoints: + t = ckpt.split('-')[-1].split('.')[0] + if t.isdigit(): + latest_ckpt = max(int(t), latest_ckpt) + if latest_ckpt >= 0: + args.resume = os.path.join(output_dir, 'checkpoint-%d.pth' % latest_ckpt) + print("Auto resume checkpoint: %s" % args.resume) + + if args.resume: + if args.resume.startswith('https'): + checkpoint = torch.hub.load_state_dict_from_url( + args.resume, map_location='cpu', check_hash=True) + else: + checkpoint = torch.load(args.resume, map_location='cpu') + model_without_ddp.load_state_dict(checkpoint['model']) + print("Resume checkpoint %s" % args.resume) + if 'optimizer' in checkpoint and 'epoch' in checkpoint: + optimizer.load_state_dict(checkpoint['optimizer']) + args.start_epoch = checkpoint['epoch'] + 1 + if hasattr(args, 'model_ema') and args.model_ema: + _load_checkpoint_for_ema(model_ema, checkpoint['model_ema']) + if 'scaler' in checkpoint: + loss_scaler.load_state_dict(checkpoint['scaler']) + print("With optim & sched!") + else: + # deepspeed, only support '--auto_resume'. 
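+        # DeepSpeed saves checkpoints as directories tagged 'checkpoint-<epoch>'.
+        # The branch below scans output_dir for the highest epoch tag, calls
+        # model.load_checkpoint() to restore weights and optimizer state, and
+        # reads the start epoch (and optional EMA weights) from client_states.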
+ if args.auto_resume: + import glob + all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*')) + latest_ckpt = -1 + for ckpt in all_checkpoints: + t = ckpt.split('-')[-1].split('.')[0] + if t.isdigit(): + latest_ckpt = max(int(t), latest_ckpt) + if latest_ckpt >= 0: + args.resume = os.path.join(output_dir, 'checkpoint-%d' % latest_ckpt) + print("Auto resume checkpoint: %d" % latest_ckpt) + _, client_states = model.load_checkpoint(args.output_dir, tag='checkpoint-%d' % latest_ckpt) + args.start_epoch = client_states['epoch'] + 1 + if model_ema is not None: + if args.model_ema: + _load_checkpoint_for_ema(model_ema, client_states['model_ema']) + + +def create_ds_config(args): + args.deepspeed_config = os.path.join(args.output_dir, "deepspeed_config.json") + with open(args.deepspeed_config, mode="w") as writer: + ds_config = { + "train_batch_size": args.batch_size * args.update_freq * get_world_size(), + "train_micro_batch_size_per_gpu": args.batch_size, + "steps_per_print": 1000, + "optimizer": { + "type": "Adam", + "adam_w_mode": True, + "params": { + "lr": args.lr, + "weight_decay": args.weight_decay, + "bias_correction": True, + "betas": [ + 0.9, + 0.999 + ], + "eps": 1e-8 + } + }, + "fp16": { + "enabled": True, + "loss_scale": 0, + "initial_scale_power": 7, + "loss_scale_window": 128 + } + } + + writer.write(json.dumps(ds_config, indent=2)) + +def multiple_samples_collate(batch, fold=False): + """ + Collate function for repeated augmentation. Each instance in the batch has + more than one sample. + Args: + batch (tuple or list): data batch to collate. + Returns: + (tuple): collated data batch. + """ + inputs, labels, video_idx, extra_data = zip(*batch) + inputs = [item for sublist in inputs for item in sublist] + labels = [item for sublist in labels for item in sublist] + video_idx = [item for sublist in video_idx for item in sublist] + inputs, labels, video_idx, extra_data = ( + default_collate(inputs), + default_collate(labels), + default_collate(video_idx), + default_collate(extra_data), + ) + if fold: + return [inputs], labels, video_idx, extra_data + else: + return inputs, labels, video_idx, extra_data diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/video_transforms.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/video_transforms.py new file mode 100644 index 0000000..c7c9ad9 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/video_transforms.py @@ -0,0 +1,1281 @@ +#!/usr/bin/env python3 +import math +import numpy as np +import random +import torch +import torchvision.transforms.functional as F +from PIL import Image +from torchvision import transforms + +from .rand_augment import rand_augment_transform +from .random_erasing import RandomErasing + + +import numbers +import PIL +import torchvision + +from . 
import functional as FF + +_pil_interpolation_to_str = { + Image.NEAREST: "PIL.Image.NEAREST", + Image.BILINEAR: "PIL.Image.BILINEAR", + Image.BICUBIC: "PIL.Image.BICUBIC", + Image.LANCZOS: "PIL.Image.LANCZOS", + Image.HAMMING: "PIL.Image.HAMMING", + Image.BOX: "PIL.Image.BOX", +} + + +_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC) + + +def _pil_interp(method): + if method == "bicubic": + return Image.BICUBIC + elif method == "lanczos": + return Image.LANCZOS + elif method == "hamming": + return Image.HAMMING + else: + return Image.BILINEAR + + +def random_short_side_scale_jitter( + images, min_size, max_size, boxes=None, inverse_uniform_sampling=False +): + """ + Perform a spatial short scale jittering on the given images and + corresponding boxes. + Args: + images (tensor): images to perform scale jitter. Dimension is + `num frames` x `channel` x `height` x `width`. + min_size (int): the minimal size to scale the frames. + max_size (int): the maximal size to scale the frames. + boxes (ndarray): optional. Corresponding boxes to images. + Dimension is `num boxes` x 4. + inverse_uniform_sampling (bool): if True, sample uniformly in + [1 / max_scale, 1 / min_scale] and take a reciprocal to get the + scale. If False, take a uniform sample from [min_scale, max_scale]. + Returns: + (tensor): the scaled images with dimension of + `num frames` x `channel` x `new height` x `new width`. + (ndarray or None): the scaled boxes with dimension of + `num boxes` x 4. + """ + if inverse_uniform_sampling: + size = int( + round(1.0 / np.random.uniform(1.0 / max_size, 1.0 / min_size)) + ) + else: + size = int(round(np.random.uniform(min_size, max_size))) + + height = images.shape[2] + width = images.shape[3] + if (width <= height and width == size) or ( + height <= width and height == size + ): + return images, boxes + new_width = size + new_height = size + if width < height: + new_height = int(math.floor((float(height) / width) * size)) + if boxes is not None: + boxes = boxes * float(new_height) / height + else: + new_width = int(math.floor((float(width) / height) * size)) + if boxes is not None: + boxes = boxes * float(new_width) / width + + return ( + torch.nn.functional.interpolate( + images, + size=(new_height, new_width), + mode="bilinear", + align_corners=False, + ), + boxes, + ) + + +def crop_boxes(boxes, x_offset, y_offset): + """ + Peform crop on the bounding boxes given the offsets. + Args: + boxes (ndarray or None): bounding boxes to peform crop. The dimension + is `num boxes` x 4. + x_offset (int): cropping offset in the x axis. + y_offset (int): cropping offset in the y axis. + Returns: + cropped_boxes (ndarray or None): the cropped boxes with dimension of + `num boxes` x 4. + """ + cropped_boxes = boxes.copy() + cropped_boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset + cropped_boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset + + return cropped_boxes + + +def random_crop(images, size, boxes=None): + """ + Perform random spatial crop on the given images and corresponding boxes. + Args: + images (tensor): images to perform random crop. The dimension is + `num frames` x `channel` x `height` x `width`. + size (int): the size of height and width to crop on the image. + boxes (ndarray or None): optional. Corresponding boxes to images. + Dimension is `num boxes` x 4. + Returns: + cropped (tensor): cropped images with dimension of + `num frames` x `channel` x `size` x `size`. + cropped_boxes (ndarray or None): the cropped boxes with dimension of + `num boxes` x 4. 
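+    Example (illustrative; shapes assumed):
+        # frames: tensor of shape (num_frames, channel, 256, 320)
+        # cropped, _ = random_crop(frames, size=224)
+        # cropped.shape -> (num_frames, channel, 224, 224)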
+ """ + if images.shape[2] == size and images.shape[3] == size: + return images + height = images.shape[2] + width = images.shape[3] + y_offset = 0 + if height > size: + y_offset = int(np.random.randint(0, height - size)) + x_offset = 0 + if width > size: + x_offset = int(np.random.randint(0, width - size)) + cropped = images[ + :, :, y_offset : y_offset + size, x_offset : x_offset + size + ] + + cropped_boxes = ( + crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None + ) + + return cropped, cropped_boxes + + +def horizontal_flip(prob, images, boxes=None): + """ + Perform horizontal flip on the given images and corresponding boxes. + Args: + prob (float): probility to flip the images. + images (tensor): images to perform horizontal flip, the dimension is + `num frames` x `channel` x `height` x `width`. + boxes (ndarray or None): optional. Corresponding boxes to images. + Dimension is `num boxes` x 4. + Returns: + images (tensor): images with dimension of + `num frames` x `channel` x `height` x `width`. + flipped_boxes (ndarray or None): the flipped boxes with dimension of + `num boxes` x 4. + """ + if boxes is None: + flipped_boxes = None + else: + flipped_boxes = boxes.copy() + + if np.random.uniform() < prob: + images = images.flip((-1)) + + if len(images.shape) == 3: + width = images.shape[2] + elif len(images.shape) == 4: + width = images.shape[3] + else: + raise NotImplementedError("Dimension does not supported") + if boxes is not None: + flipped_boxes[:, [0, 2]] = width - boxes[:, [2, 0]] - 1 + + return images, flipped_boxes + + +def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None): + """ + Perform uniform spatial sampling on the images and corresponding boxes. + Args: + images (tensor): images to perform uniform crop. The dimension is + `num frames` x `channel` x `height` x `width`. + size (int): size of height and weight to crop the images. + spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width + is larger than height. Or 0, 1, or 2 for top, center, and bottom + crop if height is larger than width. + boxes (ndarray or None): optional. Corresponding boxes to images. + Dimension is `num boxes` x 4. + scale_size (int): optinal. If not None, resize the images to scale_size before + performing any crop. + Returns: + cropped (tensor): images with dimension of + `num frames` x `channel` x `size` x `size`. + cropped_boxes (ndarray or None): the cropped boxes with dimension of + `num boxes` x 4. 
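+    Example (illustrative sketch): three-crop evaluation can be written as
+        # crops = [uniform_crop(frames, size=224, spatial_idx=idx)[0]
+        #          for idx in (0, 1, 2)]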
+ """ + assert spatial_idx in [0, 1, 2] + ndim = len(images.shape) + if ndim == 3: + images = images.unsqueeze(0) + height = images.shape[2] + width = images.shape[3] + + if scale_size is not None: + if width <= height: + width, height = scale_size, int(height / width * scale_size) + else: + width, height = int(width / height * scale_size), scale_size + images = torch.nn.functional.interpolate( + images, + size=(height, width), + mode="bilinear", + align_corners=False, + ) + + y_offset = int(math.ceil((height - size) / 2)) + x_offset = int(math.ceil((width - size) / 2)) + + if height > width: + if spatial_idx == 0: + y_offset = 0 + elif spatial_idx == 2: + y_offset = height - size + else: + if spatial_idx == 0: + x_offset = 0 + elif spatial_idx == 2: + x_offset = width - size + cropped = images[ + :, :, y_offset : y_offset + size, x_offset : x_offset + size + ] + cropped_boxes = ( + crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None + ) + if ndim == 3: + cropped = cropped.squeeze(0) + return cropped, cropped_boxes + + +def clip_boxes_to_image(boxes, height, width): + """ + Clip an array of boxes to an image with the given height and width. + Args: + boxes (ndarray): bounding boxes to perform clipping. + Dimension is `num boxes` x 4. + height (int): given image height. + width (int): given image width. + Returns: + clipped_boxes (ndarray): the clipped boxes with dimension of + `num boxes` x 4. + """ + clipped_boxes = boxes.copy() + clipped_boxes[:, [0, 2]] = np.minimum( + width - 1.0, np.maximum(0.0, boxes[:, [0, 2]]) + ) + clipped_boxes[:, [1, 3]] = np.minimum( + height - 1.0, np.maximum(0.0, boxes[:, [1, 3]]) + ) + return clipped_boxes + + +def blend(images1, images2, alpha): + """ + Blend two images with a given weight alpha. + Args: + images1 (tensor): the first images to be blended, the dimension is + `num frames` x `channel` x `height` x `width`. + images2 (tensor): the second images to be blended, the dimension is + `num frames` x `channel` x `height` x `width`. + alpha (float): the blending weight. + Returns: + (tensor): blended images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + return images1 * alpha + images2 * (1 - alpha) + + +def grayscale(images): + """ + Get the grayscale for the input images. The channels of images should be + in order BGR. + Args: + images (tensor): the input images for getting grayscale. Dimension is + `num frames` x `channel` x `height` x `width`. + Returns: + img_gray (tensor): blended images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + # R -> 0.299, G -> 0.587, B -> 0.114. + img_gray = torch.tensor(images) + gray_channel = ( + 0.299 * images[:, 2] + 0.587 * images[:, 1] + 0.114 * images[:, 0] + ) + img_gray[:, 0] = gray_channel + img_gray[:, 1] = gray_channel + img_gray[:, 2] = gray_channel + return img_gray + + +def color_jitter(images, img_brightness=0, img_contrast=0, img_saturation=0): + """ + Perfrom a color jittering on the input images. The channels of images + should be in order BGR. + Args: + images (tensor): images to perform color jitter. Dimension is + `num frames` x `channel` x `height` x `width`. + img_brightness (float): jitter ratio for brightness. + img_contrast (float): jitter ratio for contrast. + img_saturation (float): jitter ratio for saturation. + Returns: + images (tensor): the jittered images, the dimension is + `num frames` x `channel` x `height` x `width`. 
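+    Only jitters with a non-zero ratio are applied, in a randomly permuted order.
+    Example (illustrative; strengths assumed):
+        # frames = color_jitter(frames, img_brightness=0.4, img_contrast=0.4, img_saturation=0.4)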
+ """ + + jitter = [] + if img_brightness != 0: + jitter.append("brightness") + if img_contrast != 0: + jitter.append("contrast") + if img_saturation != 0: + jitter.append("saturation") + + if len(jitter) > 0: + order = np.random.permutation(np.arange(len(jitter))) + for idx in range(0, len(jitter)): + if jitter[order[idx]] == "brightness": + images = brightness_jitter(img_brightness, images) + elif jitter[order[idx]] == "contrast": + images = contrast_jitter(img_contrast, images) + elif jitter[order[idx]] == "saturation": + images = saturation_jitter(img_saturation, images) + return images + + +def brightness_jitter(var, images): + """ + Perfrom brightness jittering on the input images. The channels of images + should be in order BGR. + Args: + var (float): jitter ratio for brightness. + images (tensor): images to perform color jitter. Dimension is + `num frames` x `channel` x `height` x `width`. + Returns: + images (tensor): the jittered images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + alpha = 1.0 + np.random.uniform(-var, var) + + img_bright = torch.zeros(images.shape) + images = blend(images, img_bright, alpha) + return images + + +def contrast_jitter(var, images): + """ + Perfrom contrast jittering on the input images. The channels of images + should be in order BGR. + Args: + var (float): jitter ratio for contrast. + images (tensor): images to perform color jitter. Dimension is + `num frames` x `channel` x `height` x `width`. + Returns: + images (tensor): the jittered images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + alpha = 1.0 + np.random.uniform(-var, var) + + img_gray = grayscale(images) + img_gray[:] = torch.mean(img_gray, dim=(1, 2, 3), keepdim=True) + images = blend(images, img_gray, alpha) + return images + + +def saturation_jitter(var, images): + """ + Perfrom saturation jittering on the input images. The channels of images + should be in order BGR. + Args: + var (float): jitter ratio for saturation. + images (tensor): images to perform color jitter. Dimension is + `num frames` x `channel` x `height` x `width`. + Returns: + images (tensor): the jittered images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + alpha = 1.0 + np.random.uniform(-var, var) + img_gray = grayscale(images) + images = blend(images, img_gray, alpha) + + return images + + +def lighting_jitter(images, alphastd, eigval, eigvec): + """ + Perform AlexNet-style PCA jitter on the given images. + Args: + images (tensor): images to perform lighting jitter. Dimension is + `num frames` x `channel` x `height` x `width`. + alphastd (float): jitter ratio for PCA jitter. + eigval (list): eigenvalues for PCA jitter. + eigvec (list[list]): eigenvectors for PCA jitter. + Returns: + out_images (tensor): the jittered images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + if alphastd == 0: + return images + # generate alpha1, alpha2, alpha3. 
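+    # Each alpha is drawn from N(0, alphastd); the per-channel offset is
+    # rgb = sum_i alpha_i * eigval_i * eigvec[:, i], i.e. a random combination
+    # of the principal color components, added channel-wise below
+    # (indexed as rgb[2 - idx], assuming BGR channel order).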
+ alpha = np.random.normal(0, alphastd, size=(1, 3)) + eig_vec = np.array(eigvec) + eig_val = np.reshape(eigval, (1, 3)) + rgb = np.sum( + eig_vec * np.repeat(alpha, 3, axis=0) * np.repeat(eig_val, 3, axis=0), + axis=1, + ) + out_images = torch.zeros_like(images) + if len(images.shape) == 3: + # C H W + channel_dim = 0 + elif len(images.shape) == 4: + # T C H W + channel_dim = 1 + else: + raise NotImplementedError(f"Unsupported dimension {len(images.shape)}") + + for idx in range(images.shape[channel_dim]): + # C H W + if len(images.shape) == 3: + out_images[idx] = images[idx] + rgb[2 - idx] + # T C H W + elif len(images.shape) == 4: + out_images[:, idx] = images[:, idx] + rgb[2 - idx] + else: + raise NotImplementedError( + f"Unsupported dimension {len(images.shape)}" + ) + + return out_images + + +def color_normalization(images, mean, stddev): + """ + Perform color nomration on the given images. + Args: + images (tensor): images to perform color normalization. Dimension is + `num frames` x `channel` x `height` x `width`. + mean (list): mean values for normalization. + stddev (list): standard deviations for normalization. + + Returns: + out_images (tensor): the noramlized images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + if len(images.shape) == 3: + assert ( + len(mean) == images.shape[0] + ), "channel mean not computed properly" + assert ( + len(stddev) == images.shape[0] + ), "channel stddev not computed properly" + elif len(images.shape) == 4: + assert ( + len(mean) == images.shape[1] + ), "channel mean not computed properly" + assert ( + len(stddev) == images.shape[1] + ), "channel stddev not computed properly" + else: + raise NotImplementedError(f"Unsupported dimension {len(images.shape)}") + + out_images = torch.zeros_like(images) + for idx in range(len(mean)): + # C H W + if len(images.shape) == 3: + out_images[idx] = (images[idx] - mean[idx]) / stddev[idx] + elif len(images.shape) == 4: + out_images[:, idx] = (images[:, idx] - mean[idx]) / stddev[idx] + else: + raise NotImplementedError( + f"Unsupported dimension {len(images.shape)}" + ) + return out_images + + +def _get_param_spatial_crop( + scale, ratio, height, width, num_repeat=10, log_scale=True, switch_hw=False +): + """ + Given scale, ratio, height and width, return sampled coordinates of the videos. + """ + for _ in range(num_repeat): + area = height * width + target_area = random.uniform(*scale) * area + if log_scale: + log_ratio = (math.log(ratio[0]), math.log(ratio[1])) + aspect_ratio = math.exp(random.uniform(*log_ratio)) + else: + aspect_ratio = random.uniform(*ratio) + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if np.random.uniform() < 0.5 and switch_hw: + w, h = h, w + + if 0 < w <= width and 0 < h <= height: + i = random.randint(0, height - h) + j = random.randint(0, width - w) + return i, j, h, w + + # Fallback to central crop + in_ratio = float(width) / float(height) + if in_ratio < min(ratio): + w = width + h = int(round(w / min(ratio))) + elif in_ratio > max(ratio): + h = height + w = int(round(h * max(ratio))) + else: # whole image + w = width + h = height + i = (height - h) // 2 + j = (width - w) // 2 + return i, j, h, w + + +def random_resized_crop( + images, + target_height, + target_width, + scale=(0.8, 1.0), + ratio=(3.0 / 4.0, 4.0 / 3.0), +): + """ + Crop the given images to random size and aspect ratio. 
A crop of random + size (default: of 0.08 to 1.0) of the original size and a random aspect + ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This + crop is finally resized to given size. This is popularly used to train the + Inception networks. + + Args: + images: Images to perform resizing and cropping. + target_height: Desired height after cropping. + target_width: Desired width after cropping. + scale: Scale range of Inception-style area based random resizing. + ratio: Aspect ratio range of Inception-style area based random resizing. + """ + + height = images.shape[2] + width = images.shape[3] + + i, j, h, w = _get_param_spatial_crop(scale, ratio, height, width) + cropped = images[:, :, i : i + h, j : j + w] + return torch.nn.functional.interpolate( + cropped, + size=(target_height, target_width), + mode="bilinear", + align_corners=False, + ) + + +def random_resized_crop_with_shift( + images, + target_height, + target_width, + scale=(0.8, 1.0), + ratio=(3.0 / 4.0, 4.0 / 3.0), +): + """ + This is similar to random_resized_crop. However, it samples two different + boxes (for cropping) for the first and last frame. It then linearly + interpolates the two boxes for other frames. + + Args: + images: Images to perform resizing and cropping. + target_height: Desired height after cropping. + target_width: Desired width after cropping. + scale: Scale range of Inception-style area based random resizing. + ratio: Aspect ratio range of Inception-style area based random resizing. + """ + t = images.shape[1] + height = images.shape[2] + width = images.shape[3] + + i, j, h, w = _get_param_spatial_crop(scale, ratio, height, width) + i_, j_, h_, w_ = _get_param_spatial_crop(scale, ratio, height, width) + i_s = [int(i) for i in torch.linspace(i, i_, steps=t).tolist()] + j_s = [int(i) for i in torch.linspace(j, j_, steps=t).tolist()] + h_s = [int(i) for i in torch.linspace(h, h_, steps=t).tolist()] + w_s = [int(i) for i in torch.linspace(w, w_, steps=t).tolist()] + out = torch.zeros((3, t, target_height, target_width)) + for ind in range(t): + out[:, ind : ind + 1, :, :] = torch.nn.functional.interpolate( + images[ + :, + ind : ind + 1, + i_s[ind] : i_s[ind] + h_s[ind], + j_s[ind] : j_s[ind] + w_s[ind], + ], + size=(target_height, target_width), + mode="bilinear", + align_corners=False, + ) + return out + + +def create_random_augment( + input_size, + auto_augment=None, + interpolation="bilinear", +): + """ + Get video randaug transform. + + Args: + input_size: The size of the input video in tuple. + auto_augment: Parameters for randaug. An example: + "rand-m7-n4-mstd0.5-inc1" (m is the magnitude and n is the number + of operations to apply). + interpolation: Interpolation method. + """ + if isinstance(input_size, tuple): + img_size = input_size[-2:] + else: + img_size = input_size + + if auto_augment: + assert isinstance(auto_augment, str) + if isinstance(img_size, tuple): + img_size_min = min(img_size) + else: + img_size_min = img_size + aa_params = {"translate_const": int(img_size_min * 0.45)} + if interpolation and interpolation != "random": + aa_params["interpolation"] = _pil_interp(interpolation) + if auto_augment.startswith("rand"): + return transforms.Compose( + [rand_augment_transform(auto_augment, aa_params)] + ) + raise NotImplementedError + + +def random_sized_crop_img( + im, + size, + jitter_scale=(0.08, 1.0), + jitter_aspect=(3.0 / 4.0, 4.0 / 3.0), + max_iter=10, +): + """ + Performs Inception-style cropping (used for training). 
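+    Expects a single (C, H, W) tensor; a crop box is sampled via
+    _get_param_spatial_crop (linear scale sampling, switch_hw enabled) and the
+    crop is resized to (size, size) with bilinear interpolation.
+    Example (illustrative):
+        # img = random_sized_crop_img(frame, size=224)   # frame: (3, H, W) tensor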
+ """ + assert ( + len(im.shape) == 3 + ), "Currently only support image for random_sized_crop" + h, w = im.shape[1:3] + i, j, h, w = _get_param_spatial_crop( + scale=jitter_scale, + ratio=jitter_aspect, + height=h, + width=w, + num_repeat=max_iter, + log_scale=False, + switch_hw=True, + ) + cropped = im[:, i : i + h, j : j + w] + return torch.nn.functional.interpolate( + cropped.unsqueeze(0), + size=(size, size), + mode="bilinear", + align_corners=False, + ).squeeze(0) + + +# The following code are modified based on timm lib, we will replace the following +# contents with dependency from PyTorchVideo. +# https://github.com/facebookresearch/pytorchvideo +class RandomResizedCropAndInterpolation: + """Crop the given PIL Image to random size and aspect ratio with random interpolation. + A crop of random size (default: of 0.08 to 1.0) of the original size and a random + aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop + is finally resized to given size. + This is popularly used to train the Inception networks. + Args: + size: expected output size of each edge + scale: range of size of the origin size cropped + ratio: range of aspect ratio of the origin aspect ratio cropped + interpolation: Default: PIL.Image.BILINEAR + """ + + def __init__( + self, + size, + scale=(0.08, 1.0), + ratio=(3.0 / 4.0, 4.0 / 3.0), + interpolation="bilinear", + ): + if isinstance(size, tuple): + self.size = size + else: + self.size = (size, size) + if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): + print("range should be of kind (min, max)") + + if interpolation == "random": + self.interpolation = _RANDOM_INTERPOLATION + else: + self.interpolation = _pil_interp(interpolation) + self.scale = scale + self.ratio = ratio + + @staticmethod + def get_params(img, scale, ratio): + """Get parameters for ``crop`` for a random sized crop. + Args: + img (PIL Image): Image to be cropped. + scale (tuple): range of size of the origin size cropped + ratio (tuple): range of aspect ratio of the origin aspect ratio cropped + Returns: + tuple: params (i, j, h, w) to be passed to ``crop`` for a random + sized crop. + """ + area = img.size[0] * img.size[1] + + for _ in range(10): + target_area = random.uniform(*scale) * area + log_ratio = (math.log(ratio[0]), math.log(ratio[1])) + aspect_ratio = math.exp(random.uniform(*log_ratio)) + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if w <= img.size[0] and h <= img.size[1]: + i = random.randint(0, img.size[1] - h) + j = random.randint(0, img.size[0] - w) + return i, j, h, w + + # Fallback to central crop + in_ratio = img.size[0] / img.size[1] + if in_ratio < min(ratio): + w = img.size[0] + h = int(round(w / min(ratio))) + elif in_ratio > max(ratio): + h = img.size[1] + w = int(round(h * max(ratio))) + else: # whole image + w = img.size[0] + h = img.size[1] + i = (img.size[1] - h) // 2 + j = (img.size[0] - w) // 2 + return i, j, h, w + + def __call__(self, img): + """ + Args: + img (PIL Image): Image to be cropped and resized. + Returns: + PIL Image: Randomly cropped and resized image. 
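+        Example (illustrative; size assumed):
+            # t = RandomResizedCropAndInterpolation(size=224, interpolation="random")
+            # out = t(pil_img)   # PIL.Image cropped and resized to 224x224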
+ """ + i, j, h, w = self.get_params(img, self.scale, self.ratio) + if isinstance(self.interpolation, (tuple, list)): + interpolation = random.choice(self.interpolation) + else: + interpolation = self.interpolation + return F.resized_crop(img, i, j, h, w, self.size, interpolation) + + def __repr__(self): + if isinstance(self.interpolation, (tuple, list)): + interpolate_str = " ".join( + [_pil_interpolation_to_str[x] for x in self.interpolation] + ) + else: + interpolate_str = _pil_interpolation_to_str[self.interpolation] + format_string = self.__class__.__name__ + "(size={0}".format(self.size) + format_string += ", scale={0}".format( + tuple(round(s, 4) for s in self.scale) + ) + format_string += ", ratio={0}".format( + tuple(round(r, 4) for r in self.ratio) + ) + format_string += ", interpolation={0})".format(interpolate_str) + return format_string + + +def transforms_imagenet_train( + img_size=224, + scale=None, + ratio=None, + hflip=0.5, + vflip=0.0, + color_jitter=0.4, + auto_augment=None, + interpolation="random", + use_prefetcher=False, + mean=(0.485, 0.456, 0.406), + std=(0.229, 0.224, 0.225), + re_prob=0.0, + re_mode="const", + re_count=1, + re_num_splits=0, + separate=False, +): + """ + If separate==True, the transforms are returned as a tuple of 3 separate transforms + for use in a mixing dataset that passes + * all data through the first (primary) transform, called the 'clean' data + * a portion of the data through the secondary transform + * normalizes and converts the branches above with the third, final transform + """ + if isinstance(img_size, tuple): + img_size = img_size[-2:] + else: + img_size = img_size + + scale = tuple(scale or (0.08, 1.0)) # default imagenet scale range + ratio = tuple( + ratio or (3.0 / 4.0, 4.0 / 3.0) + ) # default imagenet ratio range + primary_tfl = [ + RandomResizedCropAndInterpolation( + img_size, scale=scale, ratio=ratio, interpolation=interpolation + ) + ] + if hflip > 0.0: + primary_tfl += [transforms.RandomHorizontalFlip(p=hflip)] + if vflip > 0.0: + primary_tfl += [transforms.RandomVerticalFlip(p=vflip)] + + secondary_tfl = [] + if auto_augment: + assert isinstance(auto_augment, str) + if isinstance(img_size, tuple): + img_size_min = min(img_size) + else: + img_size_min = img_size + aa_params = dict( + translate_const=int(img_size_min * 0.45), + img_mean=tuple([min(255, round(255 * x)) for x in mean]), + ) + if interpolation and interpolation != "random": + aa_params["interpolation"] = _pil_interp(interpolation) + if auto_augment.startswith("rand"): + secondary_tfl += [rand_augment_transform(auto_augment, aa_params)] + elif auto_augment.startswith("augmix"): + raise NotImplementedError("Augmix not implemented") + else: + raise NotImplementedError("Auto aug not implemented") + elif color_jitter is not None: + # color jitter is enabled when not using AA + if isinstance(color_jitter, (list, tuple)): + # color jitter should be a 3-tuple/list if spec brightness/contrast/saturation + # or 4 if also augmenting hue + assert len(color_jitter) in (3, 4) + else: + # if it's a scalar, duplicate for brightness, contrast, and saturation, no hue + color_jitter = (float(color_jitter),) * 3 + secondary_tfl += [transforms.ColorJitter(*color_jitter)] + + final_tfl = [] + final_tfl += [ + transforms.ToTensor(), + transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std)), + ] + if re_prob > 0.0: + final_tfl.append( + RandomErasing( + re_prob, + mode=re_mode, + max_count=re_count, + num_splits=re_num_splits, + device="cpu", + cube=False, + ) + ) + + 
if separate: + return ( + transforms.Compose(primary_tfl), + transforms.Compose(secondary_tfl), + transforms.Compose(final_tfl), + ) + else: + return transforms.Compose(primary_tfl + secondary_tfl + final_tfl) + +############################################################################################################ +############################################################################################################ + +class Compose(object): + """Composes several transforms + Args: + transforms (list of ``Transform`` objects): list of transforms + to compose + """ + + def __init__(self, transforms): + self.transforms = transforms + + def __call__(self, clip): + for t in self.transforms: + clip = t(clip) + return clip + + +class RandomHorizontalFlip(object): + """Horizontally flip the list of given images randomly + with a probability 0.5 + """ + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Randomly flipped clip + """ + if random.random() < 0.5: + if isinstance(clip[0], np.ndarray): + return [np.fliplr(img) for img in clip] + elif isinstance(clip[0], PIL.Image.Image): + return [ + img.transpose(PIL.Image.FLIP_LEFT_RIGHT) for img in clip + ] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + ' but got list of {0}'.format(type(clip[0]))) + return clip + + +class RandomResize(object): + """Resizes a list of (H x W x C) numpy.ndarray to the final size + The larger the original image is, the more times it takes to + interpolate + Args: + interpolation (str): Can be one of 'nearest', 'bilinear' + defaults to nearest + size (tuple): (widht, height) + """ + + def __init__(self, ratio=(3. / 4., 4. 
/ 3.), interpolation='nearest'): + self.ratio = ratio + self.interpolation = interpolation + + def __call__(self, clip): + scaling_factor = random.uniform(self.ratio[0], self.ratio[1]) + + if isinstance(clip[0], np.ndarray): + im_h, im_w, im_c = clip[0].shape + elif isinstance(clip[0], PIL.Image.Image): + im_w, im_h = clip[0].size + + new_w = int(im_w * scaling_factor) + new_h = int(im_h * scaling_factor) + new_size = (new_w, new_h) + resized = FF.resize_clip( + clip, new_size, interpolation=self.interpolation) + return resized + + +class Resize(object): + """Resizes a list of (H x W x C) numpy.ndarray to the final size + The larger the original image is, the more times it takes to + interpolate + Args: + interpolation (str): Can be one of 'nearest', 'bilinear' + defaults to nearest + size (tuple): (widht, height) + """ + + def __init__(self, size, interpolation='nearest'): + self.size = size + self.interpolation = interpolation + + def __call__(self, clip): + resized = FF.resize_clip( + clip, self.size, interpolation=self.interpolation) + return resized + + +class RandomCrop(object): + """Extract random crop at the same location for a list of images + Args: + size (sequence or int): Desired output size for the + crop in format (h, w) + """ + + def __init__(self, size): + if isinstance(size, numbers.Number): + size = (size, size) + + self.size = size + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + h, w = self.size + if isinstance(clip[0], np.ndarray): + im_h, im_w, im_c = clip[0].shape + elif isinstance(clip[0], PIL.Image.Image): + im_w, im_h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + if w > im_w or h > im_h: + error_msg = ( + 'Initial image size should be larger then ' + 'cropped size but got cropped sizes : ({w}, {h}) while ' + 'initial image is ({im_w}, {im_h})'.format( + im_w=im_w, im_h=im_h, w=w, h=h)) + raise ValueError(error_msg) + + x1 = random.randint(0, im_w - w) + y1 = random.randint(0, im_h - h) + cropped = FF.crop_clip(clip, y1, x1, h, w) + + return cropped + + +class ThreeCrop(object): + """Extract random crop at the same location for a list of images + Args: + size (sequence or int): Desired output size for the + crop in format (h, w) + """ + + def __init__(self, size): + if isinstance(size, numbers.Number): + size = (size, size) + + self.size = size + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + h, w = self.size + if isinstance(clip[0], np.ndarray): + im_h, im_w, im_c = clip[0].shape + elif isinstance(clip[0], PIL.Image.Image): + im_w, im_h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + if w != im_w and h != im_h: + clip = FF.resize_clip(clip, self.size, interpolation="bilinear") + im_h, im_w, im_c = clip[0].shape + + step = np.max((np.max((im_w, im_h)) - self.size[0]) // 2, 0) + cropped = [] + for i in range(3): + if (im_h > self.size[0]): + x1 = 0 + y1 = i * step + cropped.extend(FF.crop_clip(clip, y1, x1, h, w)) + else: + x1 = i * step + y1 = 0 + cropped.extend(FF.crop_clip(clip, y1, x1, h, w)) + return cropped + + +class RandomRotation(object): + """Rotate entire 
clip randomly by a random angle within + given bounds + Args: + degrees (sequence or int): Range of degrees to select from + If degrees is a number instead of sequence like (min, max), + the range of degrees, will be (-degrees, +degrees). + """ + + def __init__(self, degrees): + if isinstance(degrees, numbers.Number): + if degrees < 0: + raise ValueError('If degrees is a single number,' + 'must be positive') + degrees = (-degrees, degrees) + else: + if len(degrees) != 2: + raise ValueError('If degrees is a sequence,' + 'it must be of len 2.') + + self.degrees = degrees + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + import skimage + angle = random.uniform(self.degrees[0], self.degrees[1]) + if isinstance(clip[0], np.ndarray): + rotated = [skimage.transform.rotate(img, angle) for img in clip] + elif isinstance(clip[0], PIL.Image.Image): + rotated = [img.rotate(angle) for img in clip] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + + return rotated + + +class CenterCrop(object): + """Extract center crop at the same location for a list of images + Args: + size (sequence or int): Desired output size for the + crop in format (h, w) + """ + + def __init__(self, size): + if isinstance(size, numbers.Number): + size = (size, size) + + self.size = size + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + h, w = self.size + if isinstance(clip[0], np.ndarray): + im_h, im_w, im_c = clip[0].shape + elif isinstance(clip[0], PIL.Image.Image): + im_w, im_h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + if w > im_w or h > im_h: + error_msg = ( + 'Initial image size should be larger then ' + 'cropped size but got cropped sizes : ({w}, {h}) while ' + 'initial image is ({im_w}, {im_h})'.format( + im_w=im_w, im_h=im_h, w=w, h=h)) + raise ValueError(error_msg) + + x1 = int(round((im_w - w) / 2.)) + y1 = int(round((im_h - h) / 2.)) + cropped = FF.crop_clip(clip, y1, x1, h, w) + + return cropped + + +class ColorJitter(object): + """Randomly change the brightness, contrast and saturation and hue of the clip + Args: + brightness (float): How much to jitter brightness. brightness_factor + is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]. + contrast (float): How much to jitter contrast. contrast_factor + is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]. + saturation (float): How much to jitter saturation. saturation_factor + is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]. + hue(float): How much to jitter hue. hue_factor is chosen uniformly from + [-hue, hue]. Should be >=0 and <= 0.5. 
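+    This clip-level variant only supports lists of PIL Images; numpy clips
+    raise a TypeError (see __call__ below).
+    Example (illustrative; strengths assumed):
+        # jitter = ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
+        # clip = jitter(list_of_pil_frames)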
+ """ + + def __init__(self, brightness=0, contrast=0, saturation=0, hue=0): + self.brightness = brightness + self.contrast = contrast + self.saturation = saturation + self.hue = hue + + def get_params(self, brightness, contrast, saturation, hue): + if brightness > 0: + brightness_factor = random.uniform( + max(0, 1 - brightness), 1 + brightness) + else: + brightness_factor = None + + if contrast > 0: + contrast_factor = random.uniform( + max(0, 1 - contrast), 1 + contrast) + else: + contrast_factor = None + + if saturation > 0: + saturation_factor = random.uniform( + max(0, 1 - saturation), 1 + saturation) + else: + saturation_factor = None + + if hue > 0: + hue_factor = random.uniform(-hue, hue) + else: + hue_factor = None + return brightness_factor, contrast_factor, saturation_factor, hue_factor + + def __call__(self, clip): + """ + Args: + clip (list): list of PIL.Image + Returns: + list PIL.Image : list of transformed PIL.Image + """ + if isinstance(clip[0], np.ndarray): + raise TypeError( + 'Color jitter not yet implemented for numpy arrays') + elif isinstance(clip[0], PIL.Image.Image): + brightness, contrast, saturation, hue = self.get_params( + self.brightness, self.contrast, self.saturation, self.hue) + + # Create img transform function sequence + img_transforms = [] + if brightness is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness)) + if saturation is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation)) + if hue is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue)) + if contrast is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast)) + random.shuffle(img_transforms) + + # Apply to all images + jittered_clip = [] + for img in clip: + for func in img_transforms: + jittered_img = func(img) + jittered_clip.append(jittered_img) + + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + return jittered_clip + + +class Normalize(object): + """Normalize a clip with mean and standard deviation. + Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform + will normalize each channel of the input ``torch.*Tensor`` i.e. + ``input[channel] = (input[channel] - mean[channel]) / std[channel]`` + .. note:: + This transform acts out of place, i.e., it does not mutates the input tensor. + Args: + mean (sequence): Sequence of means for each channel. + std (sequence): Sequence of standard deviations for each channel. + """ + + def __init__(self, mean, std): + self.mean = mean + self.std = std + + def __call__(self, clip): + """ + Args: + clip (Tensor): Tensor clip of size (T, C, H, W) to be normalized. + Returns: + Tensor: Normalized Tensor clip. 
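+        Example (illustrative; ImageNet statistics assumed):
+            # norm = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+            # clip = norm(clip_tensor)   # clip_tensor: (T, C, H, W)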
+ """ + return FF.normalize(clip, self.mean, self.std) + + def __repr__(self): + return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std) diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/volume_transforms.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/volume_transforms.py new file mode 100644 index 0000000..4d33dad --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/videomae/volume_transforms.py @@ -0,0 +1,131 @@ +import numpy as np +from PIL import Image +import torch + + +def convert_img(img): + """Converts (H, W, C) numpy.ndarray to (C, W, H) format + """ + if len(img.shape) == 3: + img = img.transpose(2, 0, 1) + if len(img.shape) == 2: + img = np.expand_dims(img, 0) + return img + + +class ClipToTensor(object): + """Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255] + to a torch.FloatTensor of shape (C x m x H x W) in the range [0, 1.0] + """ + + def __init__(self, channel_nb=3, div_255=True, numpy=False): + self.channel_nb = channel_nb + self.div_255 = div_255 + self.numpy = numpy + + def __call__(self, clip): + """ + Args: clip (list of numpy.ndarray): clip (list of images) + to be converted to tensor. + """ + # Retrieve shape + if isinstance(clip[0], np.ndarray): + h, w, ch = clip[0].shape + assert ch == self.channel_nb, 'Got {0} instead of 3 channels'.format( + ch) + elif isinstance(clip[0], Image.Image): + w, h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image\ + but got list of {0}'.format(type(clip[0]))) + + np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)]) + + # Convert + for img_idx, img in enumerate(clip): + if isinstance(img, np.ndarray): + pass + elif isinstance(img, Image.Image): + img = np.array(img, copy=False) + else: + raise TypeError('Expected numpy.ndarray or PIL.Image\ + but got list of {0}'.format(type(clip[0]))) + img = convert_img(img) + np_clip[:, img_idx, :, :] = img + if self.numpy: + if self.div_255: + np_clip = np_clip / 255.0 + return np_clip + + else: + tensor_clip = torch.from_numpy(np_clip) + + if not isinstance(tensor_clip, torch.FloatTensor): + tensor_clip = tensor_clip.float() + if self.div_255: + tensor_clip = torch.div(tensor_clip, 255) + return tensor_clip + + +# Note this norms data to -1/1 +class ClipToTensor_K(object): + """Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255] + to a torch.FloatTensor of shape (C x m x H x W) in the range [0, 1.0] + """ + + def __init__(self, channel_nb=3, div_255=True, numpy=False): + self.channel_nb = channel_nb + self.div_255 = div_255 + self.numpy = numpy + + def __call__(self, clip): + """ + Args: clip (list of numpy.ndarray): clip (list of images) + to be converted to tensor. 
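+
+            Note (illustrative): unlike ``ClipToTensor``, this variant maps pixel
+            values to roughly [-1, 1] via (x - 127.5) / 127.5 rather than [0, 1].
+            For example::
+
+                to_tensor = ClipToTensor_K(channel_nb=3)
+                out = to_tensor([np.zeros((224, 224, 3), dtype=np.uint8)] * 8)
+                # out.shape == (3, 8, 224, 224); every value is -1.0 for black frames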
+ """ + # Retrieve shape + if isinstance(clip[0], np.ndarray): + h, w, ch = clip[0].shape + assert ch == self.channel_nb, 'Got {0} instead of 3 channels'.format( + ch) + elif isinstance(clip[0], Image.Image): + w, h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image\ + but got list of {0}'.format(type(clip[0]))) + + np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)]) + + # Convert + for img_idx, img in enumerate(clip): + if isinstance(img, np.ndarray): + pass + elif isinstance(img, Image.Image): + img = np.array(img, copy=False) + else: + raise TypeError('Expected numpy.ndarray or PIL.Image\ + but got list of {0}'.format(type(clip[0]))) + img = convert_img(img) + np_clip[:, img_idx, :, :] = img + if self.numpy: + if self.div_255: + np_clip = (np_clip - 127.5) / 127.5 + return np_clip + + else: + tensor_clip = torch.from_numpy(np_clip) + + if not isinstance(tensor_clip, torch.FloatTensor): + tensor_clip = tensor_clip.float() + if self.div_255: + tensor_clip = torch.div(torch.sub(tensor_clip, 127.5), 127.5) + return tensor_clip + + +class ToTensor(object): + """Converts numpy array to tensor + """ + + def __call__(self, array): + tensor = torch.from_numpy(array) + return tensor diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/models/vlnbert/vlnbert_PREVALENT.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/vlnbert/vlnbert_PREVALENT.py new file mode 100755 index 0000000..3b9b60e --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/vlnbert/vlnbert_PREVALENT.py @@ -0,0 +1,447 @@ +# PREVALENT, 2020, weituo.hao@duke.edu +# Modified in Recurrent VLN-BERT, 2020, Yicong.Hong@anu.edu.au + +from __future__ import absolute_import, division, print_function, unicode_literals + +import json +import logging +import math +import os +import sys +from io import open + +import torch +from torch import nn +from torch.nn import CrossEntropyLoss, MSELoss + +from pytorch_transformers import BertPreTrainedModel, BertConfig +import pdb + +logger = logging.getLogger(__name__) + +def gelu(x): + """Implementation of the gelu activation function. + For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): + 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) + Also see https://arxiv.org/abs/1606.08415 + """ + return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) + + +def swish(x): + return x * torch.sigmoid(x) + + +ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish} + + +try: + from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm +except (ImportError, AttributeError) as e: + logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .") +BertLayerNorm = torch.nn.LayerNorm + +class BertEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type embeddings. 
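+    The word, position and token-type embeddings are summed element-wise, then
+    passed through LayerNorm and dropout; position ids default to 0..seq_len-1 and
+    token-type ids default to all zeros when they are not provided.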
+ """ + def __init__(self, config): + super(BertEmbeddings, self).__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, input_ids, token_type_ids=None, position_ids=None): + seq_length = input_ids.size(1) + if position_ids is None: + position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) + position_ids = position_ids.unsqueeze(0).expand_as(input_ids) + if token_type_ids is None: + token_type_ids = torch.zeros_like(input_ids) + + words_embeddings = self.word_embeddings(input_ids) + position_embeddings = self.position_embeddings(position_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = words_embeddings + position_embeddings + token_type_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class BertSelfAttention(nn.Module): + def __init__(self, config): + super(BertSelfAttention, self).__init__() + if config.hidden_size % config.num_attention_heads != 0: + raise ValueError( + "The hidden size (%d) is not a multiple of the number of attention " + "heads (%d)" % (config.hidden_size, config.num_attention_heads)) + self.output_attentions = True + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward(self, hidden_states, attention_mask, head_mask=None): + mixed_query_layer = self.query(hidden_states) + mixed_key_layer = self.key(hidden_states) + mixed_value_layer = self.value(hidden_states) + + query_layer = self.transpose_for_scores(mixed_query_layer) + key_layer = self.transpose_for_scores(mixed_key_layer) + value_layer = self.transpose_for_scores(mixed_value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + # Apply the attention mask is (precomputed for all layers in BertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.Softmax(dim=-1)(attention_scores) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
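+        # The additive `attention_mask` is prepared in VLNBert.forward as
+        # (1.0 - binary_mask) * -10000.0: valid positions contribute 0.0 and padded
+        # positions contribute -10000.0, so the softmax above has already assigned
+        # padded positions (near) zero weight. E.g. mask [1, 1, 0] -> [0, 0, -10000].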
+ attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + + outputs = (context_layer, attention_scores) if self.output_attentions else (context_layer,) + return outputs + + +class BertSelfOutput(nn.Module): + def __init__(self, config): + super(BertSelfOutput, self).__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertAttention(nn.Module): + def __init__(self, config): + super(BertAttention, self).__init__() + self.self = BertSelfAttention(config) + self.output = BertSelfOutput(config) + + def forward(self, input_tensor, attention_mask, head_mask=None): + self_outputs = self.self(input_tensor, attention_mask, head_mask) + attention_output = self.output(self_outputs[0], input_tensor) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +class BertIntermediate(nn.Module): + def __init__(self, config): + super(BertIntermediate, self).__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class BertOutput(nn.Module): + def __init__(self, config): + super(BertOutput, self).__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertLayer(nn.Module): + def __init__(self, config): + super(BertLayer, self).__init__() + self.attention = BertAttention(config) + self.intermediate = BertIntermediate(config) + self.output = BertOutput(config) + + def forward(self, hidden_states, attention_mask, head_mask=None): + attention_outputs = self.attention(hidden_states, attention_mask, head_mask) + attention_output = attention_outputs[0] + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them + return outputs + + +class BertPooler(nn.Module): + def __init__(self, config): + super(BertPooler, self).__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states): 
+ # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class BertXAttention(nn.Module): + def __init__(self, config, ctx_dim=None): + super().__init__() + self.att = BertOutAttention(config, ctx_dim=ctx_dim) + self.output = BertSelfOutput(config) + + def forward(self, input_tensor, ctx_tensor, ctx_att_mask=None): + output, attention_scores = self.att(input_tensor, ctx_tensor, ctx_att_mask) + attention_output = self.output(output, input_tensor) + return attention_output, attention_scores + + +class BertOutAttention(nn.Module): + def __init__(self, config, ctx_dim=None): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0: + raise ValueError( + "The hidden size (%d) is not a multiple of the number of attention " + "heads (%d)" % (config.hidden_size, config.num_attention_heads)) + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + # visual_dim = 2048 + if ctx_dim is None: + ctx_dim =config.hidden_size + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(ctx_dim, self.all_head_size) + self.value = nn.Linear(ctx_dim, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward(self, hidden_states, context, attention_mask=None): + mixed_query_layer = self.query(hidden_states) + mixed_key_layer = self.key(context) + mixed_value_layer = self.value(context) + + query_layer = self.transpose_for_scores(mixed_query_layer) + key_layer = self.transpose_for_scores(mixed_key_layer) + value_layer = self.transpose_for_scores(mixed_value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + + # Apply the attention mask is (precomputed for all layers in BertModel forward() function) + if attention_mask is not None: + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.Softmax(dim=-1)(attention_scores) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
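+        # Cross-attention: queries are projected from `hidden_states` (one modality)
+        # while keys and values are projected from `context` (the other modality,
+        # whose feature width may differ via `ctx_dim`); the optional additive mask
+        # hides padded context positions exactly as in self-attention.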
+ attention_probs = self.dropout(attention_probs) + + context_layer = torch.matmul(attention_probs, value_layer) + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + return context_layer, attention_scores + + +class LXRTXLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + # Lang self-att and FFN layer + self.lang_self_att = BertAttention(config) + self.lang_inter = BertIntermediate(config) + self.lang_output = BertOutput(config) + # Visn self-att and FFN layer + self.visn_self_att = BertAttention(config) + self.visn_inter = BertIntermediate(config) + self.visn_output = BertOutput(config) + # The cross attention layer + self.visual_attention = BertXAttention(config) + + def cross_att(self, lang_input, lang_attention_mask, visn_input, visn_attention_mask): + ''' Cross Attention -- cross for vision not for language ''' + visn_att_output, attention_scores = self.visual_attention(visn_input, lang_input, ctx_att_mask=lang_attention_mask) + return visn_att_output, attention_scores + + def self_att(self, visn_input, visn_attention_mask): + ''' Self Attention -- on visual features with language clues ''' + visn_att_output = self.visn_self_att(visn_input, visn_attention_mask) + return visn_att_output + + def output_fc(self, visn_input): + ''' Feed forward ''' + visn_inter_output = self.visn_inter(visn_input) + visn_output = self.visn_output(visn_inter_output, visn_input) + return visn_output + + def forward(self, lang_feats, lang_attention_mask, + visn_feats, visn_attention_mask, tdx): + + ''' visual self-attention with state ''' + visn_att_output = torch.cat((lang_feats[:, 0:1, :], visn_feats), dim=1) + state_vis_mask = torch.cat((lang_attention_mask[:,:,:,0:1], visn_attention_mask), dim=-1) + + ''' state and vision attend to language ''' + visn_att_output, cross_attention_scores = self.cross_att(lang_feats[:, 1:, :], lang_attention_mask[:, :, :, 1:], visn_att_output, state_vis_mask) + + language_attention_scores = cross_attention_scores[:, :, 0, :] + + state_visn_att_output = self.self_att(visn_att_output, state_vis_mask) + state_visn_output = self.output_fc(state_visn_att_output[0]) + + visn_att_output = state_visn_output[:, 1:, :] + lang_att_output = torch.cat((state_visn_output[:, 0:1, :], lang_feats[:,1:,:]), dim=1) + + visual_attention_scores = state_visn_att_output[1][:, :, 0, 1:] + + return lang_att_output, visn_att_output, language_attention_scores, visual_attention_scores + + +class VisionEncoder(nn.Module): + def __init__(self, vision_size, config): + super().__init__() + feat_dim = vision_size + # Object feature encoding + self.visn_fc = nn.Linear(feat_dim, config.hidden_size) + self.visn_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, visn_input): + feats = visn_input + x = self.visn_fc(feats) + x = self.visn_layer_norm(x) + output = self.dropout(x) + return output + + +class VLNBert(BertPreTrainedModel): + def __init__(self, config): + super(VLNBert, self).__init__(config) + self.embeddings = BertEmbeddings(config) + self.pooler = BertPooler(config) + + self.img_dim = config.img_feature_dim # 2176 + logger.info('VLNBert Image Dimension: {}'.format(self.img_dim)) + self.img_feature_type = config.img_feature_type # '' + self.vl_layers = config.vl_layers # 4 + self.la_layers = config.la_layers # 9 + 
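+        # Two stacks are built below: `self.lalayer` holds `la_layers` language-only
+        # BertLayer blocks (run once per instruction in the 'language' mode of
+        # forward), while `self.addlayer` holds `vl_layers` cross-modal LXRTXLayer
+        # blocks (run at every navigation step in the 'visual' mode).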
self.lalayer = nn.ModuleList( + [BertLayer(config) for _ in range(self.la_layers)]) + self.addlayer = nn.ModuleList( + [LXRTXLayer(config) for _ in range(self.vl_layers)]) + # self.vision_encoder = VisionEncoder(self.config.img_feature_dim, self.config) + # self.apply(self.init_weights) + self.init_weights() + # del self.img_dim + # del self.vision_encoder + # del self.embeddings + + def forward(self, mode, input_ids, token_type_ids=None, + attention_mask=None, lang_mask=None, vis_mask=None, + position_ids=None, head_mask=None, img_feats=None): + + attention_mask = lang_mask + + if token_type_ids is None: + token_type_ids = torch.zeros_like(input_ids) + + extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) + + extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + + head_mask = [None] * self.config.num_hidden_layers + + if mode == 'language': + ''' LXMERT language branch (in VLN only perform this at initialization) ''' + if input_ids.shape[-1] == 768: # rxr instruction + text_embeds = input_ids + else: # r2r instruction + embedding_output = self.embeddings(input_ids, + position_ids=position_ids, token_type_ids=token_type_ids) + text_embeds = embedding_output + + for layer_module in self.lalayer: + temp_output = layer_module(text_embeds, extended_attention_mask) + text_embeds = temp_output[0] + + sequence_output = text_embeds + pooled_output = self.pooler(sequence_output) + + return pooled_output, sequence_output + + + elif mode == 'visual': + ''' LXMERT visual branch (no language processing during navigation) ''' + text_embeds = input_ids + text_mask = extended_attention_mask + + # img_embedding_output = self.vision_encoder(img_feats) + img_embedding_output = img_feats + img_seq_len = img_feats.shape[1] + batch_size = text_embeds.size(0) + + img_seq_mask = vis_mask + + extended_img_mask = img_seq_mask.unsqueeze(1).unsqueeze(2) + extended_img_mask = extended_img_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility + extended_img_mask = (1.0 - extended_img_mask) * -10000.0 + img_mask = extended_img_mask + + lang_output = text_embeds + visn_output = img_embedding_output + + for tdx, layer_module in enumerate(self.addlayer): + lang_output, visn_output, language_attention_scores, visual_attention_scores = layer_module(lang_output, text_mask, visn_output, img_mask, tdx) + + sequence_output = lang_output + pooled_output = self.pooler(sequence_output) + + visual_action_scores = visual_attention_scores.mean(dim=1) + + return pooled_output, visual_action_scores diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/models/vlnbert/vlnbert_init.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/vlnbert/vlnbert_init.py new file mode 100755 index 0000000..c39bae6 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/models/vlnbert/vlnbert_init.py @@ -0,0 +1,19 @@ +# Recurrent VLN-BERT, 2020, by Yicong.Hong@anu.edu.au + +from pytorch_transformers import (BertConfig, BertTokenizer) + +def get_vlnbert_models(config=None): + config_class = BertConfig + + from vlnce_baselines.models.vlnbert.vlnbert_PREVALENT import VLNBert + model_class = VLNBert + # model_name_or_path = 'data/mydata/snap/VLNBERT-PREVALENT-final/state_dict/best_val_unseen' + model_name_or_path = 'pretrained/Prevalent/pretrained_model/pytorch_model.bin' + vis_config = config_class.from_pretrained('pretrained/Prevalent/bert-base-uncased') + 
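+    # The paths above are assumed to hold a PREVALENT checkpoint and a local copy
+    # of the bert-base-uncased config; the fields patched onto `vis_config` below
+    # (visual feature dimension and the number of language-only / cross-modal
+    # layers) must match what VLNBert expects before `from_pretrained` restores
+    # the weights.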
vis_config.img_feature_dim = 2176 + vis_config.img_feature_type = "" + vis_config.vl_layers = 4 + vis_config.la_layers = 9 + visual_model = model_class.from_pretrained(model_name_or_path, config=vis_config) + + return visual_model diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/trainer_HAMT.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/trainer_HAMT.py new file mode 100755 index 0000000..e449574 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/trainer_HAMT.py @@ -0,0 +1,1001 @@ +import gc +import os +import io +import sys +import random +import warnings +from collections import defaultdict +from typing import Dict, List +import jsonlines + +import lmdb +import msgpack_numpy +import numpy as np +import math +import time +import torch +import torch.nn.functional as F +from torch.autograd import Variable +from torch.nn.parallel import DistributedDataParallel as DDP + +import tqdm +from gym import Space +from habitat import Config, logger +from habitat_baselines.common.baseline_registry import baseline_registry +from habitat_baselines.common.environments import get_env_class +from habitat_baselines.common.obs_transformers import ( + apply_obs_transforms_batch, + apply_obs_transforms_obs_space, + get_active_obs_transforms, +) +from habitat_baselines.common.tensorboard_utils import TensorboardWriter +from habitat_baselines.utils.common import batch_obs + +from vlnce_baselines.common.aux_losses import AuxLosses +from vlnce_baselines.common.base_il_trainer import BaseVLNCETrainer +from vlnce_baselines.common.env_utils import construct_envs, construct_envs_for_rl, is_slurm_batch_job +from vlnce_baselines.common.utils import extract_instruction_tokens +from vlnce_baselines.utils import reduce_loss + +from habitat.utils.visualizations.utils import images_to_video + +from .utils import get_camera_orientations12 +from .utils import ( + length2mask, dir_angle_feature_with_ele, +) +from vlnce_baselines.common.utils import dis_to_con, gather_list_and_concat +from habitat_extensions.measures import NDTW +from fastdtw import fastdtw + +with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=FutureWarning) + import tensorflow as tf # noqa: F401 + +import torch.distributed as distr +import gzip +import json +from copy import deepcopy +from torch.cuda.amp import autocast, GradScaler + +@baseline_registry.register_trainer(name="HAMT") +class RLTrainer(BaseVLNCETrainer): + def __init__(self, config=None): + super().__init__(config) + self.max_len = int(config.IL.max_traj_len) # * 0.97 transfered gt path got 0.96 spl + + #check ceph things + if config.CEPH_IO: + from petrel_client.client import Client + conf_path = '~/petreloss.conf' + self.client = Client(conf_path) + + + + def _make_dirs(self) -> None: + if self.config.local_rank == 0: + self._make_ckpt_dir() + # os.makedirs(self.lmdb_features_dir, exist_ok=True) + if self.config.EVAL.SAVE_RESULTS: + self._make_results_dir() + + def save_checkpoint(self, iteration: int,) -> None: + if not self.config.CEPH_IO: + torch.save( + obj={ + "state_dict": self.policy.state_dict(), + "config": self.config, + "optim_state": self.optimizer.state_dict(), + "iteration": iteration, + }, + f=os.path.join(self.config.CHECKPOINT_FOLDER, f"ckpt.iter{iteration}.pth"), + ) + else: + save_dict = { + "state_dict": self.policy.state_dict(), + "config": self.config, + "optim_state": self.optimizer.state_dict(), + "iteration": iteration, + } + path = os.path.join(self.config.CEPH_URL, 
f"ckpt.iter{iteration}.pth") + with io.BytesIO() as buffer: + torch.save(save_dict, buffer) + self.client.put(path, buffer.getvalue()) + + def _set_config(self): + self.split = self.config.TASK_CONFIG.DATASET.SPLIT + self.config.defrost() + self.config.TASK_CONFIG.TASK.NDTW.SPLIT = self.split + self.config.TASK_CONFIG.TASK.SDTW.SPLIT = self.split + self.config.TASK_CONFIG.TASK.MEASUREMENTS = ['DISTANCE_TO_GOAL', 'NDTW'] # for RL reward + self.config.TASK_CONFIG.ENVIRONMENT.MAX_EPISODE_STEPS = self.config.IL.max_traj_len + self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = -1 + self.config.SIMULATOR_GPU_IDS = self.config.SIMULATOR_GPU_IDS[self.config.local_rank] + self.config.use_pbar = True # not is_slurm_batch_job() + ''' if choosing image ''' + resize_config = self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES + crop_config = self.config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS + config = self.config.TASK_CONFIG + camera_orientations = get_camera_orientations12() + for sensor_type in ["RGB", "DEPTH"]: + resizer_size = dict(resize_config)[sensor_type.lower()] + cropper_size = dict(crop_config)[sensor_type.lower()] + sensor = getattr(config.SIMULATOR, f"{sensor_type}_SENSOR") + for action, orient in camera_orientations.items(): + camera_template = f"{sensor_type}_{action}" + camera_config = deepcopy(sensor) + camera_config.ORIENTATION = camera_orientations[action] + camera_config.UUID = camera_template.lower() + setattr(config.SIMULATOR, camera_template, camera_config) + config.SIMULATOR.AGENT_0.SENSORS.append(camera_template) + resize_config.append((camera_template.lower(), resizer_size)) + crop_config.append((camera_template.lower(), cropper_size)) + self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES = resize_config + self.config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS = crop_config + self.config.TASK_CONFIG = config + self.config.SENSORS = config.SIMULATOR.AGENT_0.SENSORS + + if self.config.IL.progress_monitor == True: + self.config.MODEL.progress_monitor = True + self.config.MODEL.max_len = self.config.IL.max_text_len + else: + self.config.MODEL.progress_monitor = False + + self.config.freeze() + + + + self.world_size = self.config.GPU_NUMBERS + self.local_rank = self.config.local_rank + self.batch_size = self.config.IL.batch_size + torch.cuda.set_device(self.device) + if self.world_size > 1: + distr.init_process_group(backend='nccl', init_method='env://') + self.device = self.config.TORCH_GPU_IDS[self.local_rank] + self.config.defrost() + self.config.TORCH_GPU_ID = self.config.TORCH_GPU_IDS[self.local_rank] + self.config.freeze() + torch.cuda.set_device(self.device) + + def _init_envs(self): + self.config.defrost() + self.config.TASK_CONFIG.SEED = self.config.TASK_CONFIG.SEED + self.local_rank + self.config.freeze() + + self.envs = construct_envs( + self.config, + get_env_class(self.config.ENV_NAME), + auto_reset_done=False + ) + env_num = self.envs.num_envs + dataset_len = sum(self.envs.number_of_episodes) + logger.info(f'LOCAL RANK: {self.local_rank}, ENV NUM: {env_num}, DATASET LEN: {dataset_len}') + observation_space = self.envs.observation_spaces[0] + action_space = self.envs.action_spaces[0] + self.obs_transforms = get_active_obs_transforms(self.config) + observation_space = apply_obs_transforms_obs_space( + observation_space, self.obs_transforms + ) + + return observation_space, action_space + + def _get_iter(self, x): + x_iter = int(x.split('.')[-2][4:]) + return x_iter + + def 
_initialize_policy( + self, + config: Config, + load_from_ckpt: bool, + observation_space: Space, + action_space: Space, + ) -> int: + start_iter = 0 + policy = baseline_registry.get_policy(self.config.MODEL.policy_name) + self.policy = policy.from_config( + config=config, + observation_space=observation_space, + action_space=action_space, + ) + ''' initialize the waypoint predictor here ''' + from vlnce_baselines.waypoint_pred.TRM_net import BinaryDistPredictor_TRM + self.waypoint_predictor = BinaryDistPredictor_TRM(device=self.device) + self.waypoint_predictor.load_state_dict( + torch.load('pretrained/wp_pred/waypoint_predictor', map_location = torch.device('cpu'))['predictor']['state_dict'] + ) + for param in self.waypoint_predictor.parameters(): + param.requires_grad_(False) + + self.policy.to(self.device) + self.waypoint_predictor.to(self.device) + self.num_recurrent_layers = self.policy.net.num_recurrent_layers + + if self.config.GPU_NUMBERS > 1: + print('Using', self.config.GPU_NUMBERS,'GPU!') + # find_unused_parameters=False fix ddp bug + self.policy.net = DDP(self.policy.net.to(self.device), device_ids=[self.device], + output_device=self.device, find_unused_parameters=False, broadcast_buffers=False) + self.optimizer = torch.optim.AdamW(self.policy.parameters(), lr=self.config.IL.lr) + + if config.IL.resume: + import glob + if not self.config.CEPH_IO: + ckpt_list = list(filter(os.path.isfile, glob.glob(config.CHECKPOINT_FOLDER + "/*")) ) + else: + ckpt_list = [os.path.join(self.config.CEPH_URL,p) for p in self.client.list(self.config.CEPH_URL)] + + ckpt_list.sort(key=self._get_iter) + + if len(ckpt_list) > 0: + config.defrost() + config.IL.ckpt_to_load = ckpt_list[-1] + load_from_ckpt = True + config.IL.is_requeue = True + config.freeze() + else: + load_from_ckpt = False + + if load_from_ckpt: + ckpt_path = config.IL.ckpt_to_load + + if not self.config.CEPH_IO: + ckpt_dict = self.load_checkpoint(ckpt_path, map_location="cpu") + else: + with io.BytesIO(self.client.get(ckpt_path)) as buffer: + ckpt_dict = torch.load(buffer, map_location="cpu") + + if 'module' in list(ckpt_dict['state_dict'].keys())[0] and self.config.GPU_NUMBERS == 1: + self.policy.net = torch.nn.DataParallel(self.policy.net.to(self.device), + device_ids=[self.device], output_device=self.device) + self.policy.load_state_dict(ckpt_dict["state_dict"], strict=False) + self.policy.net = self.policy.net.module + self.waypoint_predictor = torch.nn.DataParallel(self.waypoint_predictor.to(self.device), + device_ids=[self.device], output_device=self.device) + else: + self.policy.load_state_dict(ckpt_dict["state_dict"], strict=False) + if config.IL.is_requeue: + start_iter = ckpt_dict["iteration"] + self.optimizer.load_state_dict(ckpt_dict["optim_state"]) + logger.info(f"Loaded weights from checkpoint: {ckpt_path}, iteration: {start_iter}") + + params = sum(param.numel() for param in self.policy.parameters()) + params_t = sum( + p.numel() for p in self.policy.parameters() if p.requires_grad + ) + logger.info(f"Agent parameters: {params/1e6:.2f} MB. 
Trainable: {params_t/1e6:.2f} MB.") + logger.info("Finished setting up policy.") + + return start_iter + + def _teacher_action(self, batch_angles, batch_distances, candidate_lengths): + if self.config.MODEL.task_type == 'r2r': + cand_dists_to_goal = [[] for _ in range(len(batch_angles))] + oracle_cand_idx = [] + for j in range(len(batch_angles)): + for k in range(len(batch_angles[j])): + angle_k = batch_angles[j][k] + forward_k = batch_distances[j][k] + dist_k = self.envs.call_at(j, "cand_dist_to_goal", {"angle": angle_k, "forward": forward_k}) + cand_dists_to_goal[j].append(dist_k) + curr_dist_to_goal = self.envs.call_at(j, "current_dist_to_goal") + # if within target range (which def as 3.0) + if curr_dist_to_goal < 1.5: + oracle_cand_idx.append(candidate_lengths[j] - 1) + else: + oracle_cand_idx.append(np.argmin(cand_dists_to_goal[j])) + return oracle_cand_idx + elif self.config.MODEL.task_type == 'rxr': + kargs = [] + current_episodes = self.envs.current_episodes() + for i in range(self.envs.num_envs): + kargs.append({ + 'ref_path':self.gt_data[str(current_episodes[i].episode_id)]['locations'], + 'angles':batch_angles[i], + 'distances':batch_distances[i], + 'candidate_length':candidate_lengths[i] + }) + + outputs = self.envs.call(["get_cand_idx"]*self.envs.num_envs,kargs) + oracle_cand_idx, progresses = [list(x) for x in zip(*outputs)] + return oracle_cand_idx, progresses + + def _cand_pano_feature_variable(self, obs): + batch_size = len(obs['cand_angles']) + ob_cand_lens = [len(x)+1 for x in obs['cand_angles']] # +1 is for the end + ob_lens = [] + ob_rgb_fts, ob_dep_fts, ob_ang_fts, ob_dis_fts, ob_nav_types = [], [], [], [], [] + # Note: The candidate_feat at len(ob['candidate']) is the feature for the END + # which is zero in my implementation + for i in range(batch_size): + cand_nav_types = [] + cand_idxes = np.zeros(12, dtype=np.bool) + cand_idxes[obs['cand_img_idxes'][i]] = True + # cand + cand_rgb_fts = obs['cand_rgb'][i] + cand_dep_fts = obs['cand_depth'][i] + cand_ang_fts = obs['cand_angle_fts'][i] + cand_dis_fts = obs['cand_dis_fts'][i] + cand_nav_types += [1] * cand_ang_fts.shape[0] + # stop + stop_rgb_fts = torch.zeros([1, 768]) + # stop_rgb_fts = torch.zeros([1, 2048]) + stop_dep_fts = torch.zeros([1, 128]) + stop_ang_fts = torch.zeros([1, 4]) + stop_dis_fts = torch.zeros([1, 4]) + cand_nav_types += [2] + # pano context + pano_rgb_fts = obs['pano_rgb'][i][~cand_idxes] + pano_dep_fts = obs['pano_depth'][i][~cand_idxes] + pano_ang_fts = obs['pano_angle_fts'][~cand_idxes] + pano_dis_fts = obs['pano_dis_fts'][~cand_idxes] + cand_nav_types += [0] * (12-np.sum(cand_idxes)) + + cand_pano_rgb = torch.cat([cand_rgb_fts, stop_rgb_fts, pano_rgb_fts], dim=0) + cand_pano_dep = torch.cat([cand_dep_fts, stop_dep_fts, pano_dep_fts], dim=0) + cand_pano_ang = torch.cat([cand_ang_fts, stop_ang_fts, pano_ang_fts], dim=0) + cand_pano_dis = torch.cat([cand_dis_fts, stop_dis_fts, pano_dis_fts], dim=0) + ob_rgb_fts.append(cand_pano_rgb) + ob_dep_fts.append(cand_pano_dep) + ob_ang_fts.append(cand_pano_ang) + ob_dis_fts.append(cand_pano_dis) + ob_nav_types.append(cand_nav_types) + ob_lens.append(len(cand_nav_types)) + + # pad features to max_len + max_len = max(ob_lens) + for i in range(batch_size): + num_pads = max_len - ob_lens[i] + ob_rgb_fts[i] = torch.cat([ob_rgb_fts[i], torch.zeros(num_pads, 768)], dim=0) + # ob_rgb_fts[i] = torch.cat([ob_rgb_fts[i], torch.zeros(num_pads, 2048)], dim=0) + ob_dep_fts[i] = torch.cat([ob_dep_fts[i], torch.zeros(num_pads, 128)], dim=0) + ob_ang_fts[i] = 
torch.cat([ob_ang_fts[i], torch.zeros(num_pads, 4)], dim=0) + ob_dis_fts[i] = torch.cat([ob_dis_fts[i], torch.zeros(num_pads, 4)], dim=0) + ob_nav_types[i] = np.array(ob_nav_types[i] + [0]*num_pads) + + ob_rgb_fts = torch.stack(ob_rgb_fts, dim=0).cuda() + ob_dep_fts = torch.stack(ob_dep_fts, dim=0).cuda() + ob_ang_fts = torch.stack(ob_ang_fts, dim=0).cuda() + ob_dis_fts = torch.stack(ob_dis_fts, dim=0).cuda() + ob_nav_types = torch.from_numpy(np.stack(ob_nav_types, 0)).cuda() + + return ob_rgb_fts, ob_dep_fts, ob_ang_fts, ob_dis_fts, ob_nav_types, ob_lens, ob_cand_lens + + def _history_variable(self, obs): + batch_size = obs['pano_rgb'].shape[0] + hist_rgb_fts = obs['pano_rgb'][:, 0, ...].cuda() + hist_depth_fts = obs['pano_depth'][:, 0, ...].cuda() + hist_pano_rgb_fts = obs['pano_rgb'].cuda() + hist_pano_depth_fts = obs['pano_depth'].cuda() + hist_pano_ang_fts = obs['pano_angle_fts'].unsqueeze(0).expand(batch_size, -1, -1).cuda() + + return hist_rgb_fts, hist_depth_fts, hist_pano_rgb_fts, hist_pano_depth_fts, hist_pano_ang_fts + + @staticmethod + def _pause_envs(envs, batch, envs_to_pause, *args): + if len(envs_to_pause) > 0: + state_index = list(range(envs.num_envs)) + for idx in reversed(envs_to_pause): + state_index.pop(idx) + envs.pause_at(idx) + for arg in args: + arg.pop(idx) + + for k, v in batch.items(): + if k != 'video_rgbs': + batch[k] = v[state_index] + # else: + # batch[k] = [v[state_i] for state_i in state_index] + + return envs, batch + + @torch.no_grad() + def _eval_checkpoint( + self, + checkpoint_path: str, + writer: TensorboardWriter, + checkpoint_index: int = 0, + ) -> None: + if self.local_rank < 1: + logger.info(f"checkpoint_path: {checkpoint_path}") + + if self.config.EVAL.USE_CKPT_CONFIG: + config = self._setup_eval_config( + self.load_checkpoint(checkpoint_path, map_location="cpu")["config"] + ) + else: + config = self.config.clone() + config.defrost() + config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False + config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = -1 + config.IL.ckpt_to_load = checkpoint_path + if config.IL.progress_monitor == True: + config.MODEL.progress_monitor = True + config.MODEL.max_len = config.IL.max_text_len + else: + config.MODEL.progress_monitor = False + config.freeze() + + if config.EVAL.SAVE_RESULTS: + fname = os.path.join( + config.RESULTS_DIR, + f"stats_ckpt_{checkpoint_index}_{config.TASK_CONFIG.DATASET.SPLIT}.json", + ) + if os.path.exists(fname): + print("skipping -- evaluation exists.") + return + envs = construct_envs( + config, + get_env_class(config.ENV_NAME), + auto_reset_done=False, # unseen: 11006 + ) + dataset_length = sum(envs.number_of_episodes) + print('local rank:', self.local_rank, '|', 'dataset length:', dataset_length) + + obs_transforms = get_active_obs_transforms(config) + observation_space = apply_obs_transforms_obs_space( + envs.observation_spaces[0], obs_transforms + ) + self._initialize_policy( + config, + load_from_ckpt=True, + observation_space=observation_space, + action_space=envs.action_spaces[0], + ) + self.policy.eval() + self.waypoint_predictor.eval() + + state_episodes = {} + if config.EVAL.EPISODE_COUNT == -1: + episodes_to_eval = sum(envs.number_of_episodes) + else: + episodes_to_eval = min( + config.EVAL.EPISODE_COUNT, sum(envs.number_of_episodes) + ) + pbar = tqdm.tqdm(total=episodes_to_eval) if config.use_pbar else None + + while len(state_episodes) < episodes_to_eval: + envs.resume_all() + observations = envs.reset() + instr_max_len = self.config.IL.max_text_len # 
r2r 80, rxr 200 + instr_pad_id = 1 if self.config.MODEL.task_type == 'rxr' else 0 + observations = extract_instruction_tokens(observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID, + max_length=instr_max_len, pad_id=instr_pad_id) + batch = batch_obs(observations, self.device) + batch = apply_obs_transforms_batch(batch, obs_transforms) + + keys = ['rgb', 'rgb_30', 'rgb_60', 'rgb_90', 'rgb_120', 'rgb_150', 'rgb_180', 'rgb_210', 'rgb_240', 'rgb_270', 'rgb_300', 'rgb_330'] + # states = [[envs.call_at(i,"get_agent_state",{})] for i in range(envs.num_envs)] + history_images = [{k:observations[i][k][None,...] for k in keys} for i in range(envs.num_envs)] + video_inputs = [{k:observations[i][k][None,...].repeat(16,0) for k in keys} for i in range(envs.num_envs)] + batch['video_rgbs'] = video_inputs + + envs_to_pause = [i for i, ep in enumerate(envs.current_episodes()) if ep.episode_id in state_episodes] + envs, batch = self._pause_envs(envs, batch, envs_to_pause, history_images, video_inputs) + + if envs.num_envs == 0: + break + + # encode instructions + all_txt_ids = batch['instruction'] + all_txt_masks = (all_txt_ids != instr_pad_id) + all_txt_embeds = self.policy.net( + mode='language', + txt_ids=all_txt_ids, + txt_masks=all_txt_masks, + ) + + not_done_index = list(range(envs.num_envs)) + hist_lens = np.ones(envs.num_envs, dtype=np.int64) + hist_embeds = [self.policy.net('history').expand(envs.num_envs, -1)] + for stepk in range(self.max_len): + txt_embeds = all_txt_embeds[not_done_index] + txt_masks = all_txt_masks[not_done_index] + + # cand waypoint prediction + wp_outputs = self.policy.net( + mode = "waypoint", + waypoint_predictor = self.waypoint_predictor, + observations = batch, + in_train = False, + ) + ob_rgb_fts, ob_dep_fts, ob_ang_fts, ob_dis_fts, \ + ob_nav_types, ob_lens, ob_cand_lens = self._cand_pano_feature_variable(wp_outputs) + + ob_masks = length2mask(ob_lens).logical_not() + + # navigation + visual_inputs = { + 'mode': 'navigation', + 'txt_embeds': txt_embeds, + 'txt_masks': txt_masks, + 'hist_embeds': hist_embeds, # history before t step + 'hist_lens': hist_lens, + 'ob_rgb_fts': ob_rgb_fts, + 'ob_dep_fts': ob_dep_fts, + 'ob_ang_fts': ob_ang_fts, + 'ob_dis_fts': ob_dis_fts, + 'ob_nav_types': ob_nav_types, + 'ob_masks': ob_masks, + 'return_states': False, + } + t_outputs = self.policy.net(**visual_inputs) + logits = t_outputs[0] + + # sample action + a_t = logits.argmax(dim=-1, keepdim=True) + cpu_a_t = a_t.squeeze(1).cpu().numpy() + + # update history + if stepk != self.max_len-1: + hist_rgb_fts, hist_depth_fts, hist_pano_rgb_fts, hist_pano_depth_fts, hist_pano_ang_fts = self._history_variable(wp_outputs) + prev_act_ang_fts = torch.zeros([envs.num_envs, 4]).cuda() + for i, next_id in enumerate(cpu_a_t): + prev_act_ang_fts[i] = ob_ang_fts[i, next_id] + t_hist_inputs = { + 'mode': 'history', + 'hist_rgb_fts': hist_rgb_fts, + 'hist_depth_fts': hist_depth_fts, + 'hist_ang_fts': prev_act_ang_fts, + 'hist_pano_rgb_fts': hist_pano_rgb_fts, + 'hist_pano_depth_fts': hist_pano_depth_fts, + 'hist_pano_ang_fts': hist_pano_ang_fts, + 'ob_step': stepk, + } + t_hist_embeds = self.policy.net(**t_hist_inputs) + hist_embeds.append(t_hist_embeds) + hist_lens = hist_lens + 1 + + # make equiv action + env_actions = [] + for j in range(envs.num_envs): + if cpu_a_t[j].item()==ob_cand_lens[j]-1 or stepk==self.max_len-1: + env_actions.append({'action':{'action': 0, 'action_args':{}}}) + else: + t_angle = wp_outputs['cand_angles'][j][cpu_a_t[j]] + if self.config.EVAL.ANG30: + t_angle = 
round(t_angle / math.radians(30)) * math.radians(30) + t_distance = wp_outputs['cand_distances'][j][cpu_a_t[j]] + env_actions.append({'action':{'action': 4, 'action_args':{'angle': t_angle, 'distance': t_distance, + 'niu1niu': not self.config.TASK_CONFIG.SIMULATOR.HABITAT_SIM_V0.ALLOW_SLIDING} + } + }) + outputs = envs.step(env_actions) + observations, _, dones, infos = [list(x) for x in zip(*outputs)] + for j, ob in enumerate(observations): + if env_actions[j]['action']['action'] == 0: + continue + else: + envs.call_at( + j, 'change_current_path', # to update and record low-level path + {'new_path': ob.pop('positions'), + 'collisions': ob.pop('collisions')} + ) + + # calculate metric + current_episodes = envs.current_episodes() + for i in range(envs.num_envs): + if not dones[i]: + continue + info = infos[i] + metric = {} + metric['steps_taken'] = info['steps_taken'] + ep_id = str(current_episodes[i].episode_id) + gt_path = np.array(self.gt_data[ep_id]['locations']).astype(np.float) + if 'current_path' in current_episodes[i].info.keys(): + positions_ = np.array(current_episodes[i].info['current_path']).astype(np.float) + collisions_ = np.array(current_episodes[i].info['collisions']) + assert collisions_.shape[0] == positions_.shape[0] - 1 + else: + positions_ = np.array(dis_to_con(np.array(info['position']['position']))).astype(np.float) + distance = np.array(info['position']['distance']).astype(np.float) + metric['distance_to_goal'] = distance[-1] + metric['success'] = 1. if distance[-1] <= 3. and env_actions[i]['action']['action'] == 0 else 0. + metric['oracle_success'] = 1. if (distance <= 3.).any() else 0. + metric['path_length'] = np.linalg.norm(positions_[1:] - positions_[:-1],axis=1).sum() + if collisions_.size == 0: + metric['collisions'] = 0 + else: + metric['collisions'] = collisions_.mean() + gt_length = distance[0] + metric['spl'] = metric['success']*gt_length/max(gt_length,metric['path_length']) + act_con_path = positions_ + gt_con_path = np.array(dis_to_con(gt_path)).astype(np.float) + dtw_distance = fastdtw(act_con_path, gt_con_path, dist=NDTW.euclidean_distance)[0] + nDTW = np.exp(-dtw_distance / (len(gt_con_path) * config.TASK_CONFIG.TASK.SUCCESS_DISTANCE)) + metric['ndtw'] = nDTW + state_episodes[current_episodes[i].episode_id] = metric + + if len(state_episodes)%300 == 0: + aggregated_states = {} + num_episodes = len(state_episodes) + for stat_key in next(iter(state_episodes.values())).keys(): + aggregated_states[stat_key] = ( + sum(v[stat_key] for v in state_episodes.values()) / num_episodes + ) + print(aggregated_states) + + if config.use_pbar: + pbar.update() + + # pause env + if sum(dones) > 0: + for i in reversed(list(range(envs.num_envs))): + if dones[i]: + not_done_index.pop(i) + envs.pause_at(i) + observations.pop(i) + video_inputs.pop(i) + history_images.pop(i) + if envs.num_envs == 0: + break + + for i in range(len(observations)): + states_i = observations[i].pop('states') + # states[i] += states_i + new_images_i = {k:[] for k in keys} + for position, rotation in states_i[:-1]: + new_image = envs.call_at(i,'get_pano_rgbs_observations_at', {'source_position':position,'source_rotation':rotation}) + for k in keys: + new_images_i[k].append(new_image[k][None,...]) + for k in keys: + new_images_i[k].append(observations[i][k][None,...]) + history_images[i][k] = np.vstack((history_images[i][k], np.vstack(new_images_i[k]))) + if len(history_images[i][k]) < 16: + video_inputs[i][k][16-len(history_images[i][k]):] = history_images[i][k] + else: + video_inputs[i][k] = 
history_images[i][k][-16:] + # print(i,stepk,len(new_images_i[k])) + position, rotation = states_i[-1] + envs.call_at(i,'set_agent_state', {'position':position,'rotation':rotation}) + + hist_lens = hist_lens[np.array(dones)==False] + for j in range(len(hist_embeds)): + hist_embeds[j] = hist_embeds[j][np.array(dones)==False] + observations = extract_instruction_tokens(observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID) + batch = batch_obs(observations, self.device) + batch = apply_obs_transforms_batch(batch, obs_transforms) + batch['video_rgbs'] = video_inputs + + envs.close() + if self.world_size > 1: + distr.barrier() + aggregated_states = {} + num_episodes = len(state_episodes) + for stat_key in next(iter(state_episodes.values())).keys(): + aggregated_states[stat_key] = ( + sum(v[stat_key] for v in state_episodes.values()) / num_episodes + ) + print(aggregated_states) + total = torch.tensor(num_episodes).cuda() + if self.world_size > 1: + distr.reduce(total,dst=0) + total = total.item() + + if self.world_size > 1: + logger.info(f"rank {self.local_rank}'s {num_episodes}-episode results: {aggregated_states}") + for k,v in aggregated_states.items(): + v = torch.tensor(v*num_episodes).cuda() + cat_v = gather_list_and_concat(v,self.world_size) + v = (sum(cat_v)/total).item() + aggregated_states[k] = v + + split = config.TASK_CONFIG.DATASET.SPLIT + fname = os.path.join( + config.RESULTS_DIR, + f"stats_ep_ckpt_{checkpoint_index}_{split}_r{self.local_rank}_w{self.world_size}.json", + ) + if config.EVAL.SAVE_RESULTS: + with open(fname, "w") as f: + json.dump(state_episodes, f, indent=4) + + if self.local_rank < 1: + if config.EVAL.SAVE_RESULTS: + fname = os.path.join( + config.RESULTS_DIR, + f"stats_ckpt_{checkpoint_index}_{split}.json", + ) + with open(fname, "w") as f: + json.dump(aggregated_states, f, indent=4) + + logger.info(f"Episodes evaluated: {total}") + + if config.EVAL.SAVE_RESULTS: + checkpoint_num = int(checkpoint_index[4:]) + for k, v in aggregated_states.items(): + logger.info(f"Average episode {k}: {v:.6f}") + if config.EVAL.SAVE_RESULTS: + writer.add_scalar(f"eval_{k}/{split}", v, checkpoint_num) + + @torch.no_grad() + def inference(self) -> None: + checkpoint_path = self.config.INFERENCE.CKPT_PATH + logger.info(f"checkpoint_path: {checkpoint_path}") + self.config.defrost() + self.config.IL.ckpt_to_load = checkpoint_path + self.config.TASK_CONFIG.DATASET.SPLIT = self.config.INFERENCE.SPLIT + self.config.TASK_CONFIG.DATASET.ROLES = ["guide"] + self.config.TASK_CONFIG.DATASET.LANGUAGES = self.config.INFERENCE.LANGUAGES + self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False + self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = -1 + if self.config.IL.progress_monitor == True: + self.config.MODEL.progress_monitor = True + self.config.MODEL.max_len = config.IL.max_text_len + else: + self.config.MODEL.progress_monitor = False + self.config.TASK_CONFIG.TASK.MEASUREMENTS = [] + self.config.TASK_CONFIG.TASK.SENSORS = [s for s in self.config.TASK_CONFIG.TASK.SENSORS if "INSTRUCTION" in s] + self.config.SIMULATOR_GPU_IDS = [self.config.SIMULATOR_GPU_IDS[self.config.local_rank]] + if 'HIGHTOLOW' in self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS: + idx = self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS.index('HIGHTOLOW') + self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS[idx] = 'HIGHTOLOWINFERENCE' + # if choosing image + resize_config = self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES + crop_config = 
self.config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS + config = self.config.TASK_CONFIG + camera_orientations = get_camera_orientations12() + for sensor_type in ["RGB", "DEPTH"]: + resizer_size = dict(resize_config)[sensor_type.lower()] + cropper_size = dict(crop_config)[sensor_type.lower()] + sensor = getattr(config.SIMULATOR, f"{sensor_type}_SENSOR") + for action, orient in camera_orientations.items(): + camera_template = f"{sensor_type}_{action}" + camera_config = deepcopy(sensor) + camera_config.ORIENTATION = camera_orientations[action] + camera_config.UUID = camera_template.lower() + setattr(config.SIMULATOR, camera_template, camera_config) + config.SIMULATOR.AGENT_0.SENSORS.append(camera_template) + resize_config.append((camera_template.lower(), resizer_size)) + crop_config.append((camera_template.lower(), cropper_size)) + self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES = resize_config + self.config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS = crop_config + self.config.TASK_CONFIG = config + self.config.SENSORS = config.SIMULATOR.AGENT_0.SENSORS + self.config.INFERENCE.EPISODE_COUNT = -1 + self.config.INFERENCE.ANG30 = False + self.config.freeze() + + if self.config.INFERENCE.USE_CKPT_CONFIG: + config = self._setup_eval_config( + self.load_checkpoint(checkpoint_path, map_location="cpu")["config"] + ) + else: + config = self.config.clone() + + envs = construct_envs( + config, + get_env_class(config.ENV_NAME), + auto_reset_done=False, + ) + + obs_transforms = get_active_obs_transforms(config) + observation_space = apply_obs_transforms_obs_space( + envs.observation_spaces[0], obs_transforms + ) + + self._initialize_policy( + config, + load_from_ckpt=True, + observation_space=observation_space, + action_space=envs.action_spaces[0], + ) + self.policy.eval() + self.waypoint_predictor.eval() + + episode_predictions = defaultdict(list) + # episode ID --> instruction ID for rxr predictions format + instruction_ids: Dict[str, int] = {} + + if config.INFERENCE.EPISODE_COUNT == -1: + episodes_to_infer = sum(envs.number_of_episodes) + else: + episodes_to_infer = min( + config.INFERENCE.EPISODE_COUNT, sum(envs.number_of_episodes) + ) + pbar = tqdm.tqdm(total=episodes_to_infer) + + while len(episode_predictions) < episodes_to_infer: + envs.resume_all() + observations = envs.reset() + instr_max_len = self.config.IL.max_text_len # r2r 80, rxr 300 + instr_pad_id = 1 if self.config.MODEL.task_type == 'rxr' else 0 + observations = extract_instruction_tokens(observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID, + max_length=instr_max_len, pad_id=instr_pad_id) + batch = batch_obs(observations, self.device) + batch = apply_obs_transforms_batch(batch, obs_transforms) + + keys = ['rgb', 'rgb_30', 'rgb_60', 'rgb_90', 'rgb_120', 'rgb_150', 'rgb_180', 'rgb_210', 'rgb_240', 'rgb_270', 'rgb_300', 'rgb_330'] + # states = [[envs.call_at(i,"get_agent_state",{})] for i in range(envs.num_envs)] + history_images = [{k:observations[i][k][None,...] 
for k in keys} for i in range(envs.num_envs)] + video_inputs = [{k:observations[i][k][None,...].repeat(16,0) for k in keys} for i in range(envs.num_envs)] + batch['video_rgbs'] = video_inputs + + envs_to_pause = [i for i, ep in enumerate(envs.current_episodes()) if ep.episode_id in episode_predictions] + envs, batch = self._pause_envs(envs, batch, envs_to_pause, history_images, video_inputs) + if envs.num_envs == 0: + break + + # init predicitos start point + current_episodes = envs.current_episodes() + for i in range(envs.num_envs): + episode_predictions[current_episodes[i].episode_id].append( + envs.call_at(i, "get_agent_info", {}) + ) + if config.INFERENCE.FORMAT == "rxr": + ep_id = current_episodes[i].episode_id + k = current_episodes[i].instruction.instruction_id + instruction_ids[ep_id] = int(k) + + # encode instructions + all_txt_ids = batch['instruction'] + all_txt_masks = (all_txt_ids != instr_pad_id) + all_txt_embeds = self.policy.net( + mode='language', + txt_ids=all_txt_ids, + txt_masks=all_txt_masks, + ) + + not_done_index = list(range(envs.num_envs)) + hist_lens = np.ones(envs.num_envs, dtype=np.int64) + hist_embeds = [self.policy.net('history').expand(envs.num_envs, -1)] + for stepk in range(self.max_len): + txt_embeds = all_txt_embeds[not_done_index] + txt_masks = all_txt_masks[not_done_index] + + # cand waypoint prediction + wp_outputs = self.policy.net( + mode = "waypoint", + waypoint_predictor = self.waypoint_predictor, + observations = batch, + in_train = False, + ) + ob_rgb_fts, ob_dep_fts, ob_ang_fts, ob_dis_fts, \ + ob_nav_types, ob_lens, ob_cand_lens = self._cand_pano_feature_variable(wp_outputs) + ob_masks = length2mask(ob_lens).logical_not() + + # navigation + visual_inputs = { + 'mode': 'navigation', + 'txt_embeds': txt_embeds, + 'txt_masks': txt_masks, + 'hist_embeds': hist_embeds, # history before t step + 'hist_lens': hist_lens, + 'ob_rgb_fts': ob_rgb_fts, + 'ob_dep_fts': ob_dep_fts, + 'ob_ang_fts': ob_ang_fts, + 'ob_dis_fts': ob_dis_fts, + 'ob_nav_types': ob_nav_types, + 'ob_masks': ob_masks, + 'return_states': False, + } + t_outputs = self.policy.net(**visual_inputs) + logits = t_outputs[0] + + # sample action + a_t = logits.argmax(dim=-1, keepdim=True) + cpu_a_t = a_t.squeeze(1).cpu().numpy() + + # update history + if stepk != self.max_len-1: + hist_rgb_fts, hist_depth_fts, hist_pano_rgb_fts, hist_pano_depth_fts, hist_pano_ang_fts = self._history_variable(wp_outputs) + prev_act_ang_fts = torch.zeros([envs.num_envs, 4]).cuda() + for i, next_id in enumerate(cpu_a_t): + prev_act_ang_fts[i] = ob_ang_fts[i, next_id] + t_hist_inputs = { + 'mode': 'history', + 'hist_rgb_fts': hist_rgb_fts, + 'hist_depth_fts': hist_depth_fts, + 'hist_ang_fts': prev_act_ang_fts, + 'hist_pano_rgb_fts': hist_pano_rgb_fts, + 'hist_pano_depth_fts': hist_pano_depth_fts, + 'hist_pano_ang_fts': hist_pano_ang_fts, + 'ob_step': stepk, + } + t_hist_embeds = self.policy.net(**t_hist_inputs) + hist_embeds.append(t_hist_embeds) + hist_lens = hist_lens + 1 + + # make equiv action + env_actions = [] + for j in range(envs.num_envs): + if cpu_a_t[j].item()==ob_cand_lens[j]-1 or stepk==self.max_len-1: + env_actions.append({'action':{'action': 0, 'action_args':{}}}) + else: + t_angle = wp_outputs['cand_angles'][j][cpu_a_t[j]] + if self.config.INFERENCE.ANG30: + t_angle = round(t_angle / math.radians(30)) * math.radians(30) + t_distance = wp_outputs['cand_distances'][j][cpu_a_t[j]] + env_actions.append({'action':{'action': 4, 'action_args':{'angle': t_angle, 'distance': t_distance, + 'niu1niu': not 
self.config.TASK_CONFIG.SIMULATOR.HABITAT_SIM_V0.ALLOW_SLIDING} + } + }) + outputs = envs.step(env_actions) + observations, _, dones, infos = [list(x) for x in zip(*outputs)] + for j, ob in enumerate(observations): + if env_actions[j]['action']['action'] == 0: + continue + else: + envs.call_at( + j, 'update_cur_path', {'new_path': ob.pop('cur_path')} + ) + + # record path + current_episodes = envs.current_episodes() + for i in range(envs.num_envs): + if not dones[i]: + continue + ep_id = current_episodes[i].episode_id + if 'cur_path' in current_episodes[i].info: + episode_predictions[ep_id] += current_episodes[i].info['cur_path'] + episode_predictions[ep_id][-1]['stop'] = True + pbar.update() + + # pause env + if sum(dones) > 0: + for i in reversed(list(range(envs.num_envs))): + if dones[i]: + not_done_index.pop(i) + envs.pause_at(i) + observations.pop(i) + video_inputs.pop(i) + history_images.pop(i) + if envs.num_envs == 0: + break + + for i in range(len(observations)): + states_i = observations[i].pop('states') + # states[i] += states_i + new_images_i = {k:[] for k in keys} + for position, rotation in states_i[:-1]: + new_image = envs.call_at(i,'get_pano_rgbs_observations_at', {'source_position':position,'source_rotation':rotation}) + for k in keys: + new_images_i[k].append(new_image[k][None,...]) + for k in keys: + new_images_i[k].append(observations[i][k][None,...]) + history_images[i][k] = np.vstack((history_images[i][k], np.vstack(new_images_i[k]))) + if len(history_images[i][k]) < 16: + video_inputs[i][k][16-len(history_images[i][k]):] = history_images[i][k] + else: + video_inputs[i][k] = history_images[i][k][-16:] + # print(i,stepk,len(new_images_i[k])) + position, rotation = states_i[-1] + envs.call_at(i,'set_agent_state', {'position':position,'rotation':rotation}) + + hist_lens = hist_lens[np.array(dones)==False] + for j in range(len(hist_embeds)): + hist_embeds[j] = hist_embeds[j][np.array(dones)==False] + observations = extract_instruction_tokens(observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID) + batch = batch_obs(observations, self.device) + batch = apply_obs_transforms_batch(batch, obs_transforms) + batch['video_rgbs'] = video_inputs + + envs.close() + if config.INFERENCE.FORMAT == "r2r": + with open(config.INFERENCE.PREDICTIONS_FILE, "w") as f: + json.dump(episode_predictions, f, indent=2) + logger.info(f"Predictions saved to: {config.INFERENCE.PREDICTIONS_FILE}") + else: # use 'rxr' format for rxr-habitat leaderboard + predictions_out = [] + + for k,v in episode_predictions.items(): + + # save only positions that changed + path = [v[0]["position"]] + for p in v[1:]: + if path[-1] != p["position"]: + path.append(p["position"]) + + predictions_out.append( + { + "instruction_id": instruction_ids[k], + "path": path, + } + ) + + predictions_out.sort(key=lambda x: x["instruction_id"]) + with jsonlines.open( + config.INFERENCE.PREDICTIONS_FILE, mode="w" + ) as writer: + writer.write_all(predictions_out) + + logger.info( + f"Predictions saved to: {config.INFERENCE.PREDICTIONS_FILE}" + ) \ No newline at end of file diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/utils.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/utils.py new file mode 100755 index 0000000..c710d97 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/utils.py @@ -0,0 +1,223 @@ +import torch +import torch.distributed as dist +import numpy as np +import math +import copy + +class ARGS(): + def __init__(self): + self.local_rank = 0 + +def 
reduce_loss(tensor, rank, world_size): + with torch.no_grad(): + dist.reduce(tensor, dst=0) + if rank == 0: + # print(tensor) + tensor /= world_size + +def gather_list_and_concat(list_of_nums,world_size): + if not torch.is_tensor(list_of_nums): + tensor = torch.Tensor(list_of_nums).cuda() + else: + if list_of_nums.is_cuda == False: + tensor = list_of_nums.cuda() + else: + tensor = list_of_nums + gather_t = [torch.ones_like(tensor) for _ in + range(world_size)] + dist.all_gather(gather_t, tensor) + return gather_t + +def repeat_allocation(allocations, max_number): + if torch.is_tensor(max_number): + max_number = max_number.long().item() + else: + max_number = max_number.long() + allocation_number = len(allocations) + repeat_time, res = max_number // allocation_number, max_number % allocation_number + allocations_ = [] + for i in range(repeat_time): + allocations_ += copy.deepcopy(allocations) + allocations_ += copy.deepcopy(allocations)[:res] + + return allocations_ + + +def allocate(number, ep_length, size_per_time): + length_to_indexes = {ep_length[i]: [] for i in + range(len(ep_length))} + for i in range(len(ep_length)): + length_to_indexes[ep_length[i]] += [i]*number[i] + + values = [] + for i in range(len(number)): + values += [ep_length[i]] * number[i] + + groups = int((len(values) - 0.01) // size_per_time + 1) + + values.sort(reverse=True) + + load_balance_groups = [[] for grp in range(groups)] + + for v in values: + load_balance_groups.sort(key=lambda x: sum(x)) + load_balance_groups[0].append(v) + + indexes = [] + set_length = list(set(ep_length)) + for i in range(groups): + index = np.zeros(len(load_balance_groups[i]),dtype=int) + for j in range(len(set_length)): + length_indexes = length_to_indexes[set_length[j]] + position = np.where(np.array(load_balance_groups[i]) == + set_length[j])[0] + position_length = len(position) + # print(position_length,j) + index[position] = length_indexes[:position_length] + # print(length_indexes) + length_to_indexes[set_length[j]] = length_indexes[position_length:] + indexes.append((index).tolist()) + + return indexes + +def allocate_instructions(instruction_lengths, allocations,ep_length, instruction_ids): + instruction_ids_copy = copy.deepcopy(instruction_ids) + allocations_copy = copy.deepcopy(allocations) + instruction_lengths_copy = copy.deepcopy(instruction_lengths) + values = [] + value_indexes = [] + weights = [] + for i in range(len(instruction_lengths)): + instruction_length = instruction_lengths[i] + values += instruction_length + value_indexes += len(instruction_length)*[i] + weights += [ep_length[i]] * len(instruction_length) + # values = np.array(values) + # value_indexes = np.array(value_indexes) + values = np.array(values) + weights = np.array(weights) + value_indexes = np.array(value_indexes) + sorted_index = np.argsort(values*weights)[::-1] + values = values[sorted_index] + value_indexes = value_indexes[sorted_index] + weights = weights[sorted_index] + + groups = len(allocations) + load_balance_groups = [[] for grp in range(groups)] + group_weights = [[] for grp in range(groups)] + instruction_allocations = [[] for grp in range(groups)] + for j in range(len(values)): + summation = np.array([np.sum(np.array(load_balance_groups[i])*np.array(group_weights[i])) for i in range(groups)]) + sorted_index = np.argsort(summation) + for i in sorted_index: + index = value_indexes[j] + value = values[j] + if index in allocations_copy[i]: + allocations_copy[i].remove(index) + load_balance_groups[i].append(value) + 
group_weights[i].append(weights[j]) + # check[i].append(index) + index_in_length = np.where(np.array(instruction_lengths_copy[index]) == value)[0][0] + instruction_lengths_copy[index].pop(index_in_length) + instruction_allocations[i].append(instruction_ids_copy[index].pop(index_in_length)) + break + + return instruction_allocations + + +def allocate_by_scene_for_ddp(number, ep_length, size_per_time): + length_to_indexes = {ep_length[i]: [] for i in + range(len(ep_length))} + for i in range(len(ep_length)): + length_to_indexes[ep_length[i]] += [i]*number[i] + + values = [] + for i in range(len(number)): + values += [ep_length[i]] * number[i] + + groups = int((len(values) - 0.01) // size_per_time + 1) + + values.sort(reverse=True) + + load_balance_groups = [[] for grp in range(groups)] + + for v in values: + load_balance_groups.sort(key=lambda x: sum(x)) + load_balance_groups[0].append(v) + + indexes = [] + set_length = list(set(ep_length)) + for i in range(groups): + index = np.zeros(len(load_balance_groups[i]),dtype=int) + for j in range(len(set_length)): + length_indexes = length_to_indexes[set_length[j]] + position = np.where(np.array(load_balance_groups[i]) == + set_length[j])[0] + position_length = len(position) + # print(position_length,j) + index[position] = length_indexes[:position_length] + # print(length_indexes) + length_to_indexes[set_length[j]] = length_indexes[position_length:] + indexes.append((index).tolist()) + + return indexes + + +def get_camera_orientations12(): + base_angle_deg = 30 + base_angle_rad = math.pi / 6 + orient_dict = {} + for k in range(1,12): + orient_dict[str(base_angle_deg*k)] = [0.0, base_angle_rad*k, 0.0] + return orient_dict + + +def get_camera_orientations24(): + base_angle_deg = 15 + base_angle_rad = math.pi / 12 + orient_dict = {} + for k in range(1,24): + orient_dict[str(base_angle_deg*k)] = [0.0, base_angle_rad*k, 0.0] + return orient_dict + + +def length2mask(length, size=None): + batch_size = len(length) + size = int(max(length)) if size is None else size + mask = (torch.arange(size, dtype=torch.int64).unsqueeze(0).repeat(batch_size, 1) + > (torch.LongTensor(length) - 1).unsqueeze(1)).cuda() + return mask + + +def dir_angle_feature(angle_list, device=None): + feature_dim = 64 + batch_size = len(angle_list) + max_leng = max([len(k) for k in angle_list]) + 1 # +1 for stop + heading_enc = torch.zeros( + batch_size, max_leng, feature_dim, dtype=torch.float32) + + for i in range(batch_size): + for j, angle_rad in enumerate(angle_list[i]): + heading_enc[i][j] = torch.tensor( + [math.sin(angle_rad), + math.cos(angle_rad)] * (feature_dim // 2)) + + return heading_enc + + +def dir_angle_feature_with_ele(angle_list, device=None): + feature_dim = 128 + batch_size = len(angle_list) + max_leng = max([len(k) for k in angle_list]) + 1 # +1 for stop + heading_enc = torch.zeros( + batch_size, max_leng, feature_dim, dtype=torch.float32) + + for i in range(batch_size): + for j, angle_rad in enumerate(angle_list[i]): + heading_enc[i][j] = torch.tensor( + [ + math.sin(angle_rad), math.cos(angle_rad), + math.sin(0.0), math.cos(0.0), # elevation + ] * (128 // 4)) + + return heading_enc \ No newline at end of file diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/waypoint_pred/TRM_net.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/waypoint_pred/TRM_net.py new file mode 100755 index 0000000..d0cefdb --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/waypoint_pred/TRM_net.py @@ -0,0 +1,104 @@ +import torch +import 
torch.nn as nn +import numpy as np +import vlnce_baselines.waypoint_pred.utils as utils + +from .transformer.waypoint_bert import WaypointBert +from pytorch_transformers import BertConfig + +class BinaryDistPredictor_TRM(nn.Module): + def __init__(self, hidden_dim=768, n_classes=12, device=None): + super(BinaryDistPredictor_TRM, self).__init__() + + self.device = device + + self.num_angles = 120 + self.num_imgs = 12 + self.n_classes = 12 # num of distances + self.TRM_LAYER = 2 + self.TRM_NEIGHBOR = 1 + self.HEATMAP_OFFSET = 5 + + # self.visual_fc_rgb = nn.Sequential( + # nn.Flatten(), + # nn.Linear(np.prod([2048,7,7]), hidden_dim), + # nn.ReLU(True), + # ) + self.visual_fc_depth = nn.Sequential( + nn.Flatten(), + nn.Linear(np.prod([128,4,4]), hidden_dim), + nn.ReLU(True), + ) + self.visual_merge = nn.Sequential( + nn.Linear(hidden_dim*2, hidden_dim), + nn.ReLU(True), + ) + + config = BertConfig() + config.model_type = 'visual' + config.finetuning_task = 'waypoint_predictor' + config.hidden_dropout_prob = 0.3 + config.hidden_size = 768 + config.num_attention_heads = 12 + config.num_hidden_layers = self.TRM_LAYER + self.waypoint_TRM = WaypointBert(config=config) + + layer_norm_eps = config.layer_norm_eps + self.mergefeats_LayerNorm = BertLayerNorm( + hidden_dim, + eps=layer_norm_eps + ) + self.mask = utils.get_attention_mask( + num_imgs=self.num_imgs, + neighbor=self.TRM_NEIGHBOR).to(self.device) + + self.vis_classifier = nn.Sequential( + nn.Linear(hidden_dim, hidden_dim), + nn.ReLU(), + nn.Linear(hidden_dim, + int(n_classes*(self.num_angles/self.num_imgs))), + ) + + def forward(self, rgb_feats, depth_feats): + bsi = rgb_feats.size(0) // self.num_imgs + + # rgb_x = self.visual_fc_rgb(rgb_feats).reshape( + # bsi, self.num_imgs, -1) + depth_x = self.visual_fc_depth(depth_feats).reshape( + bsi, self.num_imgs, -1) + # vis_x = self.visual_merge( + # torch.cat((rgb_x, depth_x), dim=-1) + # ) + vis_x = depth_x + + attention_mask = self.mask.repeat(bsi,1,1,1) + vis_rel_x = self.waypoint_TRM( + vis_x, attention_mask=attention_mask + ) + + vis_logits = self.vis_classifier(vis_rel_x) + vis_logits = vis_logits.reshape( + bsi, self.num_angles, self.n_classes) + + # heatmap offset (each image is pointing at the middle) + vis_logits = torch.cat( + (vis_logits[:,self.HEATMAP_OFFSET:,:], vis_logits[:,:self.HEATMAP_OFFSET,:]), + dim=1) + + return vis_logits + + +class BertLayerNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-12): + """Construct a layernorm module in the TF style (epsilon inside the square root). + """ + super(BertLayerNorm, self).__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.bias = nn.Parameter(torch.zeros(hidden_size)) + self.variance_epsilon = eps + + def forward(self, x): + u = x.mean(-1, keepdim=True) + s = (x - u).pow(2).mean(-1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.variance_epsilon) + return self.weight * x + self.bias diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/waypoint_pred/transformer/pytorch_transformer/file_utils.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/waypoint_pred/transformer/pytorch_transformer/file_utils.py new file mode 100755 index 0000000..fd655ce --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/waypoint_pred/transformer/pytorch_transformer/file_utils.py @@ -0,0 +1,259 @@ +""" +Utilities for working with the local dataset cache. +This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp +Copyright by the AllenNLP authors. 
+""" +from __future__ import (absolute_import, division, print_function, unicode_literals) + +import sys +import json +import logging +import os +import shutil +import tempfile +import fnmatch +from functools import wraps +from hashlib import sha256 +from io import open + +import boto3 +import requests +from botocore.exceptions import ClientError +from tqdm import tqdm + +try: + from torch.hub import _get_torch_home + torch_cache_home = _get_torch_home() +except ImportError: + torch_cache_home = os.path.expanduser( + os.getenv('TORCH_HOME', os.path.join( + os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))) +default_cache_path = os.path.join(torch_cache_home, 'pytorch_transformers') + +try: + from urllib.parse import urlparse +except ImportError: + from urlparse import urlparse + +try: + from pathlib import Path + PYTORCH_PRETRAINED_BERT_CACHE = Path( + os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)) +except (AttributeError, ImportError): + PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', + default_cache_path) + +logger = logging.getLogger(__name__) # pylint: disable=invalid-name + + +def url_to_filename(url, etag=None): + """ + Convert `url` into a hashed filename in a repeatable way. + If `etag` is specified, append its hash to the url's, delimited + by a period. + """ + url_bytes = url.encode('utf-8') + url_hash = sha256(url_bytes) + filename = url_hash.hexdigest() + + if etag: + etag_bytes = etag.encode('utf-8') + etag_hash = sha256(etag_bytes) + filename += '.' + etag_hash.hexdigest() + + return filename + + +def filename_to_url(filename, cache_dir=None): + """ + Return the url and etag (which may be ``None``) stored for `filename`. + Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist. + """ + if cache_dir is None: + cache_dir = PYTORCH_PRETRAINED_BERT_CACHE + if sys.version_info[0] == 3 and isinstance(cache_dir, Path): + cache_dir = str(cache_dir) + + cache_path = os.path.join(cache_dir, filename) + if not os.path.exists(cache_path): + raise EnvironmentError("file {} not found".format(cache_path)) + + meta_path = cache_path + '.json' + if not os.path.exists(meta_path): + raise EnvironmentError("file {} not found".format(meta_path)) + + with open(meta_path, encoding="utf-8") as meta_file: + metadata = json.load(meta_file) + url = metadata['url'] + etag = metadata['etag'] + + return url, etag + + +def cached_path(url_or_filename, cache_dir=None): + """ + Given something that might be a URL (or might be a local path), + determine which. If it's a URL, download the file and cache it, and + return the path to the cached file. If it's already a local path, + make sure the file exists and then return the path. + """ + if cache_dir is None: + cache_dir = PYTORCH_PRETRAINED_BERT_CACHE + if sys.version_info[0] == 3 and isinstance(url_or_filename, Path): + url_or_filename = str(url_or_filename) + if sys.version_info[0] == 3 and isinstance(cache_dir, Path): + cache_dir = str(cache_dir) + + parsed = urlparse(url_or_filename) + + if parsed.scheme in ('http', 'https', 's3'): + # URL, so get it from the cache (downloading if necessary) + return get_from_cache(url_or_filename, cache_dir) + elif os.path.exists(url_or_filename): + # File, and it exists. + return url_or_filename + elif parsed.scheme == '': + # File, but it doesn't exist. 
+ raise EnvironmentError("file {} not found".format(url_or_filename)) + else: + # Something unknown + raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename)) + + +def split_s3_path(url): + """Split a full s3 path into the bucket name and path.""" + parsed = urlparse(url) + if not parsed.netloc or not parsed.path: + raise ValueError("bad s3 path {}".format(url)) + bucket_name = parsed.netloc + s3_path = parsed.path + # Remove '/' at beginning of path. + if s3_path.startswith("/"): + s3_path = s3_path[1:] + return bucket_name, s3_path + + +def s3_request(func): + """ + Wrapper function for s3 requests in order to create more helpful error + messages. + """ + + @wraps(func) + def wrapper(url, *args, **kwargs): + try: + return func(url, *args, **kwargs) + except ClientError as exc: + if int(exc.response["Error"]["Code"]) == 404: + raise EnvironmentError("file {} not found".format(url)) + else: + raise + + return wrapper + + +@s3_request +def s3_etag(url): + """Check ETag on S3 object.""" + s3_resource = boto3.resource("s3") + bucket_name, s3_path = split_s3_path(url) + s3_object = s3_resource.Object(bucket_name, s3_path) + return s3_object.e_tag + + +@s3_request +def s3_get(url, temp_file): + """Pull a file directly from S3.""" + s3_resource = boto3.resource("s3") + bucket_name, s3_path = split_s3_path(url) + s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file) + + +def http_get(url, temp_file): + req = requests.get(url, stream=True) + content_length = req.headers.get('Content-Length') + total = int(content_length) if content_length is not None else None + progress = tqdm(unit="B", total=total) + for chunk in req.iter_content(chunk_size=1024): + if chunk: # filter out keep-alive new chunks + progress.update(len(chunk)) + temp_file.write(chunk) + progress.close() + + +def get_from_cache(url, cache_dir=None): + """ + Given a URL, look for the corresponding dataset in the local cache. + If it's not there, download it. Then return the path to the cached file. + """ + if cache_dir is None: + cache_dir = PYTORCH_PRETRAINED_BERT_CACHE + if sys.version_info[0] == 3 and isinstance(cache_dir, Path): + cache_dir = str(cache_dir) + if sys.version_info[0] == 2 and not isinstance(cache_dir, str): + cache_dir = str(cache_dir) + + if not os.path.exists(cache_dir): + os.makedirs(cache_dir) + + # Get eTag to add to filename, if it exists. + if url.startswith("s3://"): + etag = s3_etag(url) + else: + try: + response = requests.head(url, allow_redirects=True) + if response.status_code != 200: + etag = None + else: + etag = response.headers.get("ETag") + except EnvironmentError: + etag = None + + if sys.version_info[0] == 2 and etag is not None: + etag = etag.decode('utf-8') + filename = url_to_filename(url, etag) + + # get cache path to put the file + cache_path = os.path.join(cache_dir, filename) + + # If we don't have a connection (etag is None) and can't identify the file + # try to get the last downloaded one + if not os.path.exists(cache_path) and etag is None: + matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*') + matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files)) + if matching_files: + cache_path = os.path.join(cache_dir, matching_files[-1]) + + if not os.path.exists(cache_path): + # Download to temporary file, then copy to cache dir once finished. + # Otherwise you get corrupt cache entries if the download gets interrupted. 
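+            # NamedTemporaryFile is created with delete=True by default, so the temp file is
+            # removed automatically when this block exits, after the copy into the cache below.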
+ with tempfile.NamedTemporaryFile() as temp_file: + logger.info("%s not found in cache, downloading to %s", url, temp_file.name) + + # GET file object + if url.startswith("s3://"): + s3_get(url, temp_file) + else: + http_get(url, temp_file) + + # we are copying the file before closing it, so flush to avoid truncation + temp_file.flush() + # shutil.copyfileobj() starts at the current position, so go to the start + temp_file.seek(0) + + logger.info("copying %s to cache at %s", temp_file.name, cache_path) + with open(cache_path, 'wb') as cache_file: + shutil.copyfileobj(temp_file, cache_file) + + logger.info("creating metadata file for %s", cache_path) + meta = {'url': url, 'etag': etag} + meta_path = cache_path + '.json' + with open(meta_path, 'w') as meta_file: + output_string = json.dumps(meta) + if sys.version_info[0] == 2 and isinstance(output_string, str): + output_string = unicode(output_string, 'utf-8') # The beauty of python 2 + meta_file.write(output_string) + + logger.info("removing temp file %s", temp_file.name) + + return cache_path diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/waypoint_pred/transformer/pytorch_transformer/modeling_bert.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/waypoint_pred/transformer/pytorch_transformer/modeling_bert.py new file mode 100755 index 0000000..754b0ae --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/waypoint_pred/transformer/pytorch_transformer/modeling_bert.py @@ -0,0 +1,1255 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch BERT model. 
""" + +from __future__ import absolute_import, division, print_function, unicode_literals + +import json +import logging +import math +import os +import sys +from io import open + +import torch +from torch import nn +from torch.nn import CrossEntropyLoss, MSELoss + +from .modeling_utils import (WEIGHTS_NAME, CONFIG_NAME, PretrainedConfig, PreTrainedModel, + prune_linear_layer, add_start_docstrings) + +logger = logging.getLogger(__name__) + +BERT_PRETRAINED_MODEL_ARCHIVE_MAP = { + 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin", + 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin", + 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin", + 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin", + 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin", + 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin", + 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin", + 'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-pytorch_model.bin", + 'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin", + 'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin", + 'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin", + 'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin", + 'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin", +} + +BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { + 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json", + 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json", + 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json", + 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json", + 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json", + 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json", + 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json", + 'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-config.json", + 'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json", + 'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-config.json", + 
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-config.json", + 'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-config.json", + 'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json", +} + + +def load_tf_weights_in_bert(model, config, tf_checkpoint_path): + """ Load tf checkpoints in a pytorch model. + """ + try: + import re + import numpy as np + import tensorflow as tf + except ImportError: + logger.error("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " + "https://www.tensorflow.org/install/ for installation instructions.") + raise + tf_path = os.path.abspath(tf_checkpoint_path) + logger.info("Converting TensorFlow checkpoint from {}".format(tf_path)) + # Load weights from TF model + init_vars = tf.train.list_variables(tf_path) + names = [] + arrays = [] + for name, shape in init_vars: + logger.info("Loading TF weight {} with shape {}".format(name, shape)) + array = tf.train.load_variable(tf_path, name) + names.append(name) + arrays.append(array) + + for name, array in zip(names, arrays): + name = name.split('/') + # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v + # which are not required for using pretrained model + if any(n in ["adam_v", "adam_m", "global_step"] for n in name): + logger.info("Skipping {}".format("/".join(name))) + continue + pointer = model + for m_name in name: + if re.fullmatch(r'[A-Za-z]+_\d+', m_name): + l = re.split(r'_(\d+)', m_name) + else: + l = [m_name] + if l[0] == 'kernel' or l[0] == 'gamma': + pointer = getattr(pointer, 'weight') + elif l[0] == 'output_bias' or l[0] == 'beta': + pointer = getattr(pointer, 'bias') + elif l[0] == 'output_weights': + pointer = getattr(pointer, 'weight') + elif l[0] == 'squad': + pointer = getattr(pointer, 'classifier') + else: + try: + pointer = getattr(pointer, l[0]) + except AttributeError: + logger.info("Skipping {}".format("/".join(name))) + continue + if len(l) >= 2: + num = int(l[1]) + pointer = pointer[num] + if m_name[-11:] == '_embeddings': + pointer = getattr(pointer, 'weight') + elif m_name == 'kernel': + array = np.transpose(array) + try: + assert pointer.shape == array.shape + except AssertionError as e: + e.args += (pointer.shape, array.shape) + raise + logger.info("Initialize PyTorch weight {}".format(name)) + pointer.data = torch.from_numpy(array) + return model + + +def gelu(x): + """Implementation of the gelu activation function. + For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): + 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) + Also see https://arxiv.org/abs/1606.08415 + """ + return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) + + +def swish(x): + return x * torch.sigmoid(x) + + +ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish} + + +class BertConfig(PretrainedConfig): + r""" + :class:`~pytorch_transformers.BertConfig` is the configuration class to store the configuration of a + `BertModel`. + + + Arguments: + vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`. + hidden_size: Size of the encoder layers and the pooler layer. 
+ num_hidden_layers: Number of hidden layers in the Transformer encoder. + num_attention_heads: Number of attention heads for each attention layer in + the Transformer encoder. + intermediate_size: The size of the "intermediate" (i.e., feed-forward) + layer in the Transformer encoder. + hidden_act: The non-linear activation function (function or string) in the + encoder and pooler. If string, "gelu", "relu" and "swish" are supported. + hidden_dropout_prob: The dropout probabilitiy for all fully connected + layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob: The dropout ratio for the attention + probabilities. + max_position_embeddings: The maximum sequence length that this model might + ever be used with. Typically set this to something large just in case + (e.g., 512 or 1024 or 2048). + type_vocab_size: The vocabulary size of the `token_type_ids` passed into + `BertModel`. + initializer_range: The sttdev of the truncated_normal_initializer for + initializing all weight matrices. + layer_norm_eps: The epsilon used by LayerNorm. + """ + pretrained_config_archive_map = BERT_PRETRAINED_CONFIG_ARCHIVE_MAP + + def __init__(self, + vocab_size_or_config_json_file=30522, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=2, + initializer_range=0.02, + layer_norm_eps=1e-12, + **kwargs): + super(BertConfig, self).__init__(**kwargs) + if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2 + and isinstance(vocab_size_or_config_json_file, unicode)): + with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader: + json_config = json.loads(reader.read()) + for key, value in json_config.items(): + self.__dict__[key] = value + elif isinstance(vocab_size_or_config_json_file, int): + self.vocab_size = vocab_size_or_config_json_file + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + else: + raise ValueError("First argument must be either a vocabulary size (int)" + "or the path to a pretrained model config file (str)") + + + +try: + from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm +except ImportError: + logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .") + class BertLayerNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-12): + """Construct a layernorm module in the TF style (epsilon inside the square root). + """ + super(BertLayerNorm, self).__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.bias = nn.Parameter(torch.zeros(hidden_size)) + self.variance_epsilon = eps + + def forward(self, x): + u = x.mean(-1, keepdim=True) + s = (x - u).pow(2).mean(-1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.variance_epsilon) + return self.weight * x + self.bias + +class BertEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type embeddings. 
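+    The word, position and token_type embeddings are summed element-wise, then LayerNorm and dropout are applied (see ``forward``).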
+ """ + def __init__(self, config): + super(BertEmbeddings, self).__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, input_ids, token_type_ids=None, position_ids=None): + seq_length = input_ids.size(1) + if position_ids is None: + position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) + position_ids = position_ids.unsqueeze(0).expand_as(input_ids) + if token_type_ids is None: + token_type_ids = torch.zeros_like(input_ids) + + words_embeddings = self.word_embeddings(input_ids) + position_embeddings = self.position_embeddings(position_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = words_embeddings + position_embeddings + token_type_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class BertSelfAttention(nn.Module): + def __init__(self, config): + super(BertSelfAttention, self).__init__() + if config.hidden_size % config.num_attention_heads != 0: + raise ValueError( + "The hidden size (%d) is not a multiple of the number of attention " + "heads (%d)" % (config.hidden_size, config.num_attention_heads)) + self.output_attentions = config.output_attentions + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) # [768 * 768] + self.key = nn.Linear(config.hidden_size, self.all_head_size) # [768 * 768] + self.value = nn.Linear(config.hidden_size, self.all_head_size) # [768 * 768] + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward(self, hidden_states, attention_mask, head_mask=None): + mixed_query_layer = self.query(hidden_states) + mixed_key_layer = self.key(hidden_states) + mixed_value_layer = self.value(hidden_states) + + query_layer = self.transpose_for_scores(mixed_query_layer) + key_layer = self.transpose_for_scores(mixed_key_layer) + value_layer = self.transpose_for_scores(mixed_value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + # Apply the attention mask is (precomputed for all layers in BertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.Softmax(dim=-1)(attention_scores) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
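+        # Note: nn.Dropout only zeroes values in training mode; in eval mode the attention
+        # probabilities pass through unchanged.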
+ attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + + outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,) + return outputs + + +class BertSelfOutput(nn.Module): + def __init__(self, config): + super(BertSelfOutput, self).__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertAttention(nn.Module): + def __init__(self, config): + super(BertAttention, self).__init__() + self.self = BertSelfAttention(config) + self.output = BertSelfOutput(config) + + def prune_heads(self, heads): + if len(heads) == 0: + return + mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size) + for head in heads: + mask[head] = 0 + mask = mask.view(-1).contiguous().eq(1) + index = torch.arange(len(mask))[mask].long() + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + # Update hyper params + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + + def forward(self, input_tensor, attention_mask, head_mask=None): + self_outputs = self.self(input_tensor, attention_mask, head_mask) + attention_output = self.output(self_outputs[0], input_tensor) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +class BertIntermediate(nn.Module): + def __init__(self, config): + super(BertIntermediate, self).__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class BertOutput(nn.Module): + def __init__(self, config): + super(BertOutput, self).__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertLayer(nn.Module): + def __init__(self, config): + super(BertLayer, self).__init__() + self.attention = 
BertAttention(config) + self.intermediate = BertIntermediate(config) + self.output = BertOutput(config) + + def forward(self, hidden_states, attention_mask, head_mask=None): + attention_outputs = self.attention(hidden_states, attention_mask, head_mask) + attention_output = attention_outputs[0] + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them + return outputs + + +class BertEncoder(nn.Module): + def __init__(self, config): + super(BertEncoder, self).__init__() + self.output_attentions = config.output_attentions + self.output_hidden_states = config.output_hidden_states + self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)]) + + def forward(self, hidden_states, attention_mask, head_mask=None): + all_hidden_states = () + all_attentions = () + for i, layer_module in enumerate(self.layer): + if self.output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i]) + hidden_states = layer_outputs[0] + + if self.output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + # Add last layer + if self.output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + outputs = (hidden_states,) + if self.output_hidden_states: + outputs = outputs + (all_hidden_states,) + if self.output_attentions: + outputs = outputs + (all_attentions,) + return outputs # outputs, (hidden states), (attentions) + + +class BertPooler(nn.Module): + def __init__(self, config): + super(BertPooler, self).__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states): + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class BertPredictionHeadTransform(nn.Module): + def __init__(self, config): + super(BertPredictionHeadTransform, self).__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +class BertLMPredictionHead(nn.Module): + def __init__(self, config): + super(BertLMPredictionHead, self).__init__() + self.transform = BertPredictionHeadTransform(config) + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. 
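+        # Note: the decoder weight is tied to the input word embedding matrix via
+        # tie_weights() / _tie_or_clone_weights() in BertForPreTraining and BertForMaskedLM below.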
+ self.decoder = nn.Linear(config.hidden_size, + config.vocab_size, + bias=False) + + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + self.bias + return hidden_states + + +class BertOnlyMLMHead(nn.Module): + def __init__(self, config): + super(BertOnlyMLMHead, self).__init__() + self.predictions = BertLMPredictionHead(config) + + def forward(self, sequence_output): + prediction_scores = self.predictions(sequence_output) + return prediction_scores + + +class BertOnlyNSPHead(nn.Module): + def __init__(self, config): + super(BertOnlyNSPHead, self).__init__() + self.seq_relationship = nn.Linear(config.hidden_size, 2) + + def forward(self, pooled_output): + seq_relationship_score = self.seq_relationship(pooled_output) + return seq_relationship_score + + +class BertPreTrainingHeads(nn.Module): + def __init__(self, config): + super(BertPreTrainingHeads, self).__init__() + self.predictions = BertLMPredictionHead(config) + self.seq_relationship = nn.Linear(config.hidden_size, 2) + + def forward(self, sequence_output, pooled_output): + prediction_scores = self.predictions(sequence_output) + seq_relationship_score = self.seq_relationship(pooled_output) + return prediction_scores, seq_relationship_score + + +class BertPreTrainedModel(PreTrainedModel): + """ An abstract class to handle weights initialization and + a simple interface for dowloading and loading pretrained models. + """ + config_class = BertConfig + pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP + load_tf_weights = load_tf_weights_in_bert + base_model_prefix = "bert" + + def __init__(self, *inputs, **kwargs): + super(BertPreTrainedModel, self).__init__(*inputs, **kwargs) + + def init_weights(self, module): + """ Initialize the weights. + """ + if isinstance(module, (nn.Linear, nn.Embedding)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + elif isinstance(module, BertLayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + +BERT_START_DOCSTRING = r""" The BERT model was proposed in + `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ + by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a bidirectional transformer + pre-trained using a combination of masked language modeling objective and next sentence prediction + on a large corpus comprising the Toronto Book Corpus and Wikipedia. + + This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and + refer to the PyTorch documentation for all matter related to general usage and behavior. + + .. _`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`: + https://arxiv.org/abs/1810.04805 + + .. _`torch.nn.Module`: + https://pytorch.org/docs/stable/nn.html#module + + Parameters: + config (:class:`~pytorch_transformers.BertConfig`): Model configuration class with all the parameters of the model. +""" + +BERT_INPUTS_DOCSTRING = r""" + Inputs: + **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: + Indices of input sequence tokens in the vocabulary. 
+ To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows: + + (a) For sequence pairs: + + ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]`` + + ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1`` + + (b) For single sequences: + + ``tokens: [CLS] the dog is hairy . [SEP]`` + + ``token_type_ids: 0 0 0 0 0 0 0`` + + Indices can be obtained using :class:`pytorch_transformers.BertTokenizer`. + See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and + :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. + **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: + Indices of positions of each input sequence tokens in the position embeddings. + Selected in the range ``[0, config.max_position_embeddings - 1[``. + **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: + Segment token indices to indicate first and second portions of the inputs. + Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` + corresponds to a `sentence B` token + (see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details). + **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``: + Mask to avoid performing attention on padding token indices. + Mask values selected in ``[0, 1]``: + ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. + **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: + Mask to nullify selected heads of the self-attention modules. + Mask values selected in ``[0, 1]``: + ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. +""" + +@add_start_docstrings("The bare Bert Model transformer outputing raw hidden-states without any specific head on top.", + BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) +class BertModel(BertPreTrainedModel): + r""" + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)`` + Sequence of hidden-states at the output of the last layer of the model. + **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)`` + Last layer hidden-state of the first token of the sequence (classification token) + further processed by a Linear layer and a Tanh activation function. The Linear + layer weights are trained from the next sentence prediction (classification) + objective during Bert pretraining. This output is usually *not* a good summary + of the semantic content of the input, you're often better with averaging or pooling + the sequence of hidden-states for the whole input sequence. + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
+ + Examples:: + + >>> config = BertConfig.from_pretrained('bert-base-uncased') + >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') + >>> model = BertModel(config) + >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 + >>> outputs = model(input_ids) + >>> last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple + + """ + def __init__(self, config): + super(BertModel, self).__init__(config) + + self.embeddings = BertEmbeddings(config) + self.encoder = BertEncoder(config) + self.pooler = BertPooler(config) + + self.apply(self.init_weights) + + def _resize_token_embeddings(self, new_num_tokens): + old_embeddings = self.embeddings.word_embeddings + new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) + self.embeddings.word_embeddings = new_embeddings + return self.embeddings.word_embeddings + + def _prune_heads(self, heads_to_prune): + """ Prunes heads of the model. + heads_to_prune: dict of {layer_num: list of heads to prune in this layer} + See base class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, position_ids=None, head_mask=None): + if attention_mask is None: + attention_mask = torch.ones_like(input_ids) + if token_type_ids is None: + token_type_ids = torch.zeros_like(input_ids) + + # We create a 3D attention mask from a 2D tensor mask. + # Sizes are [batch_size, 1, 1, to_seq_length] + # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] + # this attention mask is more simple than the triangular masking of causal attention + # used in OpenAI GPT, we just need to prepare the broadcast dimension here. + extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. 
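+        # For example, a mask value of 1 maps to (1.0 - 1.0) * -10000.0 = 0.0 (attend),
+        # while a value of 0 maps to -10000.0 (effectively ignored by the softmax).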
+ extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + if head_mask is not None: + if head_mask.dim() == 1: + head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) + head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1) + elif head_mask.dim() == 2: + head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer + head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to fload if need + fp16 compatibility + else: + head_mask = [None] * self.config.num_hidden_layers + + embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids) + encoder_outputs = self.encoder(embedding_output, + extended_attention_mask, + head_mask=head_mask) + sequence_output = encoder_outputs[0] + pooled_output = self.pooler(sequence_output) + + outputs = (sequence_output, pooled_output,) + encoder_outputs[1:] # add hidden_states and attentions if they are here + return outputs # sequence_output, pooled_output, (hidden_states), (attentions) + + +@add_start_docstrings("""Bert Model with two heads on top as done during the pre-training: + a `masked language modeling` head and a `next sentence prediction (classification)` head. """, + BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) +class BertForPreTraining(BertPreTrainedModel): + r""" + **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: + Labels for computing the masked language modeling loss. + Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) + Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels + in ``[0, ..., config.vocab_size]`` + **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: + Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring) + Indices should be in ``[0, 1]``. + ``0`` indicates sequence B is a continuation of sequence A, + ``1`` indicates sequence B is a random sequence. + + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **loss**: (`optional`, returned when both ``masked_lm_labels`` and ``next_sentence_label`` are provided) ``torch.FloatTensor`` of shape ``(1,)``: + Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. + **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, 2)`` + Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). 
+ **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. + + Examples:: + + >>> config = BertConfig.from_pretrained('bert-base-uncased') + >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') + >>> + >>> model = BertForPreTraining(config) + >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 + >>> outputs = model(input_ids) + >>> prediction_scores, seq_relationship_scores = outputs[:2] + + """ + def __init__(self, config): + super(BertForPreTraining, self).__init__(config) + + self.bert = BertModel(config) + self.cls = BertPreTrainingHeads(config) + + self.apply(self.init_weights) + self.tie_weights() + + def tie_weights(self): + """ Make sure we are sharing the input and output embeddings. + Export to TorchScript can't handle parameter sharing so we are cloning them instead. + """ + self._tie_or_clone_weights(self.cls.predictions.decoder, + self.bert.embeddings.word_embeddings) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, + next_sentence_label=None, position_ids=None, head_mask=None): + outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids, + attention_mask=attention_mask, head_mask=head_mask) + + sequence_output, pooled_output = outputs[:2] + prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) + + outputs = (prediction_scores, seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here + + if masked_lm_labels is not None and next_sentence_label is not None: + loss_fct = CrossEntropyLoss(ignore_index=-1) + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) + next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) + total_loss = masked_lm_loss + next_sentence_loss + outputs = (total_loss,) + outputs + + return outputs # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions) + + +@add_start_docstrings("""Bert Model with a `language modeling` head on top. """, + BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) +class BertForMaskedLM(BertPreTrainedModel): + r""" + **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: + Labels for computing the masked language modeling loss. + Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) + Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels + in ``[0, ..., config.vocab_size]`` + + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: + Masked language modeling loss. 
+ **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. + + Examples:: + + >>> config = BertConfig.from_pretrained('bert-base-uncased') + >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') + >>> + >>> model = BertForMaskedLM(config) + >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 + >>> outputs = model(input_ids, masked_lm_labels=input_ids) + >>> loss, prediction_scores = outputs[:2] + + """ + def __init__(self, config): + super(BertForMaskedLM, self).__init__(config) + + self.bert = BertModel(config) + self.cls = BertOnlyMLMHead(config) + + self.apply(self.init_weights) + self.tie_weights() + + def tie_weights(self): + """ Make sure we are sharing the input and output embeddings. + Export to TorchScript can't handle parameter sharing so we are cloning them instead. + """ + self._tie_or_clone_weights(self.cls.predictions.decoder, + self.bert.embeddings.word_embeddings) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, + position_ids=None, head_mask=None): + outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids, + attention_mask=attention_mask, head_mask=head_mask) + + sequence_output = outputs[0] + prediction_scores = self.cls(sequence_output) + + outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention is they are here + if masked_lm_labels is not None: + loss_fct = CrossEntropyLoss(ignore_index=-1) + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) + outputs = (masked_lm_loss,) + outputs + + return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions) + + +@add_start_docstrings("""Bert Model with a `next sentence prediction (classification)` head on top. """, + BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) +class BertForNextSentencePrediction(BertPreTrainedModel): + r""" + **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: + Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring) + Indices should be in ``[0, 1]``. + ``0`` indicates sequence B is a continuation of sequence A, + ``1`` indicates sequence B is a random sequence. + + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **loss**: (`optional`, returned when ``next_sentence_label`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: + Next sequence prediction (classification) loss. 
+ **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, 2)`` + Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. + + Examples:: + + >>> config = BertConfig.from_pretrained('bert-base-uncased') + >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') + >>> + >>> model = BertForNextSentencePrediction(config) + >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 + >>> outputs = model(input_ids) + >>> seq_relationship_scores = outputs[0] + + """ + def __init__(self, config): + super(BertForNextSentencePrediction, self).__init__(config) + + self.bert = BertModel(config) + self.cls = BertOnlyNSPHead(config) + + self.apply(self.init_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None, + position_ids=None, head_mask=None): + outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids, + attention_mask=attention_mask, head_mask=head_mask) + pooled_output = outputs[1] + + seq_relationship_score = self.cls(pooled_output) + + outputs = (seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here + if next_sentence_label is not None: + loss_fct = CrossEntropyLoss(ignore_index=-1) + next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) + outputs = (next_sentence_loss,) + outputs + + return outputs # (next_sentence_loss), seq_relationship_score, (hidden_states), (attentions) + + +@add_start_docstrings("""Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of + the pooled output) e.g. for GLUE tasks. """, + BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) +class BertForSequenceClassification(BertPreTrainedModel): + r""" + **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: + Labels for computing the sequence classification/regression loss. + Indices should be in ``[0, ..., config.num_labels]``. + If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss), + If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy). + + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: + Classification (or regression if config.num_labels==1) loss. + **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)`` + Classification (or regression if config.num_labels==1) scores (before SoftMax). 
+ **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. + + Examples:: + + >>> config = BertConfig.from_pretrained('bert-base-uncased') + >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') + >>> + >>> model = BertForSequenceClassification(config) + >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 + >>> labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 + >>> outputs = model(input_ids, labels=labels) + >>> loss, logits = outputs[:2] + + """ + def __init__(self, config): + super(BertForSequenceClassification, self).__init__(config) + self.num_labels = config.num_labels + + self.bert = BertModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) + + self.apply(self.init_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, + position_ids=None, head_mask=None): + outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids, + attention_mask=attention_mask, head_mask=head_mask) + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + + outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here + + if labels is not None: + if self.num_labels == 1: + # We are doing regression + loss_fct = MSELoss() + loss = loss_fct(logits.view(-1), labels.view(-1)) + else: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + outputs = (loss,) + outputs + + return outputs # (loss), logits, (hidden_states), (attentions) + + +@add_start_docstrings("""Bert Model with a multiple choice classification head on top (a linear layer on top of + the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, + BERT_START_DOCSTRING) +class BertForMultipleChoice(BertPreTrainedModel): + r""" + Inputs: + **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``: + Indices of input sequence tokens in the vocabulary. + The second dimension of the input (`num_choices`) indicates the number of choices to score. + To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows: + + (a) For sequence pairs: + + ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]`` + + ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1`` + + (b) For single sequences: + + ``tokens: [CLS] the dog is hairy . [SEP]`` + + ``token_type_ids: 0 0 0 0 0 0 0`` + + Indices can be obtained using :class:`pytorch_transformers.BertTokenizer`. + See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and + :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. 
+ **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``: + Segment token indices to indicate first and second portions of the inputs. + The second dimension of the input (`num_choices`) indicates the number of choices to score. + Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` + corresponds to a `sentence B` token + (see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details). + **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length)``: + Mask to avoid performing attention on padding token indices. + The second dimension of the input (`num_choices`) indicates the number of choices to score. + Mask values selected in ``[0, 1]``: + ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. + **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: + Mask to nullify selected heads of the self-attention modules. + Mask values selected in ``[0, 1]``: + ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. + **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: + Labels for computing the multiple choice classification loss. + Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension + of the input tensors. (see `input_ids` above) + + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: + Classification loss. + **classification_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)`` where `num_choices` is the size of the second dimension + of the input tensors. (see `input_ids` above). + Classification scores (before SoftMax). + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
+ + Examples:: + + >>> config = BertConfig.from_pretrained('bert-base-uncased') + >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') + >>> + >>> model = BertForMultipleChoice(config) + >>> choices = ["Hello, my dog is cute", "Hello, my cat is amazing"] + >>> input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices + >>> labels = torch.tensor(1).unsqueeze(0) # Batch size 1 + >>> outputs = model(input_ids, labels=labels) + >>> loss, classification_scores = outputs[:2] + + """ + def __init__(self, config): + super(BertForMultipleChoice, self).__init__(config) + + self.bert = BertModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, 1) + + self.apply(self.init_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, + position_ids=None, head_mask=None): + num_choices = input_ids.shape[1] + + flat_input_ids = input_ids.view(-1, input_ids.size(-1)) + flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None + flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None + flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None + outputs = self.bert(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, + attention_mask=flat_attention_mask, head_mask=head_mask) + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + reshaped_logits = logits.view(-1, num_choices) + + outputs = (reshaped_logits,) + outputs[2:] # add hidden states and attention if they are here + + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(reshaped_logits, labels) + outputs = (loss,) + outputs + + return outputs # (loss), reshaped_logits, (hidden_states), (attentions) + + +@add_start_docstrings("""Bert Model with a token classification head on top (a linear layer on top of + the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, + BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) +class BertForTokenClassification(BertPreTrainedModel): + r""" + **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: + Labels for computing the token classification loss. + Indices should be in ``[0, ..., config.num_labels]``. + + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: + Classification loss. + **scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.num_labels)`` + Classification scores (before SoftMax). + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
+ + Examples:: + + >>> config = BertConfig.from_pretrained('bert-base-uncased') + >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') + >>> + >>> model = BertForTokenClassification(config) + >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 + >>> labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1 + >>> outputs = model(input_ids, labels=labels) + >>> loss, scores = outputs[:2] + + """ + def __init__(self, config): + super(BertForTokenClassification, self).__init__(config) + self.num_labels = config.num_labels + + self.bert = BertModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + self.apply(self.init_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, + position_ids=None, head_mask=None): + outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids, + attention_mask=attention_mask, head_mask=head_mask) + sequence_output = outputs[0] + + sequence_output = self.dropout(sequence_output) + logits = self.classifier(sequence_output) + + outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here + if labels is not None: + loss_fct = CrossEntropyLoss() + # Only keep active parts of the loss + if attention_mask is not None: + active_loss = attention_mask.view(-1) == 1 + active_logits = logits.view(-1, self.num_labels)[active_loss] + active_labels = labels.view(-1)[active_loss] + loss = loss_fct(active_logits, active_labels) + else: + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + outputs = (loss,) + outputs + + return outputs # (loss), scores, (hidden_states), (attentions) + + +@add_start_docstrings("""Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of + the hidden-states output to compute `span start logits` and `span end logits`). """, + BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) +class BertForQuestionAnswering(BertPreTrainedModel): + r""" + **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). + Position outside of the sequence are not taken into account for computing the loss. + **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). + Position outside of the sequence are not taken into account for computing the loss. + + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: + Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. + **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` + Span-start scores (before SoftMax). + **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` + Span-end scores (before SoftMax). 
+        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
+            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
+            of shape ``(batch_size, sequence_length, hidden_size)``:
+            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
+            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
+            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
+
+    Examples::
+
+        >>> config = BertConfig.from_pretrained('bert-base-uncased')
+        >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
+        >>>
+        >>> model = BertForQuestionAnswering(config)
+        >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
+        >>> start_positions = torch.tensor([1])
+        >>> end_positions = torch.tensor([3])
+        >>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
+        >>> loss, start_scores, end_scores = outputs[:3]
+
+    """
+    def __init__(self, config):
+        super(BertForQuestionAnswering, self).__init__(config)
+        self.num_labels = config.num_labels
+
+        self.bert = BertModel(config)
+        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+        self.apply(self.init_weights)
+
+    def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None,
+                end_positions=None, position_ids=None, head_mask=None):
+        outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
+                            attention_mask=attention_mask, head_mask=head_mask)
+        sequence_output = outputs[0]
+
+        logits = self.qa_outputs(sequence_output)
+        start_logits, end_logits = logits.split(1, dim=-1)
+        start_logits = start_logits.squeeze(-1)
+        end_logits = end_logits.squeeze(-1)
+
+        outputs = (start_logits, end_logits,) + outputs[2:]
+        if start_positions is not None and end_positions is not None:
+            # If we are on multi-GPU, the batch split adds a dimension; squeeze it
+            if len(start_positions.size()) > 1:
+                start_positions = start_positions.squeeze(-1)
+            if len(end_positions.size()) > 1:
+                end_positions = end_positions.squeeze(-1)
+            # sometimes the start/end positions are outside our model inputs, we ignore these terms
+            ignored_index = start_logits.size(1)
+            start_positions.clamp_(0, ignored_index)
+            end_positions.clamp_(0, ignored_index)
+
+            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+            start_loss = loss_fct(start_logits, start_positions)
+            end_loss = loss_fct(end_logits, end_positions)
+            total_loss = (start_loss + end_loss) / 2
+            outputs = (total_loss,) + outputs
+
+        return outputs  # (loss), start_logits, end_logits, (hidden_states), (attentions)
diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/waypoint_pred/transformer/pytorch_transformer/modeling_utils.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/waypoint_pred/transformer/pytorch_transformer/modeling_utils.py
new file mode 100755
index 0000000..bcec77f
--- /dev/null
+++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/waypoint_pred/transformer/pytorch_transformer/modeling_utils.py
@@ -0,0 +1,900 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch BERT model.""" + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import copy +import json +import logging +import os +from io import open + +import six +import torch +from torch import nn +from torch.nn import CrossEntropyLoss +from torch.nn import functional as F + +from .file_utils import cached_path + +logger = logging.getLogger(__name__) + +CONFIG_NAME = "config.json" +WEIGHTS_NAME = "pytorch_model.bin" +TF_WEIGHTS_NAME = 'model.ckpt' + + +try: + from torch.nn import Identity +except ImportError: + # Older PyTorch compatibility + class Identity(nn.Module): + r"""A placeholder identity operator that is argument-insensitive. + """ + def __init__(self, *args, **kwargs): + super(Identity, self).__init__() + + def forward(self, input): + return input + + +if not six.PY2: + def add_start_docstrings(*docstr): + def docstring_decorator(fn): + fn.__doc__ = ''.join(docstr) + fn.__doc__ + return fn + return docstring_decorator +else: + # Not possible to update class docstrings on python2 + def add_start_docstrings(*docstr): + def docstring_decorator(fn): + return fn + return docstring_decorator + + +class PretrainedConfig(object): + """ Base class for all configuration classes. + Handle a few common parameters and methods for loading/downloading/saving configurations. + """ + pretrained_config_archive_map = {} + + def __init__(self, **kwargs): + self.finetuning_task = kwargs.pop('finetuning_task', None) + self.num_labels = kwargs.pop('num_labels', 2) + self.output_attentions = kwargs.pop('output_attentions', False) + self.output_hidden_states = kwargs.pop('output_hidden_states', False) + self.torchscript = kwargs.pop('torchscript', False) + + def save_pretrained(self, save_directory): + """ Save a configuration object to a directory, so that it + can be re-loaded using the `from_pretrained(save_directory)` class method. + """ + assert os.path.isdir(save_directory), "Saving path should be a directory where the model and configuration can be saved" + + # If we save using the predefined names, we can load using `from_pretrained` + output_config_file = os.path.join(save_directory, CONFIG_NAME) + + self.to_json_file(output_config_file) + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): + r""" Instantiate a PretrainedConfig from a pre-trained model configuration. + + Params: + **pretrained_model_name_or_path**: either: + - a string with the `shortcut name` of a pre-trained model configuration to load from cache + or download and cache if not already stored in cache (e.g. 'bert-base-uncased'). + - a path to a `directory` containing a configuration file saved + using the `save_pretrained(save_directory)` method. + - a path or url to a saved configuration `file`. + **cache_dir**: (`optional`) string: + Path to a directory in which a downloaded pre-trained model + configuration should be cached if the standard cache should not be used. 
+ **return_unused_kwargs**: (`optional`) bool: + - If False, then this function returns just the final configuration object. + - If True, then this functions returns a tuple `(config, unused_kwargs)` where `unused_kwargs` + is a dictionary consisting of the key/value pairs whose keys are not configuration attributes: + ie the part of kwargs which has not been used to update `config` and is otherwise ignored. + **kwargs**: (`optional`) dict: + Dictionary of key/value pairs with which to update the configuration object after loading. + - The values in kwargs of any keys which are configuration attributes will be used + to override the loaded values. + - Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled + by the `return_unused_kwargs` keyword parameter. + + Examples:: + + >>> config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache. + >>> config = BertConfig.from_pretrained('./test/saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')` + >>> config = BertConfig.from_pretrained('./test/saved_model/my_configuration.json') + >>> config = BertConfig.from_pretrained('bert-base-uncased', output_attention=True, foo=False) + >>> assert config.output_attention == True + >>> config, unused_kwargs = BertConfig.from_pretrained('bert-base-uncased', output_attention=True, + >>> foo=False, return_unused_kwargs=True) + >>> assert config.output_attention == True + >>> assert unused_kwargs == {'foo': False} + + """ + cache_dir = kwargs.pop('cache_dir', None) + return_unused_kwargs = kwargs.pop('return_unused_kwargs', False) + + if pretrained_model_name_or_path in cls.pretrained_config_archive_map: + config_file = cls.pretrained_config_archive_map[pretrained_model_name_or_path] + elif os.path.isdir(pretrained_model_name_or_path): + config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME) + else: + config_file = pretrained_model_name_or_path + # redirect to the cache, if necessary + try: + resolved_config_file = cached_path(config_file, cache_dir=cache_dir) + except EnvironmentError: + if pretrained_model_name_or_path in cls.pretrained_config_archive_map: + logger.error( + "Couldn't reach server at '{}' to download pretrained model configuration file.".format( + config_file)) + else: + logger.error( + "Model name '{}' was not found in model name list ({}). 
" + "We assumed '{}' was a path or url but couldn't find any file " + "associated to this path or url.".format( + pretrained_model_name_or_path, + ', '.join(cls.pretrained_config_archive_map.keys()), + config_file)) + return None + if resolved_config_file == config_file: + logger.info("loading configuration file {}".format(config_file)) + else: + logger.info("loading configuration file {} from cache at {}".format( + config_file, resolved_config_file)) + + # Load config + config = cls.from_json_file(resolved_config_file) + + # Update config with kwargs if needed + to_remove = [] + for key, value in kwargs.items(): + if hasattr(config, key): + setattr(config, key, value) + to_remove.append(key) + for key in to_remove: + kwargs.pop(key, None) + + logger.info("Model config %s", config) + if return_unused_kwargs: + return config, kwargs + else: + return config + + @classmethod + def from_dict(cls, json_object): + """Constructs a `Config` from a Python dictionary of parameters.""" + config = cls(vocab_size_or_config_json_file=-1) + for key, value in json_object.items(): + config.__dict__[key] = value + return config + + @classmethod + def from_json_file(cls, json_file): + """Constructs a `BertConfig` from a json file of parameters.""" + with open(json_file, "r", encoding='utf-8') as reader: + text = reader.read() + return cls.from_dict(json.loads(text)) + + def __eq__(self, other): + return self.__dict__ == other.__dict__ + + def __repr__(self): + return str(self.to_json_string()) + + def to_dict(self): + """Serializes this instance to a Python dictionary.""" + output = copy.deepcopy(self.__dict__) + return output + + def to_json_string(self): + """Serializes this instance to a JSON string.""" + return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" + + def to_json_file(self, json_file_path): + """ Save this instance to a json file.""" + with open(json_file_path, "w", encoding='utf-8') as writer: + writer.write(self.to_json_string()) + + +class PreTrainedModel(nn.Module): + """ Base class for all models. Handle loading/storing model config and + a simple interface for dowloading and loading pretrained models. + """ + config_class = PretrainedConfig + pretrained_model_archive_map = {} + load_tf_weights = lambda model, config, path: None + base_model_prefix = "" + input_embeddings = None + + def __init__(self, config, *inputs, **kwargs): + super(PreTrainedModel, self).__init__() + if not isinstance(config, PretrainedConfig): + raise ValueError( + "Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. " + "To create a model from a pretrained model use " + "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format( + self.__class__.__name__, self.__class__.__name__ + )) + # Save config in model + self.config = config + + def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None): + """ Build a resized Embedding Module from a provided token Embedding Module. + Increasing the size will add newly initialized vectors at the end + Reducing the size will remove vectors from the end + + Args: + new_num_tokens: (`optional`) int + New number of tokens in the embedding matrix. + Increasing the size will add newly initialized vectors at the end + Reducing the size will remove vectors from the end + If not provided or None: return the provided token Embedding Module. 
+ Return: ``torch.nn.Embeddings`` + Pointer to the resized Embedding Module or the old Embedding Module if new_num_tokens is None + """ + if new_num_tokens is None: + return old_embeddings + + old_num_tokens, old_embedding_dim = old_embeddings.weight.size() + if old_num_tokens == new_num_tokens: + return old_embeddings + + # Build new embeddings + new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim) + new_embeddings.to(old_embeddings.weight.device) + + # initialize all new embeddings (in particular added tokens) + self.init_weights(new_embeddings) + + # Copy word embeddings from the previous weights + num_tokens_to_copy = min(old_num_tokens, new_num_tokens) + new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :] + + return new_embeddings + + def _tie_or_clone_weights(self, first_module, second_module): + """ Tie or clone module weights depending of weither we are using TorchScript or not + """ + if self.config.torchscript: + first_module.weight = nn.Parameter(second_module.weight.clone()) + else: + first_module.weight = second_module.weight + + def resize_token_embeddings(self, new_num_tokens=None): + """ Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size. + Take care of tying weights embeddings afterwards if the model class has a `tie_weights()` method. + + Args: + new_num_tokens: (`optional`) int + New number of tokens in the embedding matrix. + Increasing the size will add newly initialized vectors at the end + Reducing the size will remove vectors from the end + If not provided or None: does nothing and just returns a pointer to the input tokens Embedding Module of the model. + + Return: ``torch.nn.Embeddings`` + Pointer to the input tokens Embedding Module of the model + """ + base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed + model_embeds = base_model._resize_token_embeddings(new_num_tokens) + if new_num_tokens is None: + return model_embeds + + # Update base model and current model config + self.config.vocab_size = new_num_tokens + base_model.vocab_size = new_num_tokens + + # Tie weights again if needed + if hasattr(self, 'tie_weights'): + self.tie_weights() + + return model_embeds + + def prune_heads(self, heads_to_prune): + """ Prunes heads of the base model. + Args: + heads_to_prune: dict of {layer_num (int): list of heads to prune in this layer (list of int)} + """ + base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed + base_model._prune_heads(heads_to_prune) + + def save_pretrained(self, save_directory): + """ Save a model with its configuration file to a directory, so that it + can be re-loaded using the `from_pretrained(save_directory)` class method. + """ + assert os.path.isdir(save_directory), "Saving path should be a directory where the model and configuration can be saved" + + # Only save the model it-self if we are using distributed training + model_to_save = self.module if hasattr(self, 'module') else self + + # Save configuration file + model_to_save.config.save_pretrained(save_directory) + + # If we save using the predefined names, we can load using `from_pretrained` + output_model_file = os.path.join(save_directory, WEIGHTS_NAME) + + torch.save(model_to_save.state_dict(), output_model_file) + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): + r"""Instantiate a pretrained pytorch model from a pre-trained model configuration. 
+
+        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
+        To train the model, you should first set it back in training mode with `model.train()`
+
+        Params:
+            **pretrained_model_name_or_path**: either:
+                - a string with the `shortcut name` of a pre-trained model to load from cache
+                    or download and cache if not already stored in cache (e.g. 'bert-base-uncased').
+                - a path to a `directory` containing a configuration file saved
+                    using the `save_pretrained(save_directory)` method.
+                - a path or url to a tensorflow index checkpoint `file` (e.g. `./tf_model/model.ckpt.index`).
+                    In this case, ``from_tf`` should be set to True and a configuration object should be
+                    provided as `config` argument. This loading option is slower than converting the TensorFlow
+                    checkpoint to a PyTorch model using the provided conversion scripts and loading
+                    the PyTorch model afterwards.
+            **model_args**: (`optional`) Sequence:
+                All remaining positional arguments will be passed to the underlying model's __init__ function
+            **config**: an optional configuration for the model to use instead of an automatically loaded configuration.
+                Configuration can be automatically loaded when:
+                - the model is a model provided by the library (loaded with a `shortcut name` of a pre-trained model), or
+                - the model was saved using the `save_pretrained(save_directory)` method (loaded by supplying the save directory).
+            **state_dict**: an optional state dictionary for the model to use instead of a state dictionary loaded
+                from the saved weights file.
+                This option can be used if you want to create a model from a pretrained configuration but load your own weights.
+                In this case though, you should check if using `save_pretrained(dir)` and `from_pretrained(save_directory)` is not
+                a simpler option.
+            **cache_dir**: (`optional`) string:
+                Path to a directory in which a downloaded pre-trained model
+                configuration should be cached if the standard cache should not be used.
+            **output_loading_info**: (`optional`) boolean:
+                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
+            **kwargs**: (`optional`) dict:
+                Dictionary of key/value pairs with which to update the configuration object after loading.
+                Can be used to override selected configuration parameters. E.g. ``output_attention=True``.
+
+                - If a configuration is provided with `config`, **kwargs will be directly passed
+                    to the underlying model's __init__ method.
+                - If a configuration is not provided, **kwargs will be first passed to the pretrained
+                    model configuration class loading function (`PretrainedConfig.from_pretrained`).
+                    Each key of **kwargs that corresponds to a configuration attribute
+                    will be used to override said attribute with the supplied **kwargs value.
+                    Remaining keys that do not correspond to any configuration attribute will
+                    be passed to the underlying model's __init__ function.
+
+        Examples::
+
+            >>> model = BertModel.from_pretrained('bert-base-uncased')    # Download model and configuration from S3 and cache.
+            >>> model = BertModel.from_pretrained('./test/saved_model/')  # E.g. 
model was saved using `save_pretrained('./test/saved_model/')` + >>> model = BertModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading + >>> assert model.config.output_attention == True + >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower) + >>> config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json') + >>> model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config) + + """ + config = kwargs.pop('config', None) + state_dict = kwargs.pop('state_dict', None) + cache_dir = kwargs.pop('cache_dir', None) + from_tf = kwargs.pop('from_tf', False) + output_loading_info = kwargs.pop('output_loading_info', False) + + # Load config + if config is None: + config, model_kwargs = cls.config_class.from_pretrained( + pretrained_model_name_or_path, *model_args, + cache_dir=cache_dir, return_unused_kwargs=True, + **kwargs + ) + else: + model_kwargs = kwargs + + # Load model + if pretrained_model_name_or_path in cls.pretrained_model_archive_map: + archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path] + elif os.path.isdir(pretrained_model_name_or_path): + if from_tf: + # Directly load from a TensorFlow checkpoint + archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index") + else: + archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) + else: + if from_tf: + # Directly load from a TensorFlow checkpoint + archive_file = pretrained_model_name_or_path + ".index" + else: + archive_file = pretrained_model_name_or_path + # redirect to the cache, if necessary + try: + resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir) + except EnvironmentError: + if pretrained_model_name_or_path in cls.pretrained_model_archive_map: + logger.error( + "Couldn't reach server at '{}' to download pretrained weights.".format( + archive_file)) + else: + logger.error( + "Model name '{}' was not found in model name list ({}). " + "We assumed '{}' was a path or url but couldn't find any file " + "associated to this path or url.".format( + pretrained_model_name_or_path, + ', '.join(cls.pretrained_model_archive_map.keys()), + archive_file)) + return None + if resolved_archive_file == archive_file: + logger.info("loading weights file {}".format(archive_file)) + else: + logger.info("loading weights file {} from cache at {}".format( + archive_file, resolved_archive_file)) + + # Instantiate model. 
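+        # The steps that follow: build the model from the resolved config, load the checkpoint
+        # into a state_dict (unless loading from a TF checkpoint), rename legacy 'gamma'/'beta'
+        # parameter keys to 'weight'/'bias', align key prefixes with `base_model_prefix` so both
+        # base models and models with heads can consume the same checkpoint, re-tie embeddings
+        # via `tie_weights()` when available, and finally put the model in eval mode.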
+ model = cls(config, *model_args, **model_kwargs) + + if state_dict is None and not from_tf: + state_dict = torch.load(resolved_archive_file, map_location='cpu') + if from_tf: + # Directly load from a TensorFlow checkpoint + return cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index' + + # Convert old format to new format if needed from a PyTorch state_dict + old_keys = [] + new_keys = [] + for key in state_dict.keys(): + new_key = None + if 'gamma' in key: + new_key = key.replace('gamma', 'weight') + if 'beta' in key: + new_key = key.replace('beta', 'bias') + if new_key: + old_keys.append(key) + new_keys.append(new_key) + for old_key, new_key in zip(old_keys, new_keys): + state_dict[new_key] = state_dict.pop(old_key) + + # Load from a PyTorch state_dict + missing_keys = [] + unexpected_keys = [] + error_msgs = [] + # copy state_dict so _load_from_state_dict can modify it + metadata = getattr(state_dict, '_metadata', None) + state_dict = state_dict.copy() + if metadata is not None: + state_dict._metadata = metadata + + def load(module, prefix=''): + local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) + module._load_from_state_dict( + state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) + for name, child in module._modules.items(): + if child is not None: + load(child, prefix + name + '.') + + # Make sure we are able to load base models as well as derived models (with heads) + start_prefix = '' + model_to_load = model + if not hasattr(model, cls.base_model_prefix) and any(s.startswith(cls.base_model_prefix) for s in state_dict.keys()): + start_prefix = cls.base_model_prefix + '.' + if hasattr(model, cls.base_model_prefix) and not any(s.startswith(cls.base_model_prefix) for s in state_dict.keys()): + model_to_load = getattr(model, cls.base_model_prefix) + + load(model_to_load, prefix=start_prefix) + if len(missing_keys) > 0: + logger.info("Weights of {} not initialized from pretrained model: {}".format( + model.__class__.__name__, missing_keys)) + + print(" Weights of {} not initialized from pretrained model: {}".format( + model.__class__.__name__, missing_keys)) + + if len(unexpected_keys) > 0: + logger.info("Weights from pretrained model not used in {}: {}".format( + model.__class__.__name__, unexpected_keys)) + + print(" Weights from pretrained model not used in {}: {}".format( + model.__class__.__name__, unexpected_keys)) + + if len(error_msgs) > 0: + raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format( + model.__class__.__name__, "\n\t".join(error_msgs))) + + if hasattr(model, 'tie_weights'): + model.tie_weights() # make sure word embedding weights are still tied + + # Set model in evaluation mode to desactivate DropOut modules by default + model.eval() + + if output_loading_info: + loading_info = {"missing_keys": missing_keys, "unexpected_keys": unexpected_keys, "error_msgs": error_msgs} + return model, loading_info + + return model + + +class Conv1D(nn.Module): + def __init__(self, nf, nx): + """ Conv1D layer as defined by Radford et al. 
for OpenAI GPT (and also used in GPT-2) + Basically works like a Linear layer but the weights are transposed + """ + super(Conv1D, self).__init__() + self.nf = nf + w = torch.empty(nx, nf) + nn.init.normal_(w, std=0.02) + self.weight = nn.Parameter(w) + self.bias = nn.Parameter(torch.zeros(nf)) + + def forward(self, x): + size_out = x.size()[:-1] + (self.nf,) + x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight) + x = x.view(*size_out) + return x + + +class PoolerStartLogits(nn.Module): + """ Compute SQuAD start_logits from sequence hidden states. """ + def __init__(self, config): + super(PoolerStartLogits, self).__init__() + self.dense = nn.Linear(config.hidden_size, 1) + + def forward(self, hidden_states, p_mask=None): + """ Args: + **p_mask**: (`optional`) ``torch.FloatTensor`` of shape `(batch_size, seq_len)` + invalid position mask such as query and special symbols (PAD, SEP, CLS) + 1.0 means token should be masked. + """ + x = self.dense(hidden_states).squeeze(-1) + + if p_mask is not None: + x = x * (1 - p_mask) - 1e30 * p_mask + + return x + + +class PoolerEndLogits(nn.Module): + """ Compute SQuAD end_logits from sequence hidden states and start token hidden state. + """ + def __init__(self, config): + super(PoolerEndLogits, self).__init__() + self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) + self.activation = nn.Tanh() + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dense_1 = nn.Linear(config.hidden_size, 1) + + def forward(self, hidden_states, start_states=None, start_positions=None, p_mask=None): + """ Args: + One of ``start_states``, ``start_positions`` should be not None. + If both are set, ``start_positions`` overrides ``start_states``. + + **start_states**: ``torch.LongTensor`` of shape identical to hidden_states + hidden states of the first tokens for the labeled span. + **start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)`` + position of the first token for the labeled span: + **p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, seq_len)`` + Mask of invalid position such as query and special symbols (PAD, SEP, CLS) + 1.0 means token should be masked. + """ + assert start_states is not None or start_positions is not None, "One of start_states, start_positions should be not None" + if start_positions is not None: + slen, hsz = hidden_states.shape[-2:] + start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) + start_states = hidden_states.gather(-2, start_positions) # shape (bsz, 1, hsz) + start_states = start_states.expand(-1, slen, -1) # shape (bsz, slen, hsz) + + x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1)) + x = self.activation(x) + x = self.LayerNorm(x) + x = self.dense_1(x).squeeze(-1) + + if p_mask is not None: + x = x * (1 - p_mask) - 1e30 * p_mask + + return x + + +class PoolerAnswerClass(nn.Module): + """ Compute SQuAD 2.0 answer class from classification and start tokens hidden states. """ + def __init__(self, config): + super(PoolerAnswerClass, self).__init__() + self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) + self.activation = nn.Tanh() + self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False) + + def forward(self, hidden_states, start_states=None, start_positions=None, cls_index=None): + """ + Args: + One of ``start_states``, ``start_positions`` should be not None. + If both are set, ``start_positions`` overrides ``start_states``. 
+ + **start_states**: ``torch.LongTensor`` of shape identical to ``hidden_states``. + hidden states of the first tokens for the labeled span. + **start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)`` + position of the first token for the labeled span. + **cls_index**: torch.LongTensor of shape ``(batch_size,)`` + position of the CLS token. If None, take the last token. + + note(Original repo): + no dependency on end_feature so that we can obtain one single `cls_logits` + for each sample + """ + hsz = hidden_states.shape[-1] + assert start_states is not None or start_positions is not None, "One of start_states, start_positions should be not None" + if start_positions is not None: + start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) + start_states = hidden_states.gather(-2, start_positions).squeeze(-2) # shape (bsz, hsz) + + if cls_index is not None: + cls_index = cls_index[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) + cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, hsz) + else: + cls_token_state = hidden_states[:, -1, :] # shape (bsz, hsz) + + x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1)) + x = self.activation(x) + x = self.dense_1(x).squeeze(-1) + + return x + + +class SQuADHead(nn.Module): + r""" A SQuAD head inspired by XLNet. + + Parameters: + config (:class:`~pytorch_transformers.XLNetConfig`): Model configuration class with all the parameters of the model. + + Inputs: + **hidden_states**: ``torch.FloatTensor`` of shape ``(batch_size, seq_len, hidden_size)`` + hidden states of sequence tokens + **start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)`` + position of the first token for the labeled span. + **end_positions**: ``torch.LongTensor`` of shape ``(batch_size,)`` + position of the last token for the labeled span. + **cls_index**: torch.LongTensor of shape ``(batch_size,)`` + position of the CLS token. If None, take the last token. + **is_impossible**: ``torch.LongTensor`` of shape ``(batch_size,)`` + Whether the question has a possible answer in the paragraph or not. + **p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, seq_len)`` + Mask of invalid position such as query and special symbols (PAD, SEP, CLS) + 1.0 means token should be masked. + + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **loss**: (`optional`, returned if both ``start_positions`` and ``end_positions`` are provided) ``torch.FloatTensor`` of shape ``(1,)``: + Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses. + **start_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided) + ``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)`` + Log probabilities for the top config.start_n_top start token possibilities (beam-search). + **start_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided) + ``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)`` + Indices for the top config.start_n_top start token possibilities (beam-search). + **end_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided) + ``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)`` + Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search). 
+ **end_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided) + ``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)`` + Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search). + **cls_logits**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided) + ``torch.FloatTensor`` of shape ``(batch_size,)`` + Log probabilities for the ``is_impossible`` label of the answers. + """ + def __init__(self, config): + super(SQuADHead, self).__init__() + self.start_n_top = config.start_n_top + self.end_n_top = config.end_n_top + + self.start_logits = PoolerStartLogits(config) + self.end_logits = PoolerEndLogits(config) + self.answer_class = PoolerAnswerClass(config) + + def forward(self, hidden_states, start_positions=None, end_positions=None, + cls_index=None, is_impossible=None, p_mask=None): + outputs = () + + start_logits = self.start_logits(hidden_states, p_mask=p_mask) + + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, let's remove the dimension added by batch splitting + for x in (start_positions, end_positions, cls_index, is_impossible): + if x is not None and x.dim() > 1: + x.squeeze_(-1) + + # during training, compute the end logits based on the ground truth of the start position + end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask) + + loss_fct = CrossEntropyLoss() + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if cls_index is not None and is_impossible is not None: + # Predict answerability from the representation of CLS and START + cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index) + loss_fct_cls = nn.BCEWithLogitsLoss() + cls_loss = loss_fct_cls(cls_logits, is_impossible) + + # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss + total_loss += cls_loss * 0.5 + + outputs = (total_loss,) + outputs + + else: + # during inference, compute the end logits based on beam search + bsz, slen, hsz = hidden_states.size() + start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen) + + start_top_log_probs, start_top_index = torch.topk(start_log_probs, self.start_n_top, dim=-1) # shape (bsz, start_n_top) + start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz) + start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz) + start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz) + + hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(start_states) # shape (bsz, slen, start_n_top, hsz) + p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None + end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask) + end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top) + + end_top_log_probs, end_top_index = torch.topk(end_log_probs, self.end_n_top, dim=1) # shape (bsz, end_n_top, start_n_top) + end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top) + end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top) + + start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs) + cls_logits = 
self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index) + + outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + outputs + + # return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits + # or (if labels are provided) (total_loss,) + return outputs + + +class SequenceSummary(nn.Module): + r""" Compute a single vector summary of a sequence hidden states according to various possibilities: + Args of the config class: + summary_type: + - 'last' => [default] take the last token hidden state (like XLNet) + - 'first' => take the first token hidden state (like Bert) + - 'mean' => take the mean of all tokens hidden states + - 'token_ids' => supply a Tensor of classification token indices (GPT/GPT-2) + - 'attn' => Not implemented now, use multi-head attention + summary_use_proj: Add a projection after the vector extraction + summary_proj_to_labels: If True, the projection outputs to config.num_labels classes (otherwise to hidden_size). Default: False. + summary_activation: 'tanh' => add a tanh activation to the output, Other => no activation. Default + summary_first_dropout: Add a dropout before the projection and activation + summary_last_dropout: Add a dropout after the projection and activation + """ + def __init__(self, config): + super(SequenceSummary, self).__init__() + + self.summary_type = config.summary_type if hasattr(config, 'summary_use_proj') else 'last' + if config.summary_type == 'attn': + # We should use a standard multi-head attention module with absolute positional embedding for that. + # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276 + # We can probably just use the multi-head attention module of PyTorch >=1.1.0 + raise NotImplementedError + + self.summary = Identity() + if hasattr(config, 'summary_use_proj') and config.summary_use_proj: + if hasattr(config, 'summary_proj_to_labels') and config.summary_proj_to_labels and config.num_labels > 0: + num_classes = config.num_labels + else: + num_classes = config.hidden_size + self.summary = nn.Linear(config.hidden_size, num_classes) + + self.activation = Identity() + if hasattr(config, 'summary_activation') and config.summary_activation == 'tanh': + self.activation = nn.Tanh() + + self.first_dropout = Identity() + if hasattr(config, 'summary_first_dropout') and config.summary_first_dropout > 0: + self.first_dropout = nn.Dropout(config.summary_first_dropout) + + self.last_dropout = Identity() + if hasattr(config, 'summary_last_dropout') and config.summary_last_dropout > 0: + self.last_dropout = nn.Dropout(config.summary_last_dropout) + + def forward(self, hidden_states, token_ids=None): + """ hidden_states: float Tensor in shape [bsz, seq_len, hidden_size], the hidden-states of the last layer. + token_ids: [optional] index of the classification token if summary_type == 'token_ids', + shape (bsz,) or more generally (bsz, ...) where ... are optional leading dimensions of hidden_states. 
+ if summary_type == 'token_ids' and token_ids is None: + we take the last token of the sequence as classification token + """ + if self.summary_type == 'last': + output = hidden_states[:, -1] + elif self.summary_type == 'first': + output = hidden_states[:, 0] + elif self.summary_type == 'mean': + output = hidden_states.mean(dim=1) + elif self.summary_type == 'token_ids': + if token_ids is None: + token_ids = torch.full_like(hidden_states[..., :1, :], hidden_states.shape[-2]-1, dtype=torch.long) + else: + token_ids = token_ids.unsqueeze(-1).unsqueeze(-1) + token_ids = token_ids.expand((-1,) * (token_ids.dim()-1) + (hidden_states.size(-1),)) + # shape of token_ids: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states + output = hidden_states.gather(-2, token_ids).squeeze(-2) # shape (bsz, XX, hidden_size) + elif self.summary_type == 'attn': + raise NotImplementedError + + output = self.first_dropout(output) + output = self.summary(output) + output = self.activation(output) + output = self.last_dropout(output) + + return output + + +def prune_linear_layer(layer, index, dim=0): + """ Prune a linear layer (a model parameters) to keep only entries in index. + Return the pruned layer as a new layer with requires_grad=True. + Used to remove heads. + """ + index = index.to(layer.weight.device) + W = layer.weight.index_select(dim, index).clone().detach() + if layer.bias is not None: + if dim == 1: + b = layer.bias.clone().detach() + else: + b = layer.bias[index].clone().detach() + new_size = list(layer.weight.size()) + new_size[dim] = len(index) + new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device) + new_layer.weight.requires_grad = False + new_layer.weight.copy_(W.contiguous()) + new_layer.weight.requires_grad = True + if layer.bias is not None: + new_layer.bias.requires_grad = False + new_layer.bias.copy_(b.contiguous()) + new_layer.bias.requires_grad = True + return new_layer + + +def prune_conv1d_layer(layer, index, dim=1): + """ Prune a Conv1D layer (a model parameters) to keep only entries in index. + A Conv1D work as a Linear layer (see e.g. BERT) but the weights are transposed. + Return the pruned layer as a new layer with requires_grad=True. + Used to remove heads. + """ + index = index.to(layer.weight.device) + W = layer.weight.index_select(dim, index).clone().detach() + if dim == 0: + b = layer.bias.clone().detach() + else: + b = layer.bias[index].clone().detach() + new_size = list(layer.weight.size()) + new_size[dim] = len(index) + new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device) + new_layer.weight.requires_grad = False + new_layer.weight.copy_(W.contiguous()) + new_layer.weight.requires_grad = True + new_layer.bias.requires_grad = False + new_layer.bias.copy_(b.contiguous()) + new_layer.bias.requires_grad = True + return new_layer + + +def prune_layer(layer, index, dim=None): + """ Prune a Conv1D or nn.Linear layer (a model parameters) to keep only entries in index. + Return the pruned layer as a new layer with requires_grad=True. + Used to remove heads. 
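+        Illustrative usage (example values only, not taken from the original code):
+            # keep everything except attention heads 0 and 5 (head size 64) of a 768-dim query projection
+            keep = torch.tensor([i for i in range(768) if i // 64 not in (0, 5)])
+            pruned_query = prune_layer(attention.query, keep)  # nn.Linear, so dim defaults to 0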
+ """ + if isinstance(layer, nn.Linear): + return prune_linear_layer(layer, index, dim=0 if dim is None else dim) + elif isinstance(layer, Conv1D): + return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim) + else: + raise ValueError("Can't prune layer of class {}".format(layer.__class__)) diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/waypoint_pred/transformer/waypoint_bert.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/waypoint_pred/transformer/waypoint_bert.py new file mode 100755 index 0000000..d4d4147 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/waypoint_pred/transformer/waypoint_bert.py @@ -0,0 +1,217 @@ +# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license. +# Modified in Recurrent VLN-BERT, 2020, Yicong.Hong@anu.edu.au + +from __future__ import absolute_import, division, print_function, unicode_literals +import logging +import math +import torch +from torch import nn +import torch.nn.functional as F +from torch.nn import CrossEntropyLoss, MSELoss + +from .pytorch_transformer.modeling_bert import (BertEmbeddings, + BertSelfAttention, BertAttention, BertEncoder, BertLayer, + BertSelfOutput, BertIntermediate, BertOutput, + BertPooler, BertLayerNorm, BertPreTrainedModel, + BertPredictionHeadTransform) + +logger = logging.getLogger(__name__) + +class VisPosEmbeddings(nn.Module): + def __init__(self, config): + super(VisPosEmbeddings, self).__init__() + self.position_embeddings = nn.Embedding(24, config.hidden_size) + self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) + # self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, input_vis_feats, position_ids=None): + seq_length = input_vis_feats.size(1) + if position_ids is None: + position_ids = torch.arange(seq_length, dtype=torch.long, device=input_vis_feats.device) + position_ids = position_ids.unsqueeze(0).repeat(input_vis_feats.size(0), 1) + + vis_embeddings = input_vis_feats + position_embeddings = self.position_embeddings(position_ids) + + embeddings = vis_embeddings + position_embeddings + embeddings = self.LayerNorm(embeddings) + # embeddings = self.dropout(embeddings) + return embeddings + +class CaptionBertSelfAttention(BertSelfAttention): + """ + Modified from BertSelfAttention to add support for output_hidden_states. + """ + def __init__(self, config): + super(CaptionBertSelfAttention, self).__init__(config) + self.config = config + + def forward(self, hidden_states, attention_mask, head_mask=None, + history_state=None): + if history_state is not None: + x_states = torch.cat([history_state, hidden_states], dim=1) + mixed_query_layer = self.query(hidden_states) + mixed_key_layer = self.key(x_states) + mixed_value_layer = self.value(x_states) + else: + mixed_query_layer = self.query(hidden_states) + mixed_key_layer = self.key(hidden_states) + mixed_value_layer = self.value(hidden_states) + + ''' language feature only provide Keys and Values ''' + query_layer = self.transpose_for_scores(mixed_query_layer) + key_layer = self.transpose_for_scores(mixed_key_layer) + value_layer = self.transpose_for_scores(mixed_value_layer) + + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + # Apply the attention mask is (precomputed for all layers in BertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. 
+ attention_probs = nn.Softmax(dim=-1)(attention_scores) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + + outputs = (context_layer, attention_scores) + + return outputs + + +class CaptionBertAttention(BertAttention): + """ + Modified from BertAttention to add support for output_hidden_states. + """ + def __init__(self, config): + super(CaptionBertAttention, self).__init__(config) + self.self = CaptionBertSelfAttention(config) + self.output = BertSelfOutput(config) + self.config = config + + def forward(self, input_tensor, attention_mask, head_mask=None, + history_state=None): + ''' transformer processing ''' + self_outputs = self.self(input_tensor, attention_mask, head_mask, history_state) + + ''' feed-forward network with residule ''' + attention_output = self.output(self_outputs[0], input_tensor) + + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + + return outputs + + +class CaptionBertLayer(BertLayer): + """ + Modified from BertLayer to add support for output_hidden_states. + """ + def __init__(self, config): + super(CaptionBertLayer, self).__init__(config) + self.attention = CaptionBertAttention(config) + self.intermediate = BertIntermediate(config) + self.output = BertOutput(config) + + def forward(self, hidden_states, attention_mask, head_mask=None, + history_state=None): + + attention_outputs = self.attention(hidden_states, attention_mask, + head_mask, history_state) + + ''' feed-forward network with residule ''' + attention_output = attention_outputs[0] + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + outputs = (layer_output,) + attention_outputs[1:] + + return outputs + + +class CaptionBertEncoder(BertEncoder): + """ + Modified from BertEncoder to add support for output_hidden_states. 
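+    Note (added for clarity): in addition to the final hidden states, this encoder
+    returns the raw attention scores of the last layer ("slang_attention_score"),
+    which are passed up through BertImgModel's outputs.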
+ """ + def __init__(self, config): + super(CaptionBertEncoder, self).__init__(config) + self.output_attentions = config.output_attentions + self.output_hidden_states = config.output_hidden_states + # 12 Bert layers + self.layer = nn.ModuleList([CaptionBertLayer(config) for _ in range(config.num_hidden_layers)]) + self.config = config + + def forward(self, hidden_states, attention_mask, head_mask=None, + encoder_history_states=None): + + for i, layer_module in enumerate(self.layer): + history_state = None if encoder_history_states is None else encoder_history_states[i] # default None + + layer_outputs = layer_module( + hidden_states, attention_mask, head_mask[i], + history_state) + hidden_states = layer_outputs[0] + + if i == self.config.num_hidden_layers - 1: + slang_attention_score = layer_outputs[1] + + outputs = (hidden_states, slang_attention_score) + + return outputs + + +class BertImgModel(nn.Module): + """ Expand from BertModel to handle image region features as input + """ + def __init__(self, config): + super(BertImgModel, self).__init__() + self.config = config + # self.vis_pos_embeds = VisPosEmbeddings(config) + self.encoder = CaptionBertEncoder(config) + + def forward(self, input_x, attention_mask=None): + + extended_attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + + head_mask = [None] * self.config.num_hidden_layers + + ''' positional encodings ''' + # input_x = self.vis_pos_embeds(input_x) + + ''' pass to the Transformer layers ''' + encoder_outputs = self.encoder(input_x, + extended_attention_mask, head_mask=head_mask) + + outputs = (encoder_outputs[0],) + encoder_outputs[1:] + + return outputs + + +class WaypointBert(nn.Module): + """ + Modified from BertForMultipleChoice to support oscar training. 
+ """ + def __init__(self, config=None): + super(WaypointBert, self).__init__() + self.config = config + self.bert = BertImgModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, input_x, attention_mask=None): + + outputs = self.bert(input_x, attention_mask=attention_mask) + + sequence_output = outputs[0] + sequence_output = self.dropout(sequence_output) + + return sequence_output \ No newline at end of file diff --git a/Downstream/Visual-Language-Navigation/vlnce_baselines/waypoint_pred/utils.py b/Downstream/Visual-Language-Navigation/vlnce_baselines/waypoint_pred/utils.py new file mode 100755 index 0000000..7eaf219 --- /dev/null +++ b/Downstream/Visual-Language-Navigation/vlnce_baselines/waypoint_pred/utils.py @@ -0,0 +1,102 @@ + +import torch +import numpy as np +import sys +import glob +import json + +def neighborhoods(mu, x_range, y_range, sigma, circular_x=True, gaussian=False): + """ Generate masks centered at mu of the given x and y range with the + origin in the centre of the output + Inputs: + mu: tensor (N, 2) + Outputs: + tensor (N, y_range, s_range) + """ + x_mu = mu[:,0].unsqueeze(1).unsqueeze(1) + y_mu = mu[:,1].unsqueeze(1).unsqueeze(1) + + # Generate bivariate Gaussians centered at position mu + x = torch.arange(start=0,end=x_range, device=mu.device, dtype=mu.dtype).unsqueeze(0).unsqueeze(0) + y = torch.arange(start=0,end=y_range, device=mu.device, dtype=mu.dtype).unsqueeze(1).unsqueeze(0) + + y_diff = y - y_mu + x_diff = x - x_mu + if circular_x: + x_diff = torch.min(torch.abs(x_diff), torch.abs(x_diff + x_range)) + if gaussian: + output = torch.exp(-0.5 * ((x_diff/sigma[0])**2 + (y_diff/sigma[1])**2 )) + else: + output = torch.logical_and( + torch.abs(x_diff) <= sigma[0], torch.abs(y_diff) <= sigma[1] + ).type(mu.dtype) + + return output + + +def nms(pred, max_predictions=10, sigma=(1.0,1.0), gaussian=False): + ''' Input (batch_size, 1, height, width) ''' + + shape = pred.shape + + output = torch.zeros_like(pred) + flat_pred = pred.reshape((shape[0],-1)) # (BATCH_SIZE, 24*48) + supp_pred = pred.clone() + flat_output = output.reshape((shape[0],-1)) # (BATCH_SIZE, 24*48) + + for i in range(max_predictions): + # Find and save max over the entire map + flat_supp_pred = supp_pred.reshape((shape[0],-1)) + val, ix = torch.max(flat_supp_pred, dim=1) + indices = torch.arange(0,shape[0]) + flat_output[indices,ix] = flat_pred[indices,ix] + + # Suppression + y = ix / shape[-1] + x = ix % shape[-1] + mu = torch.stack([x,y], dim=1).float() + + g = neighborhoods(mu, shape[-1], shape[-2], sigma, gaussian=gaussian) + + supp_pred *= (1-g.unsqueeze(1)) + + output[output < 0] = 0 + return output + + +def print_progress(iteration, total, prefix='', suffix='', decimals=1, bar_length=50): + """ + Call in a loop to create terminal progress bar + @params: + iteration - Required : current iteration (Int) + total - Required : total iterations (Int) + prefix - Optional : prefix string (Str) + suffix - Optional : suffix string (Str) + decimals - Optional : positive number of decimals in percent complete (Int) + bar_length - Optional : character length of bar (Int) + """ + str_format = "{0:." 
+ str(decimals) + "f}" + percents = str_format.format(100 * (iteration / float(total))) + filled_length = int(round(bar_length * iteration / float(total))) + bar = '█' * filled_length + '-' * (bar_length - filled_length) + + sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)), + + if iteration == total: + sys.stdout.write('\n') + sys.stdout.flush() + + +def get_attention_mask(num_imgs=12, neighbor=1): + assert neighbor <= 5 + + mask = np.zeros((num_imgs,num_imgs)) + t = np.zeros(num_imgs) + t[:neighbor+1] = np.ones(neighbor+1) + if neighbor != 0: + t[-neighbor:] = np.ones(neighbor) + for ri in range(num_imgs): + mask[ri] = t + t = np.roll(t, 1) + + return torch.from_numpy(mask).reshape(1,1,num_imgs,num_imgs).long() \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/__init__.py b/Downstream/multi-modalities-downstream/CoTrain/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Downstream/multi-modalities-downstream/CoTrain/config.py b/Downstream/multi-modalities-downstream/CoTrain/config.py new file mode 100644 index 0000000..3c8a095 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/config.py @@ -0,0 +1,936 @@ +from sacred import Experiment + +ex = Experiment("CoTrain", save_git_info=False) + + +def _loss_names(d): + ret = { + # pretrain + "vtm": 0, + "mlm": 0, + "mpp": 0, + "vtc": 0, + "vcop": 0, + "dino": 0, + # downstream + "vqa": 0, + "openend_vqa": 0, + "mc_vqa": 0, + "nlvr2": 0, + "irtr": 0, + "multiple_choice": 0, + 'vcr_q2a': 0, + 'zs_classify': 0, + 'contrastive': 0, + 'cap': 0, + 'mim': 0, + } + ret.update(d) + return ret + + +@ex.config +def config(): + exp_name = "CoTrain" + seed = 0 + video_datasets = ["wevid", "howto100m", "yttemporal"] + image_datasets = ["cc3m", "cc12m"] + val_datasets = [] + loss_names = _loss_names({"vtm": 1, "mlm": 1}) + val_loss_names = _loss_names({}) + batch_size = 4096 # 128 x 32 + # this is a desired batch size; pl trainer will accumulate gradients when per step batch is smaller. 
+ linear_evaluation = False + + draw_false_image = 1 + # video setting + train_transform_keys = ["pixelbert"] + val_transform_keys = ["pixelbert"] + image_size = 224 # 384/224 + patch_size = 16 # 16/32 + max_image_len = -1 + draw_false_video = 1 + video_only = False + num_frames = 3 # input video frames + + # Text Setting + vqav2_label_size = 3129 + msrvttqa_label_size = 1501 + max_text_len = 40 # original: 40, 200: for long sentences/paragraph + tokenizer = "pretrained/bert-base-uncased" + vocab_size = 30522 + whole_word_masking = False + mlm_prob = 0.15 + draw_false_text = 0 + + draw_options_text = 0 + # Transformer Setting + vit = "vit_base_patch16_224" # "vit_base_patch32_384" / "vit_base_patch16_224" + hidden_size = 768 + num_heads = 12 + num_layers = 12 + mlp_ratio = 4 + drop_rate = 0.1 + shared_embedding_dim = 512 # add for contrastive learning 512/256 + # model_temporal_frames = 4 # add for model define, may not consistent with input data + + save_checkpoints_interval = 1 # save each 5 epochs + + # Optimizer Setting + optim_type = "adamw" + learning_rate = 1e-4 + weight_decay = 0.01 + decay_power = 1 + max_epoch = 100 + max_steps = 25000 + warmup_steps = 2500 + end_lr = 0 + lr_mult = 1 # multiply lr for downstream heads + backend = 'a100' # gpu: a100/v100/others + + # Downstream Setting + get_recall_metric = False + get_ind_recall_metric = False + retrieval_views = 3 # how many views for retrieval + + # PL Trainer Setting + resume_from = None + fast_dev_run = False + val_check_interval = 1.0 + test_only = False + + # below params varies with the environment + data_root = "" + log_dir = "result" + per_gpu_batchsize = 0 # you should define this manually with per_gpu_batch_size=# + num_gpus = 1 + num_nodes = 1 + load_path = "" + num_workers = 16 # 0 will not lead to unstable memory usage but slow training ? 
+ precision = 16 + model_dir = None + + # clip related settings + clip = "" + clip_type = "ori" # In ["evl", "ori"] + clip_freeze = False + clip_freeze_text = False + clip_dpr = 0.0 + prompt_type = "all" + clip_lr_mult = 1 + clip_no_pretrain = False + clip_grad_unfreeze_int = 0 # <= 0 for nothing + clip_evl_dropout = 0.5 + mim_prob = 0.90 + clip_mlm_decoder_n_layers = 4 + clip_mim_decoder_n_layers = 4 + clip_mim_decoder_width = 512 + clip_cap_decoder_n_layers = 4 + clip_init_zero = True + clip_qa_type = "vtc" # vtc for contrastive, cap for caption head, both for both + clip_mc_type = "vtc" # vtc for contrastive, cap for caption head, both for both + # weight = clip_weight * clip_wiseft_coef + load_path * (1 - clip_wiseft_coef), <= 0 for not using + clip_wiseft_coef = -1.0 + clip_mmt = False + clip_alt_data = False + image_data_mult = 1 + clip_cls_dropout = 0.5 + save_last = True + save_top_k = 1 + clip_use_checkpoint = False + clip_checkpoint_num = [0, 0, 0] + clip_momentum_ckpt = 1 + clip_momentum_interval = 1 + + +# Named configs for "environment" which define gpus and nodes, and paths +@ex.named_config +def env_dandelin(): + data_root = "/data2/dsets/dataset" + log_dir = "/data2/CoTrain/result" + num_gpus = 8 + num_nodes = 1 + +# ================================ begin: pretrain ====================== +@ex.named_config +def task_mlm_vtm_cotrain(): + exp_name = "mlm_vtm" + video_datasets = ["webvid"] # "howto100m", + image_datasets = ["cc3m"] + loss_names = _loss_names({"vtm": 1, "mlm": 1}) + batch_size = 2048 + max_epoch = 30 + max_image_len = -1 + val_check_interval = 1.0 + save_checkpoints_interval = 3 # save each 5 epochs + +@ex.named_config +def task_mlm_vtm_cotrain_seven(): + exp_name = "mlm_vtm" + video_datasets = ["webvid", 'yttemporal', "howto100m"] # 'yttemporal', "howto100m", + image_datasets = ["cc3m", "cc12m", "vg", 'coco'] # , "vg", 'coco' + loss_names = _loss_names({"vtm": 1, "mlm": 1}) + batch_size = 2048 + max_epoch = 30 + max_image_len = -1 + val_check_interval = 1.0 + save_checkpoints_interval = 1 # save each 5 epochs + +@ex.named_config +def task_mlm_vtm_vcop_cotrain(): + exp_name = "mlm_vtm" + video_datasets = ["howto100m", "webvid"] + image_datasets = ["cc3m"] + loss_names = _loss_names({"vtm": 1, "mlm": 1, "vcop": 1}) + batch_size = 2048 + max_epoch = 30 + max_image_len = -1 + val_check_interval = 1.0 + save_checkpoints_interval = 5 # save each 5 epochs + +@ex.named_config +def task_mlm_vtm_dino_cotrain(): + exp_name = "mlm_vtm_dino_1f" + video_datasets = ["webvid"] # "howto100m", + image_datasets = ["cc3m"] + loss_names = _loss_names({"vtm": 1, "mlm": 1, "dino": 1}) # already include dino + train_transform_keys = ["pixelbert_randaug"] + val_transform_keys = ["pixelbert_randaug"] + batch_size = 1024 + max_epoch = 100 + max_image_len = -1 + val_check_interval = 1.0 + save_checkpoints_interval = 1 # save each 5 epochs +# ================================ end: pretrain ====================== + + +# ================================ begin: finetune ====================== +# ========== begin: multiple choice ================ +# = for lsmdc multiple choice +@ex.named_config +def task_finetune_lsmdcchoice(): + exp_name = "finetune_lsmdc_choice" + video_datasets = ["lsmdc_choice"] + image_datasets = [] + loss_names = _loss_names({"multiple_choice": 1}) + batch_size = 256 + max_epoch = 20 + max_steps = None + warmup_steps = 0.1 + draw_false_text = 5 # 5 choices + learning_rate = 1e-5 + val_check_interval = 0.5 + lr_mult = 10 + +# = for msrvtt multiple choice +@ex.named_config 
+def task_finetune_msrvttchoice(): + exp_name = "finetune_msrvtt_choice" + video_datasets = ["msrvtt_choice"] + image_datasets = [] + loss_names = _loss_names({"multiple_choice": 1}) + batch_size = 256 + max_epoch = 10 + max_steps = None + warmup_steps = 0.1 + draw_false_text = 5 # 5 choices + learning_rate = 1e-4 + val_check_interval = 0.5 + lr_mult = 10 +# ========== end: multiple choice ================ +# ind itc +# ========== begin: retrieval ================ +@ex.named_config +def task_finetune_vtc_irtr_msrvtt(): + exp_name = "finetune_vtc_irtr_msrvtt" + video_datasets = ["msrvtt"] + image_datasets = [] + train_transform_keys = ["pixelbert_randaug"] + loss_names = _loss_names({"vtc": 1}) + batch_size = 1024 + max_epoch = 50 + max_steps = None + warmup_steps = 0.1 # 0.1/0.3 + retrieval_views = 1 # use 5 views + get_recall_metric = False + get_ind_recall_metric = True + draw_false_text = 15 + learning_rate = 6e-4 # 1/3e-4 +# ========== end: retrieval ================ +# ========== begin: vqa ================ +# for msvd qa +@ex.named_config +def task_finetune_msvdqa(): + exp_name = "finetune_msvd_qa" + video_datasets = ["msvdqa"] + image_datasets = [] + loss_names = _loss_names({"openend_vqa": 1}) # msvd have same number of answers with msrvtt + batch_size = 512 + msrvttqa_label_size = 1001 # vqa voculbary length 1000 + 1 background + max_epoch = 20 + max_steps = None + warmup_steps = 0.1 + draw_false_image = 0 + learning_rate = 1e-4 # 1e-4 + val_check_interval = 1.0 + lr_mult = 10 + +# = add by for msrvtt qa +@ex.named_config +def task_finetune_msrvttqa(): + exp_name = "finetune_msrvtt_qa" + video_datasets = ["msrvttqa"] + image_datasets = [] + loss_names = _loss_names({"openend_vqa": 1}) + batch_size = 512 + msrvttqa_label_size = 1501 # 1501 / 4540 + max_epoch = 20 + max_steps = None + warmup_steps = 0.1 # 0.1 + draw_false_image = 1 + draw_false_text = 1 + learning_rate = 1e-4 # 1e-4 normal + val_check_interval = 1.0 + lr_mult = 10 + + + +# = for tgif qa on frameqa +@ex.named_config +def task_finetune_tgifqa(): + exp_name = "finetune_tgif_qa" + video_datasets = ["tgif"] + image_datasets = [] + loss_names = _loss_names({"openend_vqa": 1}) + batch_size = 512 + msrvttqa_label_size = 1541 # vqa voculbary length 1540 + 1 background + max_epoch = 20 + max_steps = None + warmup_steps = 0.1 + draw_false_image = 0 + learning_rate = 1e-4 # 1e-4 + val_check_interval = 1.0 + lr_mult = 10 + +# = for tgif qa on action/trans +@ex.named_config +def task_finetune_tgif_action_trans(): + exp_name = "finetune_tgif_action_trans" + video_datasets = ["tgifqa"] + image_datasets = [] + loss_names = _loss_names({"mc_vqa": 1}) + batch_size = 512 + max_epoch = 100 + max_steps = None + warmup_steps = 0.1 + draw_false_image = 0 + draw_options_text = 5 # 5 choices + learning_rate = 1e-4 # 1e-4 + val_check_interval = 1.0 + lr_mult = 10 + +# ========== end: vqa ================ + + +# Task5: ===================== action recognition ===================== +@ex.named_config +def task_finetune_action_recognition_hmdb51(): + exp_name = "finetune_action_recognition_hmdb51" + video_datasets = ["hmdb51"] + image_datasets = [] + loss_names = _loss_names({"openend_vqa": 1}) # have + msrvttqa_label_size = 52 # 51 + 1 + batch_size = 256 + max_epoch = 50 + max_steps = None + warmup_steps = 0.1 + draw_false_text = 15 + learning_rate = 1e-4 + + +@ex.named_config +def task_finetune_action_recognition_k400(): + exp_name = "finetune_action_recognition_k400" + video_datasets = ["k400"] + image_datasets = [] + loss_names = 
_loss_names({"openend_vqa": 1}) # have + msrvttqa_label_size = 401 # 400 + 1 + batch_size = 256 + max_epoch = 50 + max_steps = None + warmup_steps = 0.1 + draw_false_text = 15 + learning_rate = 3e-4 + val_check_interval = 1.0 +# end: ===================== action recognition ===================== + +# ================================ end: finetune ====================== +@ex.named_config +def step25k(): + max_epoch = 100 + max_steps = 25000 + + +@ex.named_config +def step50k(): + max_epoch = 100 + max_steps = 50000 + + +@ex.named_config +def step100k(): + max_epoch = 100 + max_steps = 100000 + + +@ex.named_config +def step200k(): + max_epoch = 200 + max_steps = 200000 + + +@ex.named_config +def step400k(): + max_epoch = 400 + max_steps = 400000 + + +@ex.named_config +def epoch1(): + max_epoch = 1 + max_steps = None + + +@ex.named_config +def vit32_base(): + vit = "vit_base_patch32_384" + patch_size = 32 + hidden_size = 768 + num_heads = 12 + num_layers = 12 + +# ============================= begin: clip_kc pretrain =================== +# = for msrvtt multiple choice +@ex.named_config +def clip_kc_finetune_msrvttchoice(): + exp_name = "clip_kc_finetune_msrvtt_choice" + video_datasets = ["msrvtt_choice"] + image_datasets = [] + loss_names = _loss_names({"multiple_choice": 1}) + batch_size = 512 + max_epoch = 10 + max_steps = None + warmup_steps = 0.1 + draw_false_text = 5 # 5 choices + learning_rate = 1e-4 + val_check_interval = 0.5 + lr_mult = 10 + max_text_len = 77 + clip = "/mnt/lustre/share_data/likunchang.vendor/code/EVL/ViT-B-16.pt" + clip_type = "kc" + + +@ex.named_config +def clip_kc_contrastive_howto_cc3m_choice(): + exp_name = "clip_kc_contrastive_howto_cc3m_choice" + video_datasets = ["howto100m"] + image_datasets = ["cc3m"] + train_transform_keys = ["open_clip"] + val_transform_keys = ["open_clip"] + loss_names = _loss_names({"contrastive": 1}) + batch_size = 1024 + max_epoch = 10 + max_text_len = 77 + clip = "/mnt/lustre/share_data/likunchang.vendor/code/EVL/ViT-B-16.pt" + clip_type = "kc" + vocab_size = 49408 + draw_false_text = 5 + val_datasets = ["msrvtt_choice", "lsmdc"] + val_loss_names = _loss_names({"multiple_choice": 1}) + + +@ex.named_config +def clip_kc_contrastive_2plus3_choice(): + exp_name = "clip_kc_contrastive_2plus3_choice" + video_datasets = ["webvid", "howto100m"] + image_datasets = ["cc3m", "cc12m", "yfcc15m"] + train_transform_keys = ["open_clip"] + val_transform_keys = ["open_clip"] + loss_names = _loss_names({"contrastive": 1}) + batch_size = 1024 + max_epoch = 10 + max_text_len = 77 + clip = "/mnt/lustre/share_data/likunchang.vendor/code/EVL/ViT-B-16.pt" + clip_type = "kc" + vocab_size = 49408 + draw_false_text = 5 + val_datasets = ["msrvtt_choice", "lsmdc_choice"] + val_loss_names = _loss_names({"multiple_choice": 1}) + + +@ex.named_config +def clip_kc_contrastive_3plus4_choice(): + exp_name = "clip_kc_contrastive_3plus4_choice" + video_datasets = ["webvid", "howto100m", "webvid10m"] + image_datasets = ["cc3m", "cc12m", "yfcc15m", "laion400m"] + train_transform_keys = ["open_clip"] + val_transform_keys = ["open_clip"] + loss_names = _loss_names({"contrastive": 1}) + batch_size = 1024 + max_epoch = 10 + max_text_len = 77 + clip = "/mnt/lustre/share_data/likunchang.vendor/code/EVL/ViT-B-16.pt" + clip_type = "kc" + vocab_size = 49408 + draw_false_text = 5 + val_datasets = ["msrvtt_choice", "lsmdc_choice"] + val_loss_names = _loss_names({"multiple_choice": 1}) + + +@ex.named_config +def clip_kc_contrastive_cap_3plus4_choice(): + exp_name = 
"clip_kc_contrastive_3plus4_choice" + video_datasets = ["webvid", "howto100m", "webvid10m"] + image_datasets = ["cc3m", "cc12m", "yfcc15m", "laion400m"] + train_transform_keys = ["open_clip"] + val_transform_keys = ["open_clip"] + loss_names = _loss_names({"contrastive": 1, "cap": 1}) + batch_size = 1024 + max_epoch = 10 + max_text_len = 77 + clip = "/mnt/lustre/share_data/likunchang.vendor/code/EVL/ViT-B-16.pt" + clip_type = "kc" + vocab_size = 49408 + draw_false_text = 5 + val_datasets = ["msrvtt_choice", "lsmdc_choice"] + val_loss_names = _loss_names({"multiple_choice": 1}) + + +@ex.named_config +def clip_kc_new_B16_vtc_cap_3plusM_choice(): + exp_name = "clip_kc_new_L14_vtc_cap_3plusM_choice" + video_datasets = ["webvid", "howto100m", "webvid10m"] + image_datasets = ["mix100m"] + train_transform_keys = ["open_clip"] + val_transform_keys = ["open_clip"] + loss_names = _loss_names({"contrastive": 1, "cap": 1}) + per_gpu_batchsize = 32 + num_frames = 8 + max_epoch = 10 + max_text_len = 77 + learning_rate = 1e-4 + clip = "/mnt/lustre/share_data/likunchang.vendor/code/EVL/ViT-B-16.pt" + clip_type = "kc_new" + vocab_size = 49408 + draw_false_text = 5 + val_datasets = ["msrvtt_choice", "lsmdc_choice"] + decay_power = "cosine" + clip_lr_mult = 0.1 + weight_decay = 0.2 + clip_evl_dropout = 0.0 + clip_cap_decoder_n_layers = 6 + warmup_steps = 4000 + clip_alt_data = True + image_data_mult = 6 + val_loss_names = _loss_names({"multiple_choice": 1}) + + +@ex.named_config +def clip_kc_new_L14_vtc_cap_3plusM_choice(): + exp_name = "clip_kc_new_L14_vtc_cap_3plusM_choice" + video_datasets = ["webvid", "howto100m", "webvid10m"] + image_datasets = ["mix100m"] + train_transform_keys = ["open_clip"] + val_transform_keys = ["open_clip"] + loss_names = _loss_names({"contrastive": 1, "cap": 1}) + per_gpu_batchsize = 14 + num_frames = 8 + max_epoch = 10 + max_text_len = 77 + learning_rate = 8e-5 + clip = "/mnt/lustre/share_data/likunchang.vendor/code/EVL/ViT-L-14.pt" + clip_type = "kc_new" + vocab_size = 49408 + draw_false_text = 5 + val_datasets = ["msrvtt_choice", "lsmdc_choice"] + decay_power = "cosine" + clip_lr_mult = 0.1 + weight_decay = 0.2 + clip_evl_dropout = 0.0 + clip_cap_decoder_n_layers = 6 + warmup_steps = 4000 + clip_alt_data = True + image_data_mult = 6 + val_loss_names = _loss_names({"multiple_choice": 1}) + + +@ex.named_config +def clip_kc_new_L14_336_vtc_cap_4plusM_choice(): + exp_name = "clip_kc_new_L14_336_vtc_cap_4plusM_choice" + video_datasets = ["webvid", "howto100m", "webvid10m", "youtube"] + image_datasets = ["mix100m"] + train_transform_keys = ["open_clip"] + val_transform_keys = ["open_clip"] + loss_names = _loss_names({"contrastive": 1, "cap": 1}) + image_size = 336 + per_gpu_batchsize = 24 + clip_use_checkpoint = True + clip_checkpoint_num = [23, 100, 100] + num_frames = 8 + max_epoch = 2 + max_steps = None + max_text_len = 77 + learning_rate = 4e-6 + clip = "/mnt/lustre/share_data/likunchang.vendor/code/EVL/ViT-L-14-336px.pt" + clip_type = "kc_new" + vocab_size = 49408 + draw_false_text = 5 + val_datasets = ["msrvtt_choice", "lsmdc_choice"] + decay_power = "cosine" + weight_decay = 0.2 + clip_evl_dropout = 0.0 + clip_cap_decoder_n_layers = 6 + warmup_steps = 2000 + clip_alt_data = True + image_data_mult = 6 + val_loss_names = _loss_names({"multiple_choice": 1}) +# ============================== end: clip_kc pretrain ==================== + + +@ex.named_config +def clip_finetune_msrvttqa(): + exp_name = "clip_finetune_msrvtt_qa" + video_datasets = ["msrvttqa"] + image_datasets = [] + 
loss_names = _loss_names({"openend_vqa": 1}) + batch_size = 512 + msrvttqa_label_size = 1501 # 1501 / 4540 + max_epoch = 20 + max_steps = None + warmup_steps = 0.1 # 0.1 + draw_false_image = 1 + draw_false_text = 1 + learning_rate = 1e-4 # 1e-4 normal + val_check_interval = 1.0 + lr_mult = 10 + max_text_len = 77 + clip = "/mnt/lustre/share_data/likunchang.vendor/code/EVL/ViT-B-16.pt" + clip_type = "ori" + + +@ex.named_config +def clip_finetune_tgifqa(): + exp_name = "clip_finetune_tgif_qa" + video_datasets = ["tgif"] + image_datasets = [] + loss_names = _loss_names({"openend_vqa": 1}) + batch_size = 512 + msrvttqa_label_size = 1541 # vqa voculbary length 1540 + 1 background + max_epoch = 20 + max_steps = None + warmup_steps = 0.1 + draw_false_image = 0 + learning_rate = 1e-4 # 1e-4 + val_check_interval = 1.0 + lr_mult = 10 + max_text_len = 77 + clip = "/mnt/lustre/share_data/likunchang.vendor/code/EVL/ViT-B-16.pt" + clip_type = "ori" + + +@ex.named_config +def clip_finetune_msvdqa(): + exp_name = "clip_finetune_msvd_qa" + video_datasets = ["msvdqa"] + image_datasets = [] + loss_names = _loss_names({"openend_vqa": 1}) # msvd have same number of answers with msrvtt + batch_size = 512 + msrvttqa_label_size = 1001 # vqa voculbary length 1000 + 1 background + max_epoch = 20 + max_steps = None + warmup_steps = 0.1 + draw_false_image = 0 + learning_rate = 1e-4 # 1e-4 + val_check_interval = 1.0 + lr_mult = 10 + max_text_len = 77 + clip = "/mnt/lustre/share_data/likunchang.vendor/code/EVL/ViT-B-16.pt" + clip_type = "ori" + + +@ex.named_config +def clip_finetune_zs_k400(): + exp_name = "clip_finetune_zs_k400" + video_datasets = ["k400_video"] + image_datasets = [] + loss_names = _loss_names({"zs_classify": 1}) + batch_size = 256 + test_only = True + max_text_len = 77 + clip = "/mnt/lustre/share_data/likunchang.vendor/code/EVL/ViT-B-16.pt" + clip_type = "ori" + + +# ============================== end: clip_kc pretrain ==================== +@ex.named_config +def clip_vtc_choice(): + exp_name = "clip_vtc_choice" + video_datasets = ["webvid10m"] + image_datasets = [] + train_transform_keys = ["open_clip"] + val_transform_keys = ["open_clip"] + val_datasets = ["msrvtt_choice", "lsmdc_choice"] + loss_names = _loss_names({"contrastive": 1}) + + per_gpu_batchsize = 24 + num_frames = 16 + max_epoch = 10 + max_text_len = 77 + learning_rate = 1e-4 + clip = "/mnt/petrelfs/share_data/liyizhuo/pretrained/clip_pretrained_models/ViT-B-16.pt" + clip_type = "ori" + vocab_size = 49408 + draw_false_video = 0 + draw_false_text = 5 + decay_power = "cosine" + weight_decay = 0.2 + warmup_steps = 4000 + val_loss_names = _loss_names({"multiple_choice": 1}) + + +@ex.named_config +def clip_vtc_mim_choice(): + exp_name = "clip_vtc_mim_choice" + video_datasets = ["webvid10m"] + image_datasets = [] + train_transform_keys = ["open_clip"] + val_transform_keys = ["open_clip"] + val_datasets = ["msrvtt_choice", "lsmdc_choice"] + loss_names = _loss_names({"contrastive": 1, "mim": 1}) + + per_gpu_batchsize = 128 + num_frames = 16 + max_epoch = 10 + max_text_len = 77 + learning_rate = 1e-4 + clip = "/mnt/petrelfs/share_data/liyizhuo/pretrained/clip_pretrained_models/ViT-B-16.pt" + clip_type = "ori" + vocab_size = 49408 + draw_false_video = 0 + draw_false_text = 5 + decay_power = "cosine" + weight_decay = 0.2 + warmup_steps = 4000 + val_loss_names = _loss_names({"multiple_choice": 1}) + mim_prob = 0.90 + clip_mim_decoder_n_layers = 1 + + +@ex.named_config +def clip_vtc_mim_mlm_choice(): + exp_name = "clip_vtc_mim_mlm_choice" + 
video_datasets = ["webvid10m"] + image_datasets = [] + train_transform_keys = ["open_clip"] + val_transform_keys = ["open_clip"] + val_datasets = ["msrvtt_choice", "lsmdc_choice"] + loss_names = _loss_names({"contrastive": 1, "mim": 1, "mlm": 1}) + + per_gpu_batchsize = 128 + num_frames = 16 + max_epoch = 10 + max_text_len = 77 + learning_rate = 1e-4 + clip = "/mnt/petrelfs/share_data/liyizhuo/pretrained/clip_pretrained_models/ViT-B-16.pt" + clip_type = "ori" + vocab_size = 49408 + draw_false_video = 0 + draw_false_text = 5 + decay_power = "cosine" + weight_decay = 0.2 + warmup_steps = 4000 + val_loss_names = _loss_names({"multiple_choice": 1}) + mim_prob = 0.90 + clip_mim_decoder_n_layers = 1 + + +@ex.named_config +def clip_vtc_mlm_choice(): + exp_name = "clip_vtc_mlm_choice" + video_datasets = ["webvid10m"] + image_datasets = [] + train_transform_keys = ["open_clip"] + val_transform_keys = ["open_clip"] + val_datasets = ["msrvtt_choice", "lsmdc_choice"] + loss_names = _loss_names({"contrastive": 1, "mlm": 1}) + + per_gpu_batchsize = 128 + num_frames = 16 + max_epoch = 10 + max_text_len = 77 + learning_rate = 1e-4 + clip = "/mnt/petrelfs/share_data/liyizhuo/pretrained/clip_pretrained_models/ViT-B-16.pt" + clip_type = "ori" + vocab_size = 49408 + draw_false_video = 0 + draw_false_text = 5 + decay_power = "cosine" + weight_decay = 0.2 + warmup_steps = 4000 + val_loss_names = _loss_names({"multiple_choice": 1}) + + +# = for msrvtt multiple choice +@ex.named_config +def clip_finetune_msrvttchoice(): + exp_name = "clip_finetune_msrvtt_choice" + video_datasets = ["webvid10m"] + val_datasets = ["msrvtt_choice", "lsmdc_choice"] + image_datasets = [] + loss_names = _loss_names({"multiple_choice": 1}) + num_frames = 16 + batch_size = 512 + max_epoch = 10 + warmup_steps = 0.1 + draw_false_text = 5 # 5 choices + learning_rate = 1e-4 + val_check_interval = 0.5 + max_text_len = 77 + clip = "/mnt/petrelfs/share_data/liyizhuo/pretrained/clip_pretrained_models/ViT-B-16.pt" + clip_type = "ori" + +# ============================== end: clip_kc new nc pretrain ==================== +@ex.named_config +def clip_kc_nc_vtc_choice(): + exp_name = "clip_kc_nc_vtc_choice" + video_datasets = ["webvid10m"] + image_datasets = [] + train_transform_keys = ["open_clip"] + val_transform_keys = ["open_clip"] + val_datasets = ["msrvtt_choice", "lsmdc_choice"] + loss_names = _loss_names({"contrastive": 1}) + + num_frames = 8 + max_epoch = 10 + max_text_len = 77 + learning_rate = 1e-5 + clip = "/mnt/petrelfs/share_data/liyizhuo/pretrained/clip_pretrained_models/ViT-B-16.pt" + clip_type = "kc_new" + vocab_size = 49408 + draw_false_video = 0 + draw_false_text = 5 + decay_power = "cosine" + weight_decay = 0.2 + warmup_steps = 4000 + clip_freeze_text = True + val_loss_names = _loss_names({"multiple_choice": 1}) + per_gpu_batchsize = 32 + batch_size = 256 + + +@ex.named_config +def clip_kc_nc_vtc_mim_nd_choice(): + exp_name = "clip_kc_nc_vtc_mim_nd_choice" + video_datasets = ["webvid10m"] + image_datasets = [] + train_transform_keys = ["open_clip"] + val_transform_keys = ["open_clip"] + val_datasets = ["msrvtt_choice", "lsmdc_choice"] + loss_names = _loss_names({"contrastive": 1, "mim": 1}) + + num_frames = 8 + max_epoch = 10 + max_text_len = 77 + learning_rate = 1e-5 + clip = "/mnt/petrelfs/share_data/liyizhuo/pretrained/clip_pretrained_models/ViT-B-16.pt" + clip_type = "kc_new" + vocab_size = 49408 + draw_false_video = 0 + draw_false_text = 5 + decay_power = "cosine" + weight_decay = 0.2 + warmup_steps = 4000 + clip_freeze_text = 
True + mim_prob = 0.90 + val_loss_names = _loss_names({"multiple_choice": 1}) + per_gpu_batchsize = 32 + batch_size = 256 + clip_mim_decoder_n_layers = 0 + + +@ex.named_config +def clip_kc_nc_vtc_mlm_choice(): + exp_name = "clip_kc_nc_vtc_mlm_choice" + video_datasets = ["webvid10m"] + image_datasets = [] + train_transform_keys = ["open_clip"] + val_transform_keys = ["open_clip"] + val_datasets = ["msrvtt_choice", "lsmdc_choice"] + loss_names = _loss_names({"contrastive": 1, "mlm": 1}) + + num_frames = 8 + max_epoch = 10 + max_text_len = 77 + learning_rate = 1e-5 + clip = "/mnt/petrelfs/share_data/liyizhuo/pretrained/clip_pretrained_models/ViT-B-16.pt" + clip_type = "kc_new" + vocab_size = 49408 + draw_false_video = 0 + draw_false_text = 5 + decay_power = "cosine" + weight_decay = 0.2 + warmup_steps = 4000 + clip_freeze_text = True + val_loss_names = _loss_names({"multiple_choice": 1}) + per_gpu_batchsize = 32 + batch_size = 256 + clip_mim_decoder_n_layers = 0 + + +@ex.named_config +def clip_kc_nc_vtc_mim_nd_mlm_choice(): + exp_name = "clip_kc_nc_vtc_mim_nd_mlm_choice" + video_datasets = ["webvid10m"] + image_datasets = [] + train_transform_keys = ["open_clip"] + val_transform_keys = ["open_clip"] + val_datasets = ["msrvtt_choice", "lsmdc_choice"] + loss_names = _loss_names({"contrastive": 1, "mlm": 1, "mim": 1}) + + num_frames = 8 + max_epoch = 10 + max_text_len = 77 + learning_rate = 1e-5 + clip = "/mnt/petrelfs/share_data/liyizhuo/pretrained/clip_pretrained_models/ViT-B-16.pt" + clip_type = "kc_new" + vocab_size = 49408 + draw_false_video = 0 + draw_false_text = 5 + decay_power = "cosine" + weight_decay = 0.2 + warmup_steps = 4000 + clip_freeze_text = True + val_loss_names = _loss_names({"multiple_choice": 1}) + per_gpu_batchsize = 128 + batch_size = 1024 + clip_mim_decoder_n_layers = 0 + + +# = for msrvtt multiple choice +@ex.named_config +def clip_kc_nc_finetune_msrvttchoice(): + exp_name = "clip_kc_nc_finetune_msrvttchoice" + video_datasets=["msrvtt_choice", "lsmdc_choice"] + image_datasets = [] + loss_names = _loss_names({"multiple_choice": 1}) + num_frames = 8 + batch_size = 512 + max_epoch = 10 + warmup_steps = 0.1 + draw_false_text = 5 # 5 choices + learning_rate = 1e-4 + val_check_interval = 0.5 + max_text_len = 77 + clip = "/mnt/petrelfs/share_data/liyizhuo/pretrained/clip_pretrained_models/ViT-B-16.pt" + clip_type = "kc_new" + test_only = True \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/__init__.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/__init__.py new file mode 100755 index 0000000..560d17a --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/__init__.py @@ -0,0 +1,83 @@ +# pretrain dataset +## video +from CoTrain.datamodules.video.webvid_datamodule import WEBVIDDataModule +from CoTrain.datamodules.video.webvid10m_datamodule import WEBVID10MDataModule +from CoTrain.datamodules.video.howto100m_datamodule import HT100MDataModule +from CoTrain.datamodules.video.youtube_datamodule import YOUTUBEDataModule +from CoTrain.datamodules.video.yttemporal_datamodule import YTTemporalMDataModule +## image +from CoTrain.datamodules.image.cc3m_datamodule import CC3MDataModule +from CoTrain.datamodules.image.cc12m_datamodule import CC12MDataModule +from CoTrain.datamodules.image.yfcc15m_datamodule import YFCC15MDataModule +from CoTrain.datamodules.image.laion400m_datamodule import LAION400MDataModule +from CoTrain.datamodules.image.vg_caption_datamodule import VisualGenomeCaptionDataModule 
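+# (note added for clarity: the datamodule classes imported above and below are collected
+#  in the _datamodules registry at the end of this file and looked up by the dataset
+#  names used in config.py, e.g. "webvid", "cc3m", "msrvtt_choice".)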
+from CoTrain.datamodules.image.coco_caption_karpathy_datamodule import CocoCaptionKarpathyDataModule +from CoTrain.datamodules.image.conceptual_caption_datamodule import ConceptualCaptionDataModule +from CoTrain.datamodules.image.sbu_datamodule import SBUCaptionDataModule +from CoTrain.datamodules.image.mix100m_datamodule import MIX100MDataModule +# finetune dataset +## image +from CoTrain.datamodules.image.f30k_caption_karpathy_datamodule import F30KCaptionKarpathyDataModule +from CoTrain.datamodules.image.vqav2_datamodule import VQAv2DataModule +from CoTrain.datamodules.image.nlvr2_datamodule import NLVR2DataModule +from CoTrain.datamodules.image.vcr_datamodule import VCRDataModule +## video +from CoTrain.datamodules.video.msrvtt_datamodule import MSRVTTDataModule +from CoTrain.datamodules.video.msrvttqa_datamodule import MSRVTTQADataModule +from CoTrain.datamodules.video.msrvtt_choice_datamodule import MSRVTTChoiceDataModule +from CoTrain.datamodules.video.msvd_datamodule import MSVDDataModule +from CoTrain.datamodules.video.msvdqa_datamodule import MSVDQADataModule +from CoTrain.datamodules.video.ego4d_datamodule import Ego4DDataModule +from CoTrain.datamodules.video.tvqa_datamodule import TVQADataModule +from CoTrain.datamodules.video.lsmdc_choice_datamodule import LSMDCChoiceDataModule +from CoTrain.datamodules.video.ego4d_choice_datamodule import EGO4DChoiceDataModule +from CoTrain.datamodules.video.tgif_datamodule import TGIFDataModule +from CoTrain.datamodules.video.tgifqa_datamodule import TGIFQADataModule +from CoTrain.datamodules.video.didemo_datamodule import DIDEMODataModule +from CoTrain.datamodules.video.hmdb51_datamodule import HMDB51DataModule +from CoTrain.datamodules.video.ucf101_datamodule import UCF101DataModule +from CoTrain.datamodules.video.k400_datamodule import K400DataModule +from CoTrain.datamodules.video.lsmdc_datamodule import LSMDCDataModule +from CoTrain.datamodules.video.k400_video_datamodule import K400VideoDataModule +from CoTrain.datamodules.image.activitynet_datamodule import ActivityNetDataModule + +_datamodules = { + # image + "vg": VisualGenomeCaptionDataModule, + "f30k": F30KCaptionKarpathyDataModule, + "coco": CocoCaptionKarpathyDataModule, + "gcc": ConceptualCaptionDataModule, + "sbu": SBUCaptionDataModule, + "vqa": VQAv2DataModule, + "nlvr2": NLVR2DataModule, + "cc3m": CC3MDataModule, + "cc12m": CC12MDataModule, + 'yfcc15m': YFCC15MDataModule, + 'laion400m': LAION400MDataModule, + 'vcr': VCRDataModule, + 'mix100m': MIX100MDataModule, + # video + 'howto100m': HT100MDataModule, + 'youtube': YOUTUBEDataModule, + 'webvid': WEBVIDDataModule, + 'webvid10m': WEBVID10MDataModule, + 'msrvtt': MSRVTTDataModule, + 'msrvttqa': MSRVTTQADataModule, + 'msrvtt_choice': MSRVTTChoiceDataModule, + 'msvd': MSVDDataModule, + 'msvdqa': MSVDQADataModule, + 'ego4d': Ego4DDataModule, + 'tvqa': TVQADataModule, + 'lsmdc_choice': LSMDCChoiceDataModule, + 'ego4d_choice': EGO4DChoiceDataModule, + 'yttemporal': YTTemporalMDataModule, + 'tgif': TGIFDataModule, + "tgifqa": TGIFQADataModule, + 'didemo': DIDEMODataModule, + 'hmdb51': HMDB51DataModule, + 'ucf101': UCF101DataModule, + 'k400': K400DataModule, + 'lsmdc': LSMDCDataModule, + 'activitynet': ActivityNetDataModule, + 'k400_video': K400VideoDataModule, +} diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/__init__.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/__init__.py new file mode 100644 index 0000000..e69de29 diff --git 
a/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/activitynet_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/activitynet_datamodule.py new file mode 100644 index 0000000..e7c33d6 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/activitynet_datamodule.py @@ -0,0 +1,15 @@ +from CoTrain.datasets import ActivityNetDataset +from .datamodule_base import BaseDataModule + + +class ActivityNetDataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return ActivityNetDataset + + @property + def dataset_name(self): + return "activitynet" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/cc12m_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/cc12m_datamodule.py new file mode 100644 index 0000000..0c17847 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/cc12m_datamodule.py @@ -0,0 +1,15 @@ +from CoTrain.datasets import CC12MDataset +from .datamodule_base import BaseDataModule + + +class CC12MDataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return CC12MDataset + + @property + def dataset_name(self): + return "cc3m" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/cc3m_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/cc3m_datamodule.py new file mode 100755 index 0000000..f8e488f --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/cc3m_datamodule.py @@ -0,0 +1,15 @@ +from CoTrain.datasets import CC3MDataset +from .datamodule_base import BaseDataModule + + +class CC3MDataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return CC3MDataset + + @property + def dataset_name(self): + return "cc3m" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/coco_caption_karpathy_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/coco_caption_karpathy_datamodule.py new file mode 100755 index 0000000..d14edc3 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/coco_caption_karpathy_datamodule.py @@ -0,0 +1,19 @@ +from CoTrain.datasets import CocoCaptionKarpathyDataset +from .datamodule_base import BaseDataModule + + +class CocoCaptionKarpathyDataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return CocoCaptionKarpathyDataset + + @property + def dataset_cls_no_false(self): + return CocoCaptionKarpathyDataset + + @property + def dataset_name(self): + return "coco" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/conceptual_caption_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/conceptual_caption_datamodule.py new file mode 100755 index 0000000..825d916 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/conceptual_caption_datamodule.py @@ -0,0 +1,15 @@ +from CoTrain.datasets import ConceptualCaptionDataset +from .datamodule_base import BaseDataModule + + +class ConceptualCaptionDataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def 
dataset_cls(self): + return ConceptualCaptionDataset + + @property + def dataset_name(self): + return "gcc" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/datamodule_base.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/datamodule_base.py new file mode 100755 index 0000000..54fc3e6 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/datamodule_base.py @@ -0,0 +1,204 @@ +import torch + +from pytorch_lightning import LightningDataModule +from torch.utils.data import DataLoader +from transformers import ( + DataCollatorForLanguageModeling, + DataCollatorForWholeWordMask, + BertTokenizer, +) + + +def get_pretrained_tokenizer(from_pretrained): + if torch.distributed.is_initialized(): + if torch.distributed.get_rank() == 0: + BertTokenizer.from_pretrained( + from_pretrained, do_lower_case="uncased" in from_pretrained + ) + torch.distributed.barrier() + return BertTokenizer.from_pretrained( + from_pretrained, do_lower_case="uncased" in from_pretrained + ) + + +class BaseDataModule(LightningDataModule): + def __init__(self, _config): + super().__init__() + + self.data_dir = _config["data_root"] + + self.num_workers = _config["num_workers"] + self.batch_size = _config["per_gpu_batchsize"] + self.eval_batch_size = self.batch_size + + self.image_size = _config["image_size"] + self.max_text_len = _config["max_text_len"] + self.draw_false_video = _config["draw_false_video"] + self.draw_false_image = _config["draw_false_image"] + self.draw_false_text = _config["draw_false_text"] + self.image_only = _config["video_only"] + + + self.num_frames = _config["num_frames"] + self.draw_options_text = _config["draw_options_text"] + self.backend = _config["backend"] + + self.train_transform_keys = ( + ["default_train"] + if len(_config["train_transform_keys"]) == 0 + else _config["train_transform_keys"] + ) + + self.val_transform_keys = ( + ["default_val"] + if len(_config["val_transform_keys"]) == 0 + else _config["val_transform_keys"] + ) + + tokenizer = _config["tokenizer"] + self.tokenizer = get_pretrained_tokenizer(tokenizer) + self.vocab_size = self.tokenizer.vocab_size + + collator = ( + DataCollatorForWholeWordMask + if _config["whole_word_masking"] + else DataCollatorForLanguageModeling + ) + + self.mlm_collator = collator( + tokenizer=self.tokenizer, mlm=True, mlm_probability=_config["mlm_prob"] + ) + self.setup_flag = False + + @property + def dataset_cls(self): + raise NotImplementedError("return tuple of dataset class") + + @property + def dataset_name(self): + raise NotImplementedError("return name of dataset") + + def set_train_dataset(self): + self.train_dataset = self.dataset_cls( + self.data_dir, + self.train_transform_keys, + split="train", + image_size=self.image_size, + max_text_len=self.max_text_len, + draw_false_image=self.draw_false_image, + draw_false_video=self.draw_false_video, + draw_false_text=self.draw_false_text, + image_only=self.image_only, + num_frames=self.num_frames, + draw_options_text=self.draw_options_text, + backend=self.backend + ) + + def set_val_dataset(self): + self.val_dataset = self.dataset_cls( + self.data_dir, + self.val_transform_keys, + split="val", + image_size=self.image_size, + max_text_len=self.max_text_len, + draw_false_image=self.draw_false_image, + draw_false_video=self.draw_false_video, + draw_false_text=self.draw_false_text, + image_only=self.image_only, + num_frames=self.num_frames, + draw_options_text=self.draw_options_text, + backend=self.backend + ) + + if 
hasattr(self, "dataset_cls_no_false"): + self.val_dataset_no_false = self.dataset_cls_no_false( + self.data_dir, + self.val_transform_keys, + split="val", + image_size=self.image_size, + max_text_len=self.max_text_len, + draw_false_image=0, + draw_false_video=0, + draw_false_text=0, + image_only=self.image_only, + num_frames=self.num_frames, + draw_options_text=self.draw_options_text, + backend=self.backend + ) + + def make_no_false_val_dset(self, image_only=False): + return self.dataset_cls_no_false( + self.data_dir, + self.val_transform_keys, + split="val", + image_size=self.image_size, + max_text_len=self.max_text_len, + draw_false_image=0, + draw_false_video=0, + draw_false_text=0, + image_only=image_only, + num_frames=self.num_frames, + draw_options_text=self.draw_options_text, + backend=self.backend + ) + + def set_test_dataset(self): + self.test_dataset = self.dataset_cls( + self.data_dir, + self.val_transform_keys, + split="test", + image_size=self.image_size, + max_text_len=self.max_text_len, + draw_false_image=self.draw_false_image, + draw_false_video=self.draw_false_video, + draw_false_text=self.draw_false_text, + image_only=self.image_only, + num_frames=self.num_frames, + draw_options_text=self.draw_options_text, + backend=self.backend + ) + + def setup(self, stage): + if not self.setup_flag: + self.set_train_dataset() + self.set_val_dataset() + self.set_test_dataset() + + self.train_dataset.tokenizer = self.tokenizer + self.val_dataset.tokenizer = self.tokenizer + self.test_dataset.tokenizer = self.tokenizer + + self.setup_flag = True + + def train_dataloader(self): + loader = DataLoader( + self.train_dataset, + batch_size=self.batch_size, + shuffle=True, + num_workers=self.num_workers, + pin_memory=True, + collate_fn=self.train_dataset.collate, + ) + return loader + + def val_dataloader(self): + loader = DataLoader( + self.val_dataset, + batch_size=self.eval_batch_size, + shuffle=False, + num_workers=self.num_workers, + pin_memory=True, + collate_fn=self.val_dataset.collate, + ) + return loader + + def test_dataloader(self): + loader = DataLoader( + self.test_dataset, + batch_size=self.eval_batch_size, + shuffle=False, + num_workers=self.num_workers, + pin_memory=True, + collate_fn=self.test_dataset.collate, + ) + return loader diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/f30k_caption_karpathy_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/f30k_caption_karpathy_datamodule.py new file mode 100755 index 0000000..2696cf2 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/f30k_caption_karpathy_datamodule.py @@ -0,0 +1,19 @@ +from CoTrain.datasets import F30KCaptionKarpathyDataset +from .datamodule_base import BaseDataModule + + +class F30KCaptionKarpathyDataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return F30KCaptionKarpathyDataset + + @property + def dataset_cls_no_false(self): + return F30KCaptionKarpathyDataset + + @property + def dataset_name(self): + return "f30k" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/laion400m_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/laion400m_datamodule.py new file mode 100644 index 0000000..9aae8d2 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/laion400m_datamodule.py @@ -0,0 +1,15 @@ +from CoTrain.datasets import LAION400MDataset +from 
.datamodule_base import BaseDataModule + + +class LAION400MDataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return LAION400MDataset + + @property + def dataset_name(self): + return "laion400m" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/mix100m_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/mix100m_datamodule.py new file mode 100644 index 0000000..c28912f --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/mix100m_datamodule.py @@ -0,0 +1,15 @@ +from CoTrain.datasets import MIX100MDataset +from .datamodule_base import BaseDataModule + + +class MIX100MDataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return MIX100MDataset + + @property + def dataset_name(self): + return "mix100m" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/nlvr2_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/nlvr2_datamodule.py new file mode 100755 index 0000000..1ecc55d --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/nlvr2_datamodule.py @@ -0,0 +1,15 @@ +from CoTrain.datasets import NLVR2Dataset +from .datamodule_base import BaseDataModule + + +class NLVR2DataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return NLVR2Dataset + + @property + def dataset_name(self): + return "nlvr2" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/sbu_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/sbu_datamodule.py new file mode 100755 index 0000000..2cfa113 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/sbu_datamodule.py @@ -0,0 +1,15 @@ +from CoTrain.datasets import SBUCaptionDataset +from .datamodule_base import BaseDataModule + + +class SBUCaptionDataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return SBUCaptionDataset + + @property + def dataset_name(self): + return "sbu" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/vcr_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/vcr_datamodule.py new file mode 100644 index 0000000..6f844b3 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/vcr_datamodule.py @@ -0,0 +1,15 @@ +from CoTrain.datasets import VCRDataset +from .datamodule_base import BaseDataModule + + +class VCRDataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return VCRDataset + + @property + def dataset_name(self): + return "vcr" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/vg_caption_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/vg_caption_datamodule.py new file mode 100755 index 0000000..db6bc55 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/vg_caption_datamodule.py @@ -0,0 +1,15 @@ +from CoTrain.datasets import VisualGenomeCaptionDataset +from .datamodule_base import BaseDataModule + + +class VisualGenomeCaptionDataModule(BaseDataModule): + def __init__(self, *args, 
**kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return VisualGenomeCaptionDataset + + @property + def dataset_name(self): + return "vg" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/vqav2_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/vqav2_datamodule.py new file mode 100755 index 0000000..70970bb --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/vqav2_datamodule.py @@ -0,0 +1,36 @@ +from CoTrain.datasets import VQAv2Dataset +from CoTrain.datamodules.image.datamodule_base import BaseDataModule +from collections import defaultdict + + +class VQAv2DataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return VQAv2Dataset + + @property + def dataset_name(self): + return "vqa" + + def setup(self, stage): + super().setup(stage) + + train_answers = self.train_dataset.table["answers"].to_pandas().tolist() + val_answers = self.val_dataset.table["answers"].to_pandas().tolist() + train_labels = self.train_dataset.table["answer_labels"].to_pandas().tolist() + val_labels = self.val_dataset.table["answer_labels"].to_pandas().tolist() + + all_answers = [c for c in train_answers + val_answers if c is not None] + all_answers = [l for lll in all_answers for ll in lll for l in ll] + all_labels = [c for c in train_labels + val_labels if c is not None] + all_labels = [l for lll in all_labels for ll in lll for l in ll] + + self.answer2id = {k: v for k, v in zip(all_answers, all_labels)} + sorted_a2i = sorted(self.answer2id.items(), key=lambda x: x[1]) + self.num_class = max(self.answer2id.values()) + 1 + self.id2answer = defaultdict(lambda: "unknown") + for k, v in sorted_a2i: + self.id2answer[v] = k diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/yfcc15m_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/yfcc15m_datamodule.py new file mode 100644 index 0000000..2847de6 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/image/yfcc15m_datamodule.py @@ -0,0 +1,15 @@ +from CoTrain.datasets import YFCC15MDataset +from .datamodule_base import BaseDataModule + + +class YFCC15MDataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return YFCC15MDataset + + @property + def dataset_name(self): + return "yfcc15m" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/__init__.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/didemo_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/didemo_datamodule.py new file mode 100644 index 0000000..bd0062c --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/didemo_datamodule.py @@ -0,0 +1,19 @@ +from CoTrain.datasets import DIDEMODataset +from CoTrain.datamodules.image.datamodule_base import BaseDataModule + + +class DIDEMODataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return DIDEMODataset + + @property + def dataset_cls_no_false(self): + return DIDEMODataset + + @property + def dataset_name(self): + return "didemo" diff --git 
a/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/ego4d_choice_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/ego4d_choice_datamodule.py new file mode 100644 index 0000000..a4c0c04 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/ego4d_choice_datamodule.py @@ -0,0 +1,19 @@ +from CoTrain.datasets import EGO4DChoiceDataset +from CoTrain.datamodules.image.datamodule_base import BaseDataModule + + +class EGO4DChoiceDataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return EGO4DChoiceDataset + + @property + def dataset_cls_no_false(self): + return EGO4DChoiceDataset + + @property + def dataset_name(self): + return "ego4d_choice" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/ego4d_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/ego4d_datamodule.py new file mode 100644 index 0000000..64d39ec --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/ego4d_datamodule.py @@ -0,0 +1,19 @@ +from CoTrain.datasets import Ego4DDataset +from CoTrain.datamodules.image.datamodule_base import BaseDataModule + + +class Ego4DDataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return Ego4DDataset + + @property + def dataset_cls_no_false(self): + return Ego4DDataset + + @property + def dataset_name(self): + return "ego4d" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/hmdb51_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/hmdb51_datamodule.py new file mode 100644 index 0000000..7fc4602 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/hmdb51_datamodule.py @@ -0,0 +1,19 @@ +from CoTrain.datasets import HMDB51Dataset +from CoTrain.datamodules.image.datamodule_base import BaseDataModule + + +class HMDB51DataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return HMDB51Dataset + + @property + def dataset_cls_no_false(self): + return HMDB51Dataset + + @property + def dataset_name(self): + return "hmdb51" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/howto100m_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/howto100m_datamodule.py new file mode 100644 index 0000000..05d4008 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/howto100m_datamodule.py @@ -0,0 +1,19 @@ +from CoTrain.datasets import HT100MDataset +from CoTrain.datamodules.image.datamodule_base import BaseDataModule + + +class HT100MDataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return HT100MDataset + + @property + def dataset_cls_no_false(self): + return HT100MDataset + + @property + def dataset_name(self): + return "howto100m" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/k400_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/k400_datamodule.py new file mode 100644 index 0000000..62b8e01 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/k400_datamodule.py @@ -0,0 +1,19 @@ +from CoTrain.datasets import K400Dataset +from 
CoTrain.datamodules.image.datamodule_base import BaseDataModule + + +class K400DataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return K400Dataset + + @property + def dataset_cls_no_false(self): + return K400Dataset + + @property + def dataset_name(self): + return "k400" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/k400_video_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/k400_video_datamodule.py new file mode 100644 index 0000000..19eb272 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/k400_video_datamodule.py @@ -0,0 +1,19 @@ +from CoTrain.datasets import K400VideoDataset +from CoTrain.datamodules.image.datamodule_base import BaseDataModule + + +class K400VideoDataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return K400VideoDataset + + @property + def dataset_cls_no_false(self): + return K400VideoDataset + + @property + def dataset_name(self): + return "k400_video" \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/lsmdc_choice_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/lsmdc_choice_datamodule.py new file mode 100644 index 0000000..6e66823 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/lsmdc_choice_datamodule.py @@ -0,0 +1,19 @@ +from CoTrain.datasets import LSMDCChoiceDataset +from CoTrain.datamodules.image.datamodule_base import BaseDataModule + + +class LSMDCChoiceDataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return LSMDCChoiceDataset + + @property + def dataset_cls_no_false(self): + return LSMDCChoiceDataset + + @property + def dataset_name(self): + return "lsmdc_choice" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/lsmdc_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/lsmdc_datamodule.py new file mode 100644 index 0000000..060ed48 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/lsmdc_datamodule.py @@ -0,0 +1,19 @@ +from CoTrain.datasets import LSMDCDataset +from CoTrain.datamodules.image.datamodule_base import BaseDataModule + + +class LSMDCDataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return LSMDCDataset + + @property + def dataset_cls_no_false(self): + return LSMDCDataset + + @property + def dataset_name(self): + return "lsmdc" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/msrvtt_choice_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/msrvtt_choice_datamodule.py new file mode 100644 index 0000000..9769492 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/msrvtt_choice_datamodule.py @@ -0,0 +1,19 @@ +from CoTrain.datasets import MSRVTTChoiceDataset +from CoTrain.datamodules.image.datamodule_base import BaseDataModule + + +class MSRVTTChoiceDataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return MSRVTTChoiceDataset + + @property + def dataset_cls_no_false(self): + return 
MSRVTTChoiceDataset + + @property + def dataset_name(self): + return "msrvtt_choice" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/msrvtt_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/msrvtt_datamodule.py new file mode 100644 index 0000000..4f49ca5 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/msrvtt_datamodule.py @@ -0,0 +1,19 @@ +from CoTrain.datasets import MSRVTTDataset +from CoTrain.datamodules.image.datamodule_base import BaseDataModule + + +class MSRVTTDataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return MSRVTTDataset + + @property + def dataset_cls_no_false(self): + return MSRVTTDataset + + @property + def dataset_name(self): + return "msrvtt" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/msrvttqa_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/msrvttqa_datamodule.py new file mode 100644 index 0000000..6445ba9 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/msrvttqa_datamodule.py @@ -0,0 +1,25 @@ +from CoTrain.datasets import MSRVTTQADataset +from CoTrain.datamodules.image.datamodule_base import BaseDataModule +from collections import defaultdict + + +class MSRVTTQADataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return MSRVTTQADataset + + @property + def dataset_name(self): + return "msrvttqa" + + def setup(self, stage): + super().setup(stage) + self.answer2id = self.train_dataset.ans_lab_dict + sorted_a2i = sorted(self.answer2id.items(), key=lambda x: x[1]) + self.num_class = max(self.answer2id.values()) + 1 + self.id2answer = defaultdict(lambda: "unknown") + for k, v in sorted_a2i: + self.id2answer[v] = k diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/msvd_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/msvd_datamodule.py new file mode 100644 index 0000000..1730abd --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/msvd_datamodule.py @@ -0,0 +1,19 @@ +from CoTrain.datasets import MSVDDataset +from CoTrain.datamodules.image.datamodule_base import BaseDataModule + + +class MSVDDataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return MSVDDataset + + @property + def dataset_cls_no_false(self): + return MSVDDataset + + @property + def dataset_name(self): + return "msvd" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/msvdqa_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/msvdqa_datamodule.py new file mode 100644 index 0000000..724b0a9 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/msvdqa_datamodule.py @@ -0,0 +1,25 @@ +from CoTrain.datasets import MSVDQADataset +from CoTrain.datamodules.image.datamodule_base import BaseDataModule +from collections import defaultdict + + +class MSVDQADataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return MSVDQADataset + + @property + def dataset_name(self): + return "msvdqa" + + def setup(self, stage): + super().setup(stage) + self.answer2id = self.train_dataset.ans_lab_dict + 
sorted_a2i = sorted(self.answer2id.items(), key=lambda x: x[1]) + self.num_class = max(self.answer2id.values()) + 1 + self.id2answer = defaultdict(lambda: "unknown") + for k, v in sorted_a2i: + self.id2answer[v] = k diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/multitask_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/multitask_datamodule.py new file mode 100755 index 0000000..0cd6b79 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/multitask_datamodule.py @@ -0,0 +1,214 @@ +import functools + +from pytorch_lightning import LightningDataModule +from torch.utils.data import DataLoader +from torch.utils.data.dataset import ConcatDataset +from torch.utils.data.distributed import DistributedSampler +from pytorch_lightning.trainer.supporters import CombinedLoader + +from CoTrain.datamodules import _datamodules + + +class MTDataModule(LightningDataModule): + def __init__(self, _config, dist=False): + video_datamodule_keys = _config["video_datasets"] + image_datamodule_keys = _config["image_datasets"] + self.num_video_datasets = len(video_datamodule_keys) + self.num_image_datasets = len(image_datamodule_keys) + + assert self.num_video_datasets > 0 or self.num_image_datasets > 0 + + super().__init__() + if self.num_video_datasets > 0: + self.video_dm_keys = video_datamodule_keys + self.video_dm_dicts = {key: _datamodules[key](_config) for key in video_datamodule_keys} + self.video_dms = [v for k, v in self.video_dm_dicts.items()] + self.video_batch_size = self.video_dms[0].batch_size + self.video_vocab_size = self.video_dms[0].vocab_size + self.video_num_workers = self.video_dms[0].num_workers + if self.num_image_datasets: + self.image_dm_keys = image_datamodule_keys + self.image_dm_dicts = {key: _datamodules[key](_config) for key in image_datamodule_keys} + self.image_dms = [v for k, v in self.image_dm_dicts.items()] + self.image_batch_size = self.image_dms[0].batch_size * _config["image_data_mult"] + self.image_vocab_size = self.image_dms[0].vocab_size + self.image_num_workers = self.image_dms[0].num_workers + self.dist = dist + + # We add extra val datamodules so that we can use different dataset in train and val + # We assume all val datasets are video datasets + self.val_dm_keys = _config["val_datasets"] + self.val_dm_dicts = { + key: _datamodules[key](_config) for key in self.val_dm_keys + } + self.val_dms = [v for k, v in self.val_dm_dicts.items()] + + self.pin_memory = False + + def prepare_data(self): + if self.num_video_datasets: + for dm in self.video_dms: + dm.prepare_data() + if self.num_image_datasets: + for dm in self.image_dms: + dm.prepare_data() + for dm in self.val_dms: + dm.prepare_data() + + def setup(self, stage): + if self.num_video_datasets: + for dm in self.video_dms: + dm.setup(stage) + if self.num_image_datasets: + for dm in self.image_dms: + dm.setup(stage) + for dm in self.val_dms: + dm.setup(stage) + + if self.num_video_datasets: + self.video_train_dataset = ConcatDataset([dm.train_dataset for dm in self.video_dms]) + self.video_val_dataset = ConcatDataset([dm.val_dataset for dm in self.video_dms]) + self.video_test_dataset = ConcatDataset([dm.test_dataset for dm in self.video_dms]) + + if self.num_image_datasets: + self.image_train_dataset = ConcatDataset([dm.train_dataset for dm in self.image_dms]) + self.image_val_dataset = ConcatDataset([dm.val_dataset for dm in self.image_dms]) + self.image_test_dataset = ConcatDataset([dm.test_dataset for dm in self.image_dms]) + 
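+ # Editor's note (illustrative comment, not part of the original source): when _config["val_datasets"] is non-empty,
+ # the ConcatDataset built from self.val_dms below is what val_dataloader() returns, replacing the per-dataset
+ # validation splits, so training and validation can use different corpora. A minimal usage sketch, assuming a
+ # hypothetical config dict that carries every key the datamodules read:
+ #     dm = MTDataModule({"video_datasets": ["webvid"], "image_datasets": ["cc3m"], "val_datasets": [], ...}, dist=True)
+ #     dm.prepare_data(); dm.setup("fit")
+ #     loader = dm.train_dataloader()  # CombinedLoader over {"v": video_loader, "i": image_loader}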
+ if len(self.val_dms) == 0: + self.val_dataset = None + else: + self.val_dataset = ConcatDataset([dm.val_dataset for dm in self.val_dms]) + + if len(self.video_dms) > 0: + self.tokenizer = self.video_dms[0].tokenizer + else: + self.tokenizer = self.image_dms[0].tokenizer + + if self.num_video_datasets: + self.video_collate = functools.partial( + self.video_dms[0].train_dataset.collate, mlm_collator=self.video_dms[0].mlm_collator, + ) + if self.num_image_datasets: + self.image_collate = functools.partial( + self.image_dms[0].train_dataset.collate, mlm_collator=self.image_dms[0].mlm_collator, + ) + + if self.dist: + if self.num_video_datasets: + self.video_train_sampler = DistributedSampler(self.video_train_dataset, shuffle=False) + self.video_val_sampler = DistributedSampler(self.video_val_dataset, shuffle=False) + self.video_test_sampler = DistributedSampler(self.video_test_dataset, shuffle=False) + if self.num_image_datasets: + self.image_train_sampler = DistributedSampler(self.image_train_dataset, shuffle=False) + self.image_val_sampler = DistributedSampler(self.image_val_dataset, shuffle=False) + self.image_test_sampler = DistributedSampler(self.image_test_dataset, shuffle=False) + if self.val_dataset is not None: + self.val_sampler = DistributedSampler(self.val_dataset, shuffle=False) + else: + self.video_train_sampler = None + self.video_val_sampler = None + self.video_test_sampler = None + self.image_train_sampler = None + self.image_val_sampler = None + self.image_test_sampler = None + + def train_dataloader(self): + if self.num_video_datasets: + video_loader = DataLoader( + self.video_train_dataset, + batch_size=self.video_batch_size, + sampler=self.video_train_sampler, + num_workers=self.video_num_workers, + pin_memory=self.pin_memory, + collate_fn=self.video_collate, + ) + if self.num_image_datasets: + image_loader = DataLoader( + self.image_train_dataset, + batch_size=self.image_batch_size, + sampler=self.image_train_sampler, + num_workers=self.image_num_workers, + pin_memory=self.pin_memory, + collate_fn=self.image_collate, + ) + if self.num_video_datasets and self.num_image_datasets: + loaders = {"v": video_loader, "i": image_loader} + combined_loader = CombinedLoader(loaders, mode="min_size") # "min_size" / "max_size_cycle", + else: + if self.num_video_datasets: + combined_loader = video_loader + else: + combined_loader = image_loader + return combined_loader + + def val_dataloader(self, batch_size=None): + # Skip all other datasets if we have different val datasets + if self.val_dataset is not None: + return DataLoader( + self.val_dataset, + # batch_size=batch_size if batch_size is not None else self.video_batch_size * 4, + batch_size=batch_size if batch_size is not None else self.video_batch_size, + sampler=self.val_sampler, + num_workers=self.video_num_workers // 2, + pin_memory=self.pin_memory, + collate_fn=self.video_collate, + ) + if self.num_video_datasets: + video_loader = DataLoader( + self.video_val_dataset, + batch_size=batch_size if batch_size is not None else self.video_batch_size, + sampler=self.video_val_sampler, + num_workers=self.video_num_workers, + pin_memory=self.pin_memory, + collate_fn=self.video_collate, + ) + if self.num_image_datasets: + image_loader = DataLoader( + self.image_val_dataset, + batch_size=batch_size if batch_size is not None else self.image_batch_size, + sampler=self.image_val_sampler, + num_workers=self.image_num_workers, + pin_memory=self.pin_memory, + collate_fn=self.image_collate, + ) + if self.num_video_datasets and 
self.num_image_datasets: + loaders = {"v": video_loader, "i": image_loader} + combined_loader = CombinedLoader(loaders, mode="min_size") # min_size / max_size_cycle + else: + if self.num_video_datasets: + combined_loader = video_loader + else: + combined_loader = image_loader + return combined_loader + + + def test_dataloader(self): + if self.num_video_datasets: + video_loader = DataLoader( + self.video_test_dataset, + batch_size=self.video_batch_size, + sampler=self.video_test_sampler, + num_workers=self.video_num_workers, + pin_memory=self.pin_memory, + collate_fn=self.video_collate, + ) + if self.num_image_datasets: + image_loader = DataLoader( + self.image_test_dataset, + batch_size=self.image_batch_size, + sampler=self.image_test_sampler, + num_workers=self.image_num_workers, + pin_memory=self.pin_memory, + collate_fn=self.image_collate, + ) + if self.num_video_datasets and self.num_image_datasets: + loaders = {"v": video_loader, "i": image_loader} + combined_loader = CombinedLoader(loaders, mode="min_size") # min_size / max_size_cycle + else: + if self.num_video_datasets: + combined_loader = video_loader + else: + combined_loader = image_loader + return combined_loader + diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/tgif_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/tgif_datamodule.py new file mode 100644 index 0000000..e338255 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/tgif_datamodule.py @@ -0,0 +1,19 @@ +from CoTrain.datasets import TGIFDataset +from CoTrain.datamodules.image.datamodule_base import BaseDataModule + + +class TGIFDataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return TGIFDataset + + @property + def dataset_cls_no_false(self): + return TGIFDataset + + @property + def dataset_name(self): + return "tgif" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/tgifqa_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/tgifqa_datamodule.py new file mode 100644 index 0000000..ba3e9c9 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/tgifqa_datamodule.py @@ -0,0 +1,19 @@ +from CoTrain.datasets import TGIFQADataset +from CoTrain.datamodules.image.datamodule_base import BaseDataModule + + +class TGIFQADataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return TGIFQADataset + + @property + def dataset_cls_no_false(self): + return TGIFQADataset + + @property + def dataset_name(self): + return "tgifqa" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/tvqa_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/tvqa_datamodule.py new file mode 100644 index 0000000..3b40ede --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/tvqa_datamodule.py @@ -0,0 +1,19 @@ +from CoTrain.datasets import TVQADataset +from CoTrain.datamodules.image.datamodule_base import BaseDataModule + + +class TVQADataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return TVQADataset + + @property + def dataset_cls_no_false(self): + return TVQADataset + + @property + def dataset_name(self): + return "tvqa" diff --git 
a/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/ucf101_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/ucf101_datamodule.py new file mode 100644 index 0000000..c6e704b --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/ucf101_datamodule.py @@ -0,0 +1,19 @@ +from CoTrain.datasets import UCF101Dataset +from CoTrain.datamodules.image.datamodule_base import BaseDataModule + + +class UCF101DataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return UCF101Dataset + + @property + def dataset_cls_no_false(self): + return UCF101Dataset + + @property + def dataset_name(self): + return "ucf101" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/webvid10m_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/webvid10m_datamodule.py new file mode 100644 index 0000000..f2b2f90 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/webvid10m_datamodule.py @@ -0,0 +1,19 @@ +from CoTrain.datasets import WEBVID10MDataset +from CoTrain.datamodules.image.datamodule_base import BaseDataModule + + +class WEBVID10MDataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return WEBVID10MDataset + + @property + def dataset_cls_no_false(self): + return WEBVID10MDataset + + @property + def dataset_name(self): + return "webvid10m" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/webvid_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/webvid_datamodule.py new file mode 100644 index 0000000..a3765ca --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/webvid_datamodule.py @@ -0,0 +1,19 @@ +from CoTrain.datasets import WEBVIDDataset +from CoTrain.datamodules.image.datamodule_base import BaseDataModule + + +class WEBVIDDataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return WEBVIDDataset + + @property + def dataset_cls_no_false(self): + return WEBVIDDataset + + @property + def dataset_name(self): + return "webvid" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/youtube_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/youtube_datamodule.py new file mode 100644 index 0000000..44af759 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/youtube_datamodule.py @@ -0,0 +1,19 @@ +from CoTrain.datasets import YOUTUBEDataset +from CoTrain.datamodules.image.datamodule_base import BaseDataModule + + +class YOUTUBEDataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return YOUTUBEDataset + + @property + def dataset_cls_no_false(self): + return YOUTUBEDataset + + @property + def dataset_name(self): + return "youtube" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/yttemporal_datamodule.py b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/yttemporal_datamodule.py new file mode 100644 index 0000000..aa56607 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datamodules/video/yttemporal_datamodule.py @@ -0,0 +1,19 @@ +from CoTrain.datasets import YTTemporalDataset 
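+# Editor's note (illustrative comment, not part of the original source): like the other video datamodules in this
+# package, YTTemporalMDataModule below is a thin wrapper that only overrides dataset_cls, dataset_cls_no_false and
+# dataset_name on top of BaseDataModule. A hypothetical new dataset would be registered the same way, e.g.:
+#     class MyClipsDataModule(BaseDataModule):          # MyClipsDataset is a made-up example class
+#         @property
+#         def dataset_cls(self):
+#             return MyClipsDataset
+#         @property
+#         def dataset_name(self):
+#             return "myclips"
+# (it would also need an entry in CoTrain.datamodules._datamodules, since MTDataModule looks datamodules up by key there).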
+from CoTrain.datamodules.image.datamodule_base import BaseDataModule + + +class YTTemporalMDataModule(BaseDataModule): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def dataset_cls(self): + return YTTemporalDataset + + @property + def dataset_cls_no_false(self): + return YTTemporalDataset + + @property + def dataset_name(self): + return "yttemporal" diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/__init__.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/__init__.py new file mode 100644 index 0000000..91f3230 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/__init__.py @@ -0,0 +1,55 @@ +try: + from petrel_client.client import Client + + client = Client() + + # Disable boto logger + import logging + logging.getLogger('boto3').setLevel(logging.WARNING) + logging.getLogger('botocore').setLevel(logging.WARNING) + logging.getLogger('nose').setLevel(logging.WARNING) +except: + client = None + +# == pretrain data +# = image +from .image.vg_caption_dataset import VisualGenomeCaptionDataset +from .image.coco_caption_karpathy_dataset import CocoCaptionKarpathyDataset +from .image.sbu_caption_dataset import SBUCaptionDataset +from .image.cc3m import CC3MDataset +from .image.cc12m import CC12MDataset +from .image.yfcc15m import YFCC15MDataset +from .image.laion400m import LAION400MDataset +from .image.conceptual_caption_dataset import ConceptualCaptionDataset +from .image.mix100m import MIX100MDataset +# = video +from .video.webvid import WEBVIDDataset +from .video.webvid10m import WEBVID10MDataset +from .video.howto100m import HT100MDataset +from .video.youtube import YOUTUBEDataset +from .video.yttemporal import YTTemporalDataset +# == downstream data +# = image +from .image.f30k_caption_karpathy_dataset import F30KCaptionKarpathyDataset +from .image.vqav2_dataset import VQAv2Dataset +from .image.nlvr2_dataset import NLVR2Dataset +from .image.vcr import VCRDataset +# = video +from .video.msrvtt import MSRVTTDataset +from .video.msrvttqa import MSRVTTQADataset +from .video.msrvtt_choice import MSRVTTChoiceDataset +from .video.msvd import MSVDDataset +from .video.lsmdc_dataset import LSMDCDataset +from .video.msvdqa import MSVDQADataset +from .video.ego4d import Ego4DDataset +from .video.tvqa import TVQADataset +from .video.lsmdc_choice import LSMDCChoiceDataset +from .video.ego4d_choice import EGO4DChoiceDataset +from .video.tgif import TGIFDataset +from .video.tgifqa import TGIFQADataset +from .video.didemo import DIDEMODataset +from .video.hmdb51 import HMDB51Dataset +from .video.ucf101 import UCF101Dataset +from .video.k400 import K400Dataset +from .video.activitynet import ActivityNetDataset +from .video.k400_video import K400VideoDataset \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/image/__init__.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/image/base_dataset.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/base_dataset.py new file mode 100644 index 0000000..20dd856 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/base_dataset.py @@ -0,0 +1,461 @@ +import random +import torch +import io +import pyarrow as pa +import os +import cv2 +import numpy as np +from PIL import Image +from CoTrain.transforms import keys_to_transforms +import decord +from 
CoTrain.transforms.image.imageaug import image_aug +import CoTrain.modules.InternVideo as internvideo + + +class BaseDataset(torch.utils.data.Dataset): + def __init__( + self, + data_dir: str, + transform_keys: list, + image_size: int, + names: list, + text_column_name: str = "", + remove_duplicate=True, + max_text_len=40, + draw_false_image=0, + draw_false_video=0, + draw_false_text=0, + image_only=False, + num_frames=1, + draw_options_text=0, + backend='v100' + ): + """ + data_dir : where dataset file *.arrow lives; existence should be guaranteed via DataModule.prepare_data + transform_keys : keys for generating augmented views of images + text_column_name : pyarrow table column name that has list of strings as elements + """ + assert len(transform_keys) >= 1 + super().__init__() + + self.transforms = keys_to_transforms(transform_keys, size=image_size, mode=self.split) + self.image_aug = image_aug + self.text_column_name = text_column_name + self.names = names + self.max_text_len = max_text_len + self.draw_false_image = draw_false_image + self.draw_false_text = draw_false_text + self.image_only = image_only + self.data_dir = data_dir + self.draw_options_text = draw_options_text + if torch.distributed.get_rank() == 0: + print('*'*100) + print("image sub datasets: {}".format(names)) + # print(names) + split_name = None + if len(names) != 0: + self.data_dir = os.path.join(self.data_dir, names[0].split('_')[0]) # e.g. coco_train -> coco + split_name = names[0].split('_')[0] + if torch.distributed.get_rank() == 0: + print(self.data_dir) + if split_name and split_name in ['msrvtt', 'cc3m', 'vcr', 'cc12m', 'yfcc15m', 'laion400m', 'mix100m']: + if torch.distributed.get_rank() == 0: + print("no arrow available for {}, load from disk".format(names[0])) + else: + if len(names) != 0: + tables = [ + pa.ipc.RecordBatchFileReader( + pa.memory_map(f"{self.data_dir}/{name}.arrow", "r") + ).read_all() + for name in names + if os.path.isfile(f"{self.data_dir}/{name}.arrow") + ] + # print(names, tables) + self.table_names = list() + for i, name in enumerate(names): + self.table_names += [name] * len(tables[i]) + + self.table = pa.concat_tables(tables, promote=True) + if text_column_name != "": + self.text_column_name = text_column_name + self.all_texts = self.table[text_column_name].to_pandas().tolist() + self.all_texts = ( + [list(set(texts)) for texts in self.all_texts] + if remove_duplicate + else self.all_texts + ) + else: + self.all_texts = list() + else: + self.all_texts = list() + + self.index_mapper = dict() + + if text_column_name != "" and not self.image_only: + j = 0 + for i, texts in enumerate(self.all_texts): + for _j in range(len(texts)): + self.index_mapper[j] = (i, _j) + j += 1 + else: + for i in range(len(self.table)): + self.index_mapper[i] = (i, None) + + # + # if len(names) != 0: + # tables = [ + # pa.ipc.RecordBatchFileReader( + # pa.memory_map(f"{data_dir}/{name}.arrow", "r") + # ).read_all() + # for name in names + # if os.path.isfile(f"{data_dir}/{name}.arrow") + # ] + # + # self.table_names = list() + # for i, name in enumerate(names): + # self.table_names += [name] * len(tables[i]) + # + # self.table = pa.concat_tables(tables, promote=True) + # if text_column_name != "": + # self.text_column_name = text_column_name + # self.all_texts = self.table[text_column_name].to_pandas().tolist() + # self.all_texts = ( + # [list(set(texts)) for texts in self.all_texts] + # if remove_duplicate + # else self.all_texts + # ) + # else: + # self.all_texts = list() + # else: + # self.all_texts = 
list() + # + # self.index_mapper = dict() + # + # if text_column_name != "" and not self.image_only: + # j = 0 + # for i, texts in enumerate(self.all_texts): + # for _j in range(len(texts)): + # self.index_mapper[j] = (i, _j) + # j += 1 + # else: + # for i in range(len(self.table)): + # self.index_mapper[i] = (i, None) + + @property + def corpus(self): + return [text for texts in self.all_texts for text in texts] + + def __len__(self): + return len(self.index_mapper) + + def get_raw_image(self, index, image_key="image"): + index, caption_index = self.index_mapper[index] + image_bytes = io.BytesIO(self.table[image_key][index].as_py()) + image_bytes.seek(0) + return Image.open(image_bytes).convert("RGB") + + def get_image(self, index, image_key="image"): + image = self.get_raw_image(index, image_key=image_key) + # image_tensor = [tr(image).unsqueeze(0) for tr in self.transforms] + image_tensor = self.image_aug(image, self.transforms) + return { + "video": image_tensor, + "vid_index": self.index_mapper[index][0], + "cap_index": self.index_mapper[index][1], + "raw_index": index, + } + + def get_false_image(self, rep, image_key="image"): + random_index = random.randint(0, len(self.index_mapper) - 1) + image = self.get_raw_image(random_index, image_key=image_key) + # image_tensor = [tr(image).unsqueeze(0) for tr in self.transforms] + image_tensor = self.image_aug(image, self.transforms) + return {f"false_video_{rep}": image_tensor} + + def get_text(self, raw_index): + index, caption_index = self.index_mapper[raw_index] + + text = self.all_texts[index][caption_index] + encoding = self.tokenizer( + text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return { + "text": (text, encoding), + "vid_index": index, + "cap_index": caption_index, + "raw_index": raw_index, + } + + def get_false_text(self, rep): + random_index = random.randint(0, len(self.index_mapper) - 1) + index, caption_index = self.index_mapper[random_index] + text = self.all_texts[index][caption_index] + encoding = self.tokenizer( + text, + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return {f"false_text_{rep}": (text, encoding)} + + def get_suite(self, index): + result = None + while result is None: + try: + ret = dict() + ret.update(self.get_image(index)) + if not self.image_only: + txt = self.get_text(index) + ret.update({"replica": True if txt["cap_index"] > 0 else False}) + ret.update(txt) + + for i in range(self.draw_false_image): + ret.update(self.get_false_image(i)) + for i in range(self.draw_false_text): + ret.update(self.get_false_text(i)) + result = True + except Exception as e: + print(f"Error while read file idx {index} in {self.names[0]} -> {e}") + index = random.randint(0, len(self.index_mapper) - 1) + return ret + + def collate(self, batch, mlm_collator): + batch_size = len(batch) + keys = set([key for b in batch for key in b.keys()]) + dict_batch = {k: [dic[k] if k in dic else None for dic in batch] for k in keys} + # print(dict_batch) + + img_keys = [k for k in list(dict_batch.keys()) if "video" in k] + img_sizes = list() + + for img_key in img_keys: + img_sizes += [ii.shape for i in dict_batch[img_key] if i is not None for ii in i] + + for size in img_sizes: + assert ( + len(size) == 4 + ), f"Collate error, an image should be in shape of (N, 3, H, W), instead of given {size}" + + if len(img_keys) != 0: + global_max_height = max([i[2] for i in img_sizes]) + global_max_width = max([i[3] for i in 
img_sizes]) + for img_key in img_keys: + img = dict_batch[img_key] + view_size = len(dict_batch[img_key][0]) + new_images = [ + torch.zeros(batch_size, 1, 3, global_max_height, global_max_width) + for _ in range(view_size) + ] + # print(len(img)) + for bi in range(batch_size): + orig_batch = img[bi] + for vi in range(view_size): + if orig_batch is None: + continue + else: + orig = img[bi][vi] + new_images[vi][bi, :, :, : orig.shape[2], : orig.shape[3]] = orig + + dict_batch[img_key] = new_images + + txt_keys = [k for k in list(dict_batch.keys()) if "text" in k] + + if len(txt_keys) != 0: + texts = [[d[0] for d in dict_batch[txt_key]] for txt_key in txt_keys] + encodings = [[d[1] for d in dict_batch[txt_key]] for txt_key in txt_keys] + draw_text_len = len(encodings) + flatten_encodings = [e for encoding in encodings for e in encoding] + flatten_mlms = mlm_collator(flatten_encodings) + + for i, txt_key in enumerate(txt_keys): + texts, encodings = ( + [d[0] for d in dict_batch[txt_key]], + [d[1] for d in dict_batch[txt_key]], + ) + + mlm_ids, mlm_labels = ( + flatten_mlms["input_ids"][batch_size * (i) : batch_size * (i + 1)], + flatten_mlms["labels"][batch_size * (i) : batch_size * (i + 1)], + ) + + input_ids = torch.zeros_like(mlm_ids) + attention_mask = torch.zeros_like(mlm_ids) + for _i, encoding in enumerate(encodings): + _input_ids, _attention_mask = ( + torch.tensor(encoding["input_ids"]), + torch.tensor(encoding["attention_mask"]), + ) + input_ids[_i, : len(_input_ids)] = _input_ids + attention_mask[_i, : len(_attention_mask)] = _attention_mask + + dict_batch[txt_key] = texts + dict_batch[f"{txt_key}_ids"] = input_ids + dict_batch[f"{txt_key}_labels"] = torch.full_like(input_ids, -100) + dict_batch[f"{txt_key}_ids_mlm"] = mlm_ids + dict_batch[f"{txt_key}_labels_mlm"] = mlm_labels + dict_batch[f"{txt_key}_masks"] = attention_mask + + clip_text_ids, clip_special_tokens_mask = internvideo.tokenize( + dict_batch["text"], truncate=True, return_special_tokens_mask=True) + dict_batch["clip_text_ids"] = clip_text_ids + dict_batch["clip_special_tokens_mask"] = clip_special_tokens_mask + + return dict_batch + + + # def collate(self, batch, mlm_collator): + # batch_size = len(batch) + # keys = set([key for b in batch for key in b.keys()]) + # dict_batch = {k: [dic[k] if k in dic else None for dic in batch] for k in keys} + # # print(dict_batch) + # + # img_keys = [k for k in list(dict_batch.keys()) if "video" in k] + # img_sizes = list() + # + # for img_key in img_keys: + # img_sizes += [ii.shape for i in dict_batch[img_key][0] if i is not None for ii in i] + # + # for size in img_sizes: + # assert ( + # len(size) == 4 + # ), f"Collate error, an image should be in shape of (N, 3, H, W), instead of given {size}" + # + # if len(img_keys) != 0: + # global_max_height = max([i[2] for i in img_sizes]) + # global_max_width = max([i[3] for i in img_sizes]) + # local_max_height = min([i[2] for i in img_sizes]) + # local_max_width = min([i[3] for i in img_sizes]) + # for img_key in img_keys: + # img = dict_batch[img_key] + # global_view_size = len(dict_batch[img_key][0][0]) + # local_view_size = len(dict_batch[img_key][0][1]) + # # for image, padding one time dimension + # new_images = [ + # [ + # torch.zeros(batch_size, 1, 3, global_max_height, global_max_width) + # for _ in range(global_view_size) + # ], + # [ + # torch.zeros(batch_size, 1, 3, local_max_height, local_max_width) + # for _ in range(local_view_size) + # ] + # ] + # # print(len(img)) + # for bi in range(batch_size): + # orig_batch = img[bi] 
+ # for vi in range(global_view_size): + # if orig_batch is None: + # continue + # else: + # orig = img[bi][0][vi] + # new_images[0][vi][bi, :, :, : orig.shape[2], : orig.shape[3]] = orig + # + # for bi in range(batch_size): + # orig_batch = img[bi] + # for vi in range(local_view_size): + # if orig_batch is None: + # continue + # else: + # orig = img[bi][1][vi] + # new_images[1][vi][bi, :, :, : orig.shape[2], : orig.shape[3]] = orig + # + # dict_batch[img_key] = new_images + # + # txt_keys = [k for k in list(dict_batch.keys()) if "text" in k] + # + # if len(txt_keys) != 0: + # texts = [[d[0] for d in dict_batch[txt_key]] for txt_key in txt_keys] + # encodings = [[d[1] for d in dict_batch[txt_key]] for txt_key in txt_keys] + # draw_text_len = len(encodings) + # flatten_encodings = [e for encoding in encodings for e in encoding] + # flatten_mlms = mlm_collator(flatten_encodings) + # + # for i, txt_key in enumerate(txt_keys): + # texts, encodings = ( + # [d[0] for d in dict_batch[txt_key]], + # [d[1] for d in dict_batch[txt_key]], + # ) + # + # mlm_ids, mlm_labels = ( + # flatten_mlms["input_ids"][batch_size * (i) : batch_size * (i + 1)], + # flatten_mlms["labels"][batch_size * (i) : batch_size * (i + 1)], + # ) + # + # input_ids = torch.zeros_like(mlm_ids) + # attention_mask = torch.zeros_like(mlm_ids) + # for _i, encoding in enumerate(encodings): + # _input_ids, _attention_mask = ( + # torch.tensor(encoding["input_ids"]), + # torch.tensor(encoding["attention_mask"]), + # ) + # input_ids[_i, : len(_input_ids)] = _input_ids + # attention_mask[_i, : len(_attention_mask)] = _attention_mask + # + # dict_batch[txt_key] = texts + # dict_batch[f"{txt_key}_ids"] = input_ids + # dict_batch[f"{txt_key}_labels"] = torch.full_like(input_ids, -100) + # dict_batch[f"{txt_key}_ids_mlm"] = mlm_ids + # dict_batch[f"{txt_key}_labels_mlm"] = mlm_labels + # dict_batch[f"{txt_key}_masks"] = attention_mask + # + # return dict_batch + + +def sample_frames(num_frames, vlen, sample='rand', fix_start=None): + acc_samples = min(num_frames, vlen) + intervals = np.linspace(start=0, stop=vlen, num=acc_samples + 1).astype(int) + ranges = [] + for idx, interv in enumerate(intervals[:-1]): + ranges.append((interv, intervals[idx + 1] - 1)) + if sample == 'rand': + frame_idxs = [random.choice(range(x[0], x[1])) for x in ranges] + elif fix_start is not None: + frame_idxs = [x[0] + fix_start for x in ranges] + elif sample == 'uniform': + frame_idxs = [(x[0] + x[1]) // 2 for x in ranges] + else: + raise NotImplementedError + + return frame_idxs + + +def read_frames_cv2(video_path, num_frames, sample='rand', fix_start=None): + cap = cv2.VideoCapture(video_path) + assert (cap.isOpened()) + vlen = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + # get indexes of sampled frames + frame_idxs = sample_frames(num_frames, vlen, sample=sample, fix_start=fix_start) + frames = [] + success_idxs = [] + for index in frame_idxs: + cap.set(cv2.CAP_PROP_POS_FRAMES, index - 1) + ret, frame = cap.read() + if ret: + frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + frame = torch.from_numpy(frame).byte() + # # (H x W x C) to (C x H x W) + frame = frame.permute(2, 0, 1) + frames.append(frame) + success_idxs.append(index) + else: + pass + # print(frame_idxs, ' fail ', index, f' (vlen {vlen})') + # return frames tensor + frames = torch.stack(frames) # .float() / 255 + # print(frames.size()) + cap.release() + return frames, success_idxs, vlen + + +def read_frames_decord(video_path, num_frames, sample='rand', fix_start=None): + video_reader = 
decord.VideoReader(video_path, num_threads=1) + vlen = len(video_reader) + frame_idxs = sample_frames(num_frames, vlen, sample=sample, fix_start=fix_start) + frames = video_reader.get_batch(frame_idxs) + frames = frames.float() / 255 + frames = frames.permute(0, 3, 1, 2) + return frames, frame_idxs, vlen \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/image/cc12m.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/cc12m.py new file mode 100644 index 0000000..c2b764e --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/cc12m.py @@ -0,0 +1,159 @@ +import json +from .base_dataset import BaseDataset +import random +import os +import pandas as pd +import io +from PIL import Image +from CoTrain.datasets import client + + +class CC12MDataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + self.metadata = None + self._load_metadata() + if split == "train": + names = ["cc12m_train"] + elif split == "val": + names = ["cc12m_val"] + elif split == "test": + names = ["cc12m_val"] + print(names, ": ", len(self.metadata), "samples in total.") + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + self.data_dir = "s3://GCC/GCC12m/" + + def _load_metadata(self): + # download specific + # metadata_dir = './meta_data/cc12m' + # split_files = { + # 'train': 'train.tsv', + # 'val': 'val.tsv', + # 'test': 'test.tsv' + # } + # target_split_fp = split_files[self.split] + # metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t') + # self.metadata = metadata + file_path = "/mnt/cache/share_data/DSK_datasets/cc12m/cc12m_clean.json" + if self.split == "train": + self.metadata = [json.loads(x) for x in open(file_path).readlines()[:-10]] + else: + self.metadata = [json.loads(x) for x in open(file_path).readlines()[-10:]] + + self.metadata = [(x['caption'], x['filename']) for x in self.metadata] + + def _get_image_path(self, sample): + # print(sample[1]) + # rel_fp = str(sample[1]).split('/')[-1] + # print(os.path.join(self.data_dir, rel_fp)) + rel_fp = sample[1] + return os.path.join(self.data_dir, rel_fp), rel_fp + + def _get_caption(self, sample): + return sample[0] + + def get_raw_image(self, sample): + # print(sample) + abs_fp, rel_fp = self._get_image_path(sample) + if "s3://" in abs_fp: + img_bytes = client.get(abs_fp) + assert img_bytes is not None, "Get image failed from {}".format(img_bytes) + img = Image.open(io.BytesIO(img_bytes)).convert("RGB") + else: + img = Image.open(abs_fp).convert("RGB") + if img is None: + raise Exception("Invalid img!", rel_fp) + else: + return img + + def _get_object_path(self, sample): + """ + get the object npy path + Args: + sample (dict): + Returns: + abs path + """ + rel_object_fp = os.path.join(sample[1], '1.npz') + full_object_fp = os.path.join(self.object_dir, self.split, rel_object_fp) + return os.path.join(self.split, rel_object_fp), full_object_fp + + def get_image(self, index, sample, image_key="image"): + image = self.get_raw_image(sample) + image_tensor = self.image_aug(image, self.transforms) + # image_tensor = [tr(image).unsqueeze(0) for tr in self.transforms] + return { + "video": image_tensor, + "vid_index": sample[1], + "cap_index": index, + "raw_index": index, + } + + def get_false_image(self, rep, image_key="image"): + random_index = random.randint(0, len(self.metadata) - 1) + sample = self.metadata[random_index] + image = 
self.get_raw_image(sample) + #image_tensor = [tr(image).unsqueeze(0) for tr in self.transforms] + image_tensor = self.image_aug(image, self.transforms) + return {f"false_video_{rep}": image_tensor} + + def get_text(self, raw_index, sample): + text = sample[0] + encoding = self.tokenizer( + text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return { + "text": (text, encoding), + "vid_index": sample[1], + "cap_index": raw_index, + "raw_index": raw_index, + } + + def get_false_text(self, rep): + random_index = random.randint(0, len(self.metadata) - 1) + sample = self.metadata[random_index] + text = sample[0] + encoding = self.tokenizer( + text, + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return {f"false_text_{rep}": (text, encoding)} + + def get_suite(self, index): + result = None + # print(self.draw_false_image) # 1 + while result is None: + sample = self.metadata[index] + # print(sample) + try: + ret = dict() + ret.update(self.get_image(index, sample)) + if not self.image_only: + txt = self.get_text(index, sample) + ret.update({"replica": True if txt["cap_index"] > 0 else False}) + ret.update(txt) + + for i in range(self.draw_false_image): + ret.update(self.get_false_image(i)) + for i in range(self.draw_false_text): + ret.update(self.get_false_text(i)) + result = True + except Exception as e: + print(f"Error while read file idx {sample[1]} in {self.names[0]} -> {e}") + index = random.randint(0, len(self.metadata) - 1) + # ret["image"] = ret["image"].unsqueeze(1) + return ret + + def __len__(self): + return len(self.metadata) + + def __getitem__(self, index): + return self.get_suite(index) + diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/image/cc3m.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/cc3m.py new file mode 100644 index 0000000..5845aa7 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/cc3m.py @@ -0,0 +1,155 @@ +from glob import glob +from .base_dataset import BaseDataset +import random +import os +import pandas as pd +import io +from PIL import Image +from CoTrain.datasets import client + + +class CC3MDataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + self.metadata = None + self._load_metadata() + if split == "train": + names = ["cc3m_train"] + elif split == "val": + names = ["cc3m_val"] + elif split == "test": + names = ["cc3m_val"] + + print(names, ": ", len(self.metadata), "samples in total.") + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + self.data_dir = "s3://GCC/" + + def _load_metadata(self): + # download specific + metadata_dir = './meta_data/cc3m' + split_files = { + 'train': 'cc3m_training_success_full.tsv', + 'val': 'cc3m_validation_success_full.tsv', # there is no test + 'test': 'cc3m_validation_success_full.tsv' + } + target_split_fp = split_files[self.split] + metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t') + # elif self.split == 'val': + # metadata = metadata.sample(1000, random_state=0) # 15k val is unnecessarily large, downsample. 
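+ # Editor's note (assumption, not part of the original source): the accessors below index each metadata row
+ # positionally (sample[0] for the caption, sample[1] for the relative image file name), so the TSV is expected to
+ # carry the caption in its first column and the image path in its second; the exact layout of the files under
+ # s3://GCC/ is not documented here and is assumed.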
+ + self.metadata = metadata + + def _get_image_path(self, sample): + # conceptual captions uses this hashing to create the filename + rel_dir = 'training' + if self.split != 'train': + rel_dir = 'validation' + rel_fp = os.path.join(rel_dir, sample[1]) + #rel_fp = os.path.join(rel_dir, str(zlib.crc32(sample['thumbnailUrl'].encode('utf-8')) & 0xffffffff)) + # print(rel_fp) + return os.path.join(self.data_dir, rel_fp), rel_fp + + def _get_caption(self, sample): + return sample[0] + #return sample['caption'] + + def get_raw_image(self, sample): + # print(sample) + abs_fp, rel_fp = self._get_image_path(sample) + if "s3://" in abs_fp: + img_bytes = client.get(abs_fp) + assert img_bytes is not None, "Get image failed from {}".format(img_bytes) + img = Image.open(io.BytesIO(img_bytes)).convert("RGB") + else: + img = Image.open(abs_fp).convert("RGB") + if img is None: + raise Exception("Invalid img!", rel_fp) + else: + return img + + def get_image(self, index, sample, image_key="image"): + image = self.get_raw_image(sample) + # image_tensor = [tr(image).unsqueeze(0) for tr in global_transforms] + image_tensor = self.image_aug(image, self.transforms) + # image_tensor[0] = image_tensor[0].unsqueeze(0) + # print(len(image_tensor)) + # print(image_tensor[0].size()) + return { + "video": image_tensor, + "vid_index": sample[1], + "cap_index": index, + "raw_index": index, + } + + def get_false_image(self, rep, image_key="image"): + random_index = random.randint(0, len(self.metadata) - 1) + sample = self.metadata.iloc[random_index] + image = self.get_raw_image(sample) + # image_tensor = [tr(image).unsqueeze(0) for tr in global_transforms] + image_tensor = self.image_aug(image, self.transforms) + return {f"false_video_{rep}": image_tensor} + + def get_text(self, raw_index, sample): + text = sample[0] + encoding = self.tokenizer( + text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + # print(encoding.size()) + return { + "text": (text, encoding), + "vid_index": sample[1], + "cap_index": raw_index, + "raw_index": raw_index, + } + + def get_false_text(self, rep): + random_index = random.randint(0, len(self.metadata) - 1) + sample = self.metadata.iloc[random_index] + text = sample[0] + encoding = self.tokenizer( + text, + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return {f"false_text_{rep}": (text, encoding)} + + def get_suite(self, index): + result = None + max_try = 10 + try_time = 0 + while result is None: + try_time += 1 + sample = self.metadata.iloc[index] + try: + ret = dict() + ret.update(self.get_image(index, sample)) + if not self.image_only: + txt = self.get_text(index, sample) + ret.update({"replica": True if txt["cap_index"] > 0 else False}) + ret.update(txt) + + for i in range(self.draw_false_image): + ret.update(self.get_false_image(i)) + for i in range(self.draw_false_text): + ret.update(self.get_false_text(i)) + result = True + except Exception as e: + index = random.randint(0, len(self.metadata) - 1) + exc = e + if try_time > max_try: + print(f"Exceed max time Error while read file idx {sample} in {self.names[0]} with error {exc}") + try_time = 0 + return ret + + def __len__(self): + return len(self.metadata) + + def __getitem__(self, index): + return self.get_suite(index) + diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/image/coco_caption_karpathy_dataset.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/coco_caption_karpathy_dataset.py 
new file mode 100644 index 0000000..46c93dc --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/coco_caption_karpathy_dataset.py @@ -0,0 +1,30 @@ +from .base_dataset import BaseDataset + + +class CocoCaptionKarpathyDataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + + if split == "train": + names = ["coco_caption_karpathy_train"] # , "coco_caption_karpathy_restval" + elif split == "val": + names = ["coco_caption_karpathy_val"] + # names = ["coco_caption_karpathy_test"] + # names = [] # for fast train + elif split == "test": + names = ["coco_caption_karpathy_test"] + # names = [] + + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + + def __getitem__(self, index): + suite = self.get_suite(index) + + if "test" in self.split: + _index, _question_index = self.index_mapper[index] + iid = self.table["image_id"][_index].as_py() + iid = int(iid.split(".")[0].split("_")[-1]) + suite.update({"iid": iid}) + + return suite diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/image/conceptual_caption_dataset.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/conceptual_caption_dataset.py new file mode 100644 index 0000000..9190311 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/conceptual_caption_dataset.py @@ -0,0 +1,19 @@ +from glob import glob +from .base_dataset import BaseDataset + + +class ConceptualCaptionDataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + if split == "test": + split = "val" + + if split == "train": + names = [f"conceptual_caption_train_{i}" for i in range(30)] + elif split == "val": + names = ["conceptual_caption_val_0"] + + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + + def __getitem__(self, index): + return self.get_suite(index) diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/image/f30k_caption_karpathy_dataset.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/f30k_caption_karpathy_dataset.py new file mode 100644 index 0000000..61f0255 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/f30k_caption_karpathy_dataset.py @@ -0,0 +1,18 @@ +from .base_dataset import BaseDataset + + +class F30KCaptionKarpathyDataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + + if split == "train": + names = ["f30k_caption_karpathy_train", "f30k_caption_karpathy_val"] + elif split == "val": + names = ["f30k_caption_karpathy_test"] + elif split == "test": + names = ["f30k_caption_karpathy_test"] + + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + + def __getitem__(self, index): + return self.get_suite(index) diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/image/laion400m.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/laion400m.py new file mode 100644 index 0000000..82499fe --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/laion400m.py @@ -0,0 +1,149 @@ +import json +from .base_dataset import BaseDataset +import random +import os +import pandas as pd +import io +from PIL import Image +from CoTrain.datasets import client + + +class LAION400MDataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split 
+ self.metadata = None + self._load_metadata() + if split == "train": + names = ["laion400m_train"] + elif split == "val": + names = ["laion400m_val"] + elif split == "test": + names = ["laion400m_val"] + print(names, ": ", len(self.metadata), "samples in total.") + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + self.data_dir = "" + + def _load_metadata(self): + file_path = "/mnt/lustre/share_data/liyizhuo.vendor/datasets/LAION-400M-partial-meta.json" + if self.split == "train": + self.metadata = [json.loads(x) for x in open(file_path).readlines()[:-10]] + else: + self.metadata = [json.loads(x) for x in open(file_path).readlines()[-10:]] + + self.metadata = [(x['caption'], x['filename']) for x in self.metadata] + + def _get_image_path(self, sample): + # print(sample[1]) + # rel_fp = str(sample[1]).split('/')[-1] + # print(os.path.join(self.data_dir, rel_fp)) + rel_fp = sample[1] + return os.path.join(self.data_dir, rel_fp), rel_fp + + def _get_caption(self, sample): + return sample[0] + + def get_raw_image(self, sample): + # print(sample) + abs_fp, rel_fp = self._get_image_path(sample) + if "s3://" in abs_fp: + img_bytes = client.get(abs_fp) + assert img_bytes is not None, "Get image failed from {}".format(img_bytes) + img = Image.open(io.BytesIO(img_bytes)).convert("RGB") + else: + img = Image.open(abs_fp).convert("RGB") + if img is None: + raise Exception("Invalid img!", rel_fp) + else: + return img + + def _get_object_path(self, sample): + """ + get the object npy path + Args: + sample (dict): + Returns: + abs path + """ + rel_object_fp = os.path.join(sample[1], '1.npz') + full_object_fp = os.path.join(self.object_dir, self.split, rel_object_fp) + return os.path.join(self.split, rel_object_fp), full_object_fp + + def get_image(self, index, sample, image_key="image"): + image = self.get_raw_image(sample) + image_tensor = self.image_aug(image, self.transforms) + # image_tensor = [tr(image).unsqueeze(0) for tr in self.transforms] + return { + "video": image_tensor, + "vid_index": sample[1], + "cap_index": index, + "raw_index": index, + } + + def get_false_image(self, rep, image_key="image"): + random_index = random.randint(0, len(self.metadata) - 1) + sample = self.metadata[random_index] + image = self.get_raw_image(sample) + #image_tensor = [tr(image).unsqueeze(0) for tr in self.transforms] + image_tensor = self.image_aug(image, self.transforms) + return {f"false_video_{rep}": image_tensor} + + def get_text(self, raw_index, sample): + text = sample[0] + encoding = self.tokenizer( + text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return { + "text": (text, encoding), + "vid_index": sample[1], + "cap_index": raw_index, + "raw_index": raw_index, + } + + def get_false_text(self, rep): + random_index = random.randint(0, len(self.metadata) - 1) + sample = self.metadata[random_index] + text = sample[0] + encoding = self.tokenizer( + text, + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return {f"false_text_{rep}": (text, encoding)} + + def get_suite(self, index): + result = None + # print(self.draw_false_image) # 1 + while result is None: + sample = self.metadata[index] + # print(sample) + try: + ret = dict() + ret.update(self.get_image(index, sample)) + if not self.image_only: + txt = self.get_text(index, sample) + ret.update({"replica": True if txt["cap_index"] > 0 else False}) + ret.update(txt) + + for i in range(self.draw_false_image): + 
ret.update(self.get_false_image(i)) + for i in range(self.draw_false_text): + ret.update(self.get_false_text(i)) + result = True + except Exception as e: + print(f"Error while read file idx {sample[1]} in {self.names[0]} -> {e}") + index = random.randint(0, len(self.metadata) - 1) + # ret["image"] = ret["image"].unsqueeze(1) + return ret + + def __len__(self): + return len(self.metadata) + + def __getitem__(self, index): + return self.get_suite(index) + diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/image/mix100m.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/mix100m.py new file mode 100644 index 0000000..d747ed2 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/mix100m.py @@ -0,0 +1,204 @@ +import json +from .base_dataset import BaseDataset +import random +import os +import pandas as pd +import numpy as np +import io +import torch +from PIL import Image +from CoTrain.datasets import client +import CoTrain.modules.dist_utils as du + + +class MIX100MDataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + self.metadata = None + self.world_size = du.get_world_size() + self.rank = du.get_rank() + self._load_metadata() + if split == "train": + names = ["mix100m_train"] + elif split == "val": + names = ["mix100m_val"] + elif split == "test": + names = ["mix100m_val"] + print(names, ": ", len(self.metadata), "samples in total.") + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + self.data_dir = "" + + def _load_metadata(self): + if self.split != "train": + file_path = "/mnt/lustre/share_data/liyizhuo/datasets/fake_mix100m_val.json" + self.metadata = [json.loads(x) for x in open(file_path).readlines()] + self.metadata = [ + (" ".join(x["caption"]), os.path.join(x["image_root"], x["filename"])) + for x in self.metadata + ] + return None + + meta_root = ( + "s3://liyizhuo/datasets/shlab_softmax_100m_10000/" + ) + file_list = [os.path.join(meta_root, f"{i}".zfill(5) + ".json") for i in range(10)] + + ranked_meta = [[] for _ in range(self.world_size)] + ranked_num = [0 for _ in range(self.world_size)] + import time + + start_time = time.time() + count = 0 + for seed_num, each_meta_file in enumerate(file_list): + f = client.get(each_meta_file).decode().strip().split("\n") + np.random.seed(seed_num) + random_ranks = np.random.randint(0, self.world_size, size=(len(f), )) + for i, line in enumerate(f): + count += 1 + if self.rank == random_ranks[i]: + info = json.loads(line.encode("UTF-8")) + info = ( + " ".join(info["caption"]), + os.path.join(info["image_root"], info["filename"]), + ) + + ranked_meta[self.rank].append(info) + ranked_num[self.rank] += 1 + if count % 1000000 == 0 and self.rank == 0: + print( + "-------------------------------------------------------------- every 1M time:", + (time.time() - start_time), + "{}M".format(count / 1000000), + ) + del f + + self.metadata = ranked_meta[self.rank] + num = ranked_num[self.rank] + + # balance data length in each subprocess + ranked_num = du.all_gather(num) + du.synchronize() + max_num = max(ranked_num) + if max_num > num: + diff = max_num - num + self.metadata.extend(random.sample(self.metadata, diff)) + num = len(self.metadata) + assert num == max_num + + def _get_image_path(self, sample): + # print(sample[1]) + # rel_fp = str(sample[1]).split('/')[-1] + # print(os.path.join(self.data_dir, rel_fp)) + rel_fp = sample[1] + return os.path.join(self.data_dir, rel_fp), 
rel_fp + + def _get_caption(self, sample): + return sample[0] + + def get_raw_image(self, sample): + # print(sample) + abs_fp, rel_fp = self._get_image_path(sample) + if "s3://" in abs_fp: + img_bytes = client.get(abs_fp) + assert img_bytes is not None, "Get image failed from {}".format(img_bytes) + img = Image.open(io.BytesIO(img_bytes)).convert("RGB") + else: + img = Image.open(abs_fp).convert("RGB") + if img is None: + raise Exception("Invalid img!", rel_fp) + else: + return img + + def _get_object_path(self, sample): + """ + get the object npy path + Args: + sample (dict): + Returns: + abs path + """ + rel_object_fp = os.path.join(sample[1], "1.npz") + full_object_fp = os.path.join(self.object_dir, self.split, rel_object_fp) + return os.path.join(self.split, rel_object_fp), full_object_fp + + def get_image(self, index, sample, image_key="image"): + image = self.get_raw_image(sample) + image_tensor = self.image_aug(image, self.transforms) + # image_tensor = [tr(image).unsqueeze(0) for tr in self.transforms] + return { + "video": image_tensor, + "vid_index": sample[1], + "cap_index": index, + "raw_index": index, + } + + def get_false_image(self, rep, image_key="image"): + random_index = random.randint(0, len(self.metadata) - 1) + sample = self.metadata[random_index] + image = self.get_raw_image(sample) + # image_tensor = [tr(image).unsqueeze(0) for tr in self.transforms] + image_tensor = self.image_aug(image, self.transforms) + return {f"false_video_{rep}": image_tensor} + + def get_text(self, raw_index, sample): + text = sample[0] + encoding = self.tokenizer( + text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return { + "text": (text, encoding), + "vid_index": sample[1], + "cap_index": raw_index, + "raw_index": raw_index, + } + + def get_false_text(self, rep): + random_index = random.randint(0, len(self.metadata) - 1) + sample = self.metadata[random_index] + text = sample[0] + encoding = self.tokenizer( + text, + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return {f"false_text_{rep}": (text, encoding)} + + def get_suite(self, index): + index %= len(self.metadata) + result = None + # print(self.draw_false_image) # 1 + while result is None: + sample = self.metadata[index] + # print(sample) + try: + ret = dict() + ret.update(self.get_image(index, sample)) + if not self.image_only: + txt = self.get_text(index, sample) + ret.update({"replica": True if txt["cap_index"] > 0 else False}) + ret.update(txt) + + for i in range(self.draw_false_image): + ret.update(self.get_false_image(i)) + for i in range(self.draw_false_text): + ret.update(self.get_false_text(i)) + result = True + except Exception as e: + print( + f"Error while read file idx {sample[1]} in {self.names[0]} -> {e}" + ) + index = random.randint(0, len(self.metadata) - 1) + # ret["image"] = ret["image"].unsqueeze(1) + return ret + + def __len__(self): + return len(self.metadata) * self.world_size + + def __getitem__(self, index): + return self.get_suite(index) diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/image/nlvr2_dataset.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/nlvr2_dataset.py new file mode 100644 index 0000000..d562faa --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/nlvr2_dataset.py @@ -0,0 +1,51 @@ +from .base_dataset import BaseDataset +import sys +import random + + +class NLVR2Dataset(BaseDataset): + def __init__(self, *args, 
split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + + if split == "train": + names = ["nlvr2_train"] + elif split == "val": + names = ["nlvr2_dev", "nlvr2_test1"] + elif split == "test": + names = ["nlvr2_dev", "nlvr2_test1"] + + super().__init__( + *args, + **kwargs, + names=names, + text_column_name="questions", + remove_duplicate=False, + ) + + def __getitem__(self, index): + result = None + while result is None: + try: + image_tensor_0 = self.get_image(index, image_key="image_0")["image"] + image_tensor_1 = self.get_image(index, image_key="image_1")["image"] + text = self.get_text(index)["text"] + result = True + except: + print( + f"error while read file idx {index} in {self.names[0]}", + file=sys.stderr, + ) + index = random.randint(0, len(self.index_mapper) - 1) + + index, question_index = self.index_mapper[index] + answers = self.table["answers"][index][question_index].as_py() + answers = answers == "True" + + return { + "image_0": image_tensor_0, + "image_1": image_tensor_1, + "text": text, + "answers": answers, + "table_name": self.table_names[index], + } diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/image/sbu_caption_dataset.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/sbu_caption_dataset.py new file mode 100644 index 0000000..ec253fe --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/sbu_caption_dataset.py @@ -0,0 +1,19 @@ +from glob import glob +from .base_dataset import BaseDataset + + +class SBUCaptionDataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + if split == "test": + split = "val" + + if split == "train": + names = [f"sbu_{i}" for i in range(9)] + elif split == "val": + names = [] + + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + + def __getitem__(self, index): + return self.get_suite(index) diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/image/vcr.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/vcr.py new file mode 100644 index 0000000..7aab3ec --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/vcr.py @@ -0,0 +1,362 @@ +from CoTrain.datasets.video.video_base_dataset import BaseDataset, color_img +import random +import os +import pandas as pd +import cv2 +from CoTrain.transforms.video.videoaug import VideoTransform +import torch + +## from https://github.com/rowanz/r2c/blob/master/dataloaders/vcr.py +# Here's an example jsonl +# { +# "movie": "3015_CHARLIE_ST_CLOUD", +# "objects": ["person", "person", "person", "car"], +# "interesting_scores": [0], +# "answer_likelihood": "possible", +# "img_fn": "lsmdc_3015_CHARLIE_ST_CLOUD/3015_CHARLIE_ST_CLOUD_00.23.57.935-00.24.00.783@0.jpg", +# "metadata_fn": "lsmdc_3015_CHARLIE_ST_CLOUD/3015_CHARLIE_ST_CLOUD_00.23.57.935-00.24.00.783@0.json", +# "answer_orig": "No she does not", +# "question_orig": "Does 3 feel comfortable?", +# "rationale_orig": "She is standing with her arms crossed and looks disturbed", +# "question": ["Does", [2], "feel", "comfortable", "?"], +# "answer_match_iter": [3, 0, 2, 1], +# "answer_sources": [3287, 0, 10184, 2260], +# "answer_choices": [ +# ["Yes", "because", "the", "person", "sitting", "next", "to", "her", "is", "smiling", "."], +# ["No", "she", "does", "not", "."], +# ["Yes", ",", "she", "is", "wearing", "something", "with", "thin", "straps", "."], +# ["Yes", ",", "she", "is", "cold", "."]], +# "answer_label": 1, +# 
"rationale_choices": [ +# ["There", "is", "snow", "on", "the", "ground", ",", "and", +# "she", "is", "wearing", "a", "coat", "and", "hate", "."], +# ["She", "is", "standing", "with", "her", "arms", "crossed", "and", "looks", "disturbed", "."], +# ["She", "is", "sitting", "very", "rigidly", "and", "tensely", "on", "the", "edge", "of", "the", +# "bed", ".", "her", "posture", "is", "not", "relaxed", "and", "her", "face", "looks", "serious", "."], +# [[2], "is", "laying", "in", "bed", "but", "not", "sleeping", ".", +# "she", "looks", "sad", "and", "is", "curled", "into", "a", "ball", "."]], +# "rationale_sources": [1921, 0, 9750, 25743], +# "rationale_match_iter": [3, 0, 2, 1], +# "rationale_label": 1, +# "img_id": "train-0", +# "question_number": 0, +# "annot_id": "train-0", +# "match_fold": "train-0", +# "match_index": 0, +# } + + +class VCRDataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + self.metadata = None + self._load_metadata() + if split == "train": + names = ["vcr_train"] + elif split == "val": + names = ["vcr_val"] + elif split == "test": + names = ["vcr_test"] + + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + self.video_transform = VideoTransform(mode=split, num_frames=self.num_frames) # train or val model + # for appear objects + self.only_use_relevant_dets = True + if self.only_use_relevant_dets: + self.relevant_dets = [] # resort the detection numbers + self.relevant_dets_classes = [] + + def _load_metadata(self): + # download specific + metadata_dir = './meta_data/vcr1annots' + split_files = { + 'train': 'train.jsonl', + 'val': 'val.jsonl', # there is no test + 'test': 'test.jsonl' + } + target_split_fp = split_files[self.split] + metadata = pd.read_json(os.path.join(metadata_dir, target_split_fp), lines=True) + self.metadata = metadata + + def _get_image_path(self, sample): + # print(sample.keys()) + # print(sample['img_fn']) + # VCR/vcr1images + rel_fp = sample['img_fn'] + return os.path.join(self.data_dir, 'vcr1images', rel_fp), rel_fp + + def get_objects(self, sample): + metadata2 = pd.read_json(os.path.join(self.data_dir, 'vcr1images', + sample['metadata_fn']), lines=True) + object_meta = metadata2.iloc[0] + return object_meta + + def _get_caption(self, sample): + return sample[0] + + # def _get_objects(self, sample): + # metadata2 = pd.read_json(os.path.join(self.data_dir, + # sample['metadata_fn']), lines=True) + # sample = metadata2.iloc[0] + # return sample['boxes'] + + def get_raw_image(self, sample, object_meta, img_color_mask=True): + # print(sample) + abs_fp, rel_fp = self._get_image_path(sample) + # img = Image.open(abs_fp) + img = cv2.imread(abs_fp) + # add bbox annotation here + if img_color_mask: + img = color_img(img, object_meta, self.relevant_dets, self.only_use_relevant_dets) + if img is None: + raise Exception("Invalid img!", rel_fp) + else: + return img + + def get_image(self, index, sample, object_meta, image_key="image"): + frames = [] + image = self.get_raw_image(sample, object_meta) + frame = torch.from_numpy(image).byte() + frame = frame.permute(2, 0, 1) + frames.append(frame) + frames = torch.stack(frames).permute(1, 0, 2, 3) + # print(frames.size()) + image_tensor = [self.video_transform(frames).permute(1, 0, 2, 3)] # to tchw + # image = self.get_raw_image(sample) + # image_tensor = [tr(image) for tr in self.transforms] + # print(image_tensor.size()) + # image_tensor.unsqueeze(0) + return image_tensor + + def get_false_image(self, 
rep, image_key="image"): + random_index = random.randint(0, len(self.metadata) - 1) + sample = self.metadata.iloc[random_index] + image = self.get_raw_image(sample) + image_tensor = [tr(image) for tr in self.transforms] + return {f"false_image_{rep}": image_tensor} + + # def get_text(self, sample, object_meta): + # question = self.get_question(sample, object_meta) + # texts = [] + # for answer in sample['answer_choices']: + # raw_text = question + '[SEP]' + # for word in answer: + # if isinstance(word, list): + # for object_idx in word: + # self.relevant_dets.add(object_idx) + # rel_object_idx = object_idx + # if self.only_use_relevant_dets: + # rel_object_idx = len(self.relevant_dets) - 1 # begin from 0 + # raw_text += ' ' + object_meta['names'][object_idx] + ' ' + str(rel_object_idx) + # else: + # raw_text += ' ' + word + # # for index in range(len(answer)): + # # raw_text += ' ' + str(answer[index]) + # print(raw_text) + # encoding = self.tokenizer( + # raw_text, + # padding="max_length", + # truncation=True, + # max_length=self.max_text_len, + # return_special_tokens_mask=True, + # ) + # texts.append((raw_text, encoding)) + # return texts + + def update_rele_det(self, sample, object_meta, index): + text = [] + for i in range(len(sample['question'])): + text.append(sample['question'][i]) + for i in range(len(sample['answer_choices'])): + for j in range(len(sample['answer_choices'][i])): + text.append(sample['answer_choices'][i][j]) + for i in range(len(sample['rationale_choices'])): + for j in range(len(sample['rationale_choices'][i])): + text.append(sample['rationale_choices'][i][j]) + # update relevant detes + for word in text: + if isinstance(word, list): + for object_idx in word: + # self.relevant_dets.add(object_idx) + if object_idx not in self.relevant_dets: + self.relevant_dets.append(object_idx) + for object in self.relevant_dets: + self.relevant_dets_classes.append(object_meta['names'][object]) + # print(index, text) + # print(index, self.relevant_dets) + # print(index, self.relevant_dets_classes) + # + return text + + def get_text(self, sample, object_meta, index): + # detect all object index and sort these items + if self.only_use_relevant_dets: + self.update_rele_det(sample, object_meta, index) + question = self.get_question(sample, object_meta) + qa_texts = [] + # image size: 384 x 384 + # prompt: [START] + "answer_question:" + # prompt: [START] + ' provide rationale:'), + # add all text tokens into this model. 
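# Illustrative summary of the prompts assembled below (a hedged reading of this code, not an
# official template): each answer candidate is tokenized from
#     <question> + 'answer question: ' + <answer words> + '[END]'
# and each rationale candidate from
#     <question> + <ground-truth answer words> + 'provide rationale: ' + <rationale words> + '[END]',
# where every [object_id] tag is expanded to '<detection class name> <index>', with the index
# remapped to its position in self.relevant_dets when only_use_relevant_dets is True.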
+ for answer in sample['answer_choices']: + raw_text = question + 'answer question: ' + for word in answer: + if isinstance(word, list): + for object_idx in word: + raw_text += ' ' + object_meta['names'][object_idx] + ' ' + # rename the object index, for example + if self.only_use_relevant_dets: + raw_text += str(self.relevant_dets.index(object_idx)) + else: + raw_text += str(object_idx) + else: + raw_text += ' ' + word + raw_text += '[END]' + # print(index, raw_text) + qa_encoding = self.tokenizer( + raw_text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + qa_texts.append((raw_text, qa_encoding)) + + gt_ans = sample['answer_choices'][sample['answer_label']] + gt_ans_text = "" + for word in gt_ans: + if isinstance(word, list): + for object_idx in word: + gt_ans_text += ' ' + object_meta['names'][object_idx] + ' ' + # rename the object index, for example + if self.only_use_relevant_dets: + gt_ans_text += str(self.relevant_dets.index(object_idx)) + else: + gt_ans_text += str(object_idx) + else: + gt_ans_text += ' ' + word + qar_texts = [] + for reason in sample['rationale_choices']: + raw_text = question + gt_ans_text + 'provide rationale: ' + for word in reason: + if isinstance(word, list): + for object_idx in word: + raw_text += ' ' + object_meta['names'][object_idx] + ' ' + if self.only_use_relevant_dets: + raw_text += str(self.relevant_dets.index(object_idx)) + else: + raw_text += str(object_idx) + else: + raw_text += ' ' + word + # print(index, raw_text) + raw_text += '[END]' + encoding = self.tokenizer( + raw_text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + qar_texts.append((raw_text, encoding)) + return [qa_texts, qar_texts] + # + # def get_qar(self, sample, object_meta): + # question = self.get_question(sample, object_meta) + '[SEP]' # '[MIDDLE]' + # gt_ans = sample['answer_choices'][sample['answer_label']] + # gt_ans_text = "" + # # for index in range(len(gt_ans)): + # # gt_ans_text += ' ' + str(gt_ans[index]) + # for word in gt_ans: + # if isinstance(word, list): + # for object_idx in word: + # self.relevant_dets.add(object_idx) + # rel_object_idx = object_idx + # if self.only_use_relevant_dets: + # rel_object_idx = len(self.relevant_dets) - 1 # begin from 0 + # print(object_idx, rel_object_idx) + # gt_ans_text += ' ' + object_meta['names'][object_idx] + ' ' + str(rel_object_idx) + # else: + # gt_ans_text += ' ' + word + # texts = [] + # for reason in sample['rationale_choices']: + # raw_text = question + gt_ans_text + '[SEP]' + # for word in reason: + # if isinstance(word, list): + # for object_idx in word: + # self.relevant_dets.add(object_idx) + # rel_object_idx = object_idx + # if self.only_use_relevant_dets: + # rel_object_idx = len(self.relevant_dets) - 1 # begin from 0 + # raw_text += ' ' + object_meta['names'][object_idx] + ' ' + str(rel_object_idx) + # else: + # raw_text += ' ' + word + # print(raw_text) + # # for index in range(len(reason)): + # # raw_text += ' ' + str(reason[index]) + # # self.relevant_dets.append(object_idx) + # # print(raw_text) + # encoding = self.tokenizer( + # raw_text, + # padding="max_length", + # truncation=True, + # max_length=self.max_text_len, + # return_special_tokens_mask=True, + # ) + # texts.append((raw_text, encoding)) + # return texts + + def get_answer_label(self, sample): + answer = int(sample['answer_label']) + return answer + + def get_reason_answer_label(self, sample): + answer = 
int(sample['rationale_label']) + return answer + + def get_question(self, sample, object_meta): + raw_text = "" + for index in range(len(sample['question'])): + if isinstance(sample['question'][index], list): + for object_idx in sample['question'][index]: + raw_text += ' ' + object_meta['names'][object_idx] + ' ' + if self.only_use_relevant_dets: + raw_text += str(self.relevant_dets.index(object_idx)) + else: + raw_text += str(object_idx) + else: + raw_text += ' ' + str(sample['question'][index]) + return raw_text + + def __len__(self): + return len(self.metadata) + + def __getitem__(self, index): + sample = self.metadata.iloc[index] + object_meta = self.get_objects(sample) + self.relevant_dets = [] # initalize + self.relevant_dets_classes = [] + answer = self.get_answer_label(sample) + reason_answer = self.get_reason_answer_label(sample) + ret = { + "img_index": index, + "cap_index": index, + "raw_index": index, + 'answer': answer, + 'reason_answer': reason_answer + } + # texts = self.get_text(sample, object_meta) + # qar_texts = self.get_qar(sample, object_meta) + [qa_texts, qar_texts] = self.get_text(sample, object_meta, index) + ret["text"] = qa_texts[0] + # print(texts[0]) + # update other answers as false text + for i in range(self.draw_options_text - 1): + ret.update({f"options_text_{i}": qa_texts[i+1]}) + for j in range(self.draw_options_text): + ret.update({f"qar_text_{j}": qar_texts[j]}) + # print(ret.keys()) + image_tensor = self.get_image(index, sample, object_meta) + ret["image"] = image_tensor + return ret + diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/image/vg_caption_dataset.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/vg_caption_dataset.py new file mode 100644 index 0000000..1dc0277 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/vg_caption_dataset.py @@ -0,0 +1,19 @@ +from .base_dataset import BaseDataset + + +class VisualGenomeCaptionDataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + if split == "test": + split = "val" + + if split == "train": + names = ["vg_train"] + elif split == "val": + names = [] + elif split == "test": + names = [] + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + + def __getitem__(self, index): + return self.get_suite(index) diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/image/vqav2_dataset.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/vqav2_dataset.py new file mode 100644 index 0000000..f802dea --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/vqav2_dataset.py @@ -0,0 +1,47 @@ +from .base_dataset import BaseDataset + + +class VQAv2Dataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + + if split == "train": + names = ["vqav2_train", "vqav2_trainable_val"] + elif split == "val": + names = ["vqav2_rest_val"] + elif split == "test": + names = ["vqav2_test"] # vqav2_test-dev for test-dev + + super().__init__( + *args, + **kwargs, + names=names, + text_column_name="questions", + remove_duplicate=False, + ) + + def __getitem__(self, index): + image_tensor = self.get_image(index)["image"] + text = self.get_text(index)["text"] + + index, question_index = self.index_mapper[index] + qid = self.table["question_id"][index][question_index].as_py() + + if self.split != "test": + answers = 
self.table["answers"][index][question_index].as_py() + labels = self.table["answer_labels"][index][question_index].as_py() + scores = self.table["answer_scores"][index][question_index].as_py() + else: + answers = list() + labels = list() + scores = list() + + return { + "image": image_tensor, + "text": text, + "vqa_answer": answers, + "vqa_labels": labels, + "vqa_scores": scores, + "qid": qid, + } diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/image/yfcc15m.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/yfcc15m.py new file mode 100644 index 0000000..77a8cd0 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/image/yfcc15m.py @@ -0,0 +1,149 @@ +import json +from .base_dataset import BaseDataset +import random +import os +import pandas as pd +import io +from PIL import Image +from CoTrain.datasets import client + + +class YFCC15MDataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + self.metadata = None + self._load_metadata() + if split == "train": + names = ["yfcc15m_train"] + elif split == "val": + names = ["yfcc15m_val"] + elif split == "test": + names = ["yfcc15m_val"] + print(names, ": ", len(self.metadata), "samples in total.") + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + self.data_dir = "s3://GCC/yfcc100m-part/data/" + + def _load_metadata(self): + file_path = "/mnt/cache/share_data/DSK_datasets/yfcc15m/yfcc15m_clean.json" + if self.split == "train": + self.metadata = [json.loads(x) for x in open(file_path).readlines()[:-10]] + else: + self.metadata = [json.loads(x) for x in open(file_path).readlines()[-10:]] + + self.metadata = [(x['caption'], x['filename']) for x in self.metadata] + + def _get_image_path(self, sample): + # print(sample[1]) + # rel_fp = str(sample[1]).split('/')[-1] + # print(os.path.join(self.data_dir, rel_fp)) + rel_fp = sample[1] + return os.path.join(self.data_dir, rel_fp), rel_fp + + def _get_caption(self, sample): + return sample[0] + + def get_raw_image(self, sample): + # print(sample) + abs_fp, rel_fp = self._get_image_path(sample) + if "s3://" in abs_fp: + img_bytes = client.get(abs_fp) + assert img_bytes is not None, "Get image failed from {}".format(img_bytes) + img = Image.open(io.BytesIO(img_bytes)).convert("RGB") + else: + img = Image.open(abs_fp).convert("RGB") + if img is None: + raise Exception("Invalid img!", rel_fp) + else: + return img + + def _get_object_path(self, sample): + """ + get the object npy path + Args: + sample (dict): + Returns: + abs path + """ + rel_object_fp = os.path.join(sample[1], '1.npz') + full_object_fp = os.path.join(self.object_dir, self.split, rel_object_fp) + return os.path.join(self.split, rel_object_fp), full_object_fp + + def get_image(self, index, sample, image_key="image"): + image = self.get_raw_image(sample) + image_tensor = self.image_aug(image, self.transforms) + # image_tensor = [tr(image).unsqueeze(0) for tr in self.transforms] + return { + "video": image_tensor, + "vid_index": sample[1], + "cap_index": index, + "raw_index": index, + } + + def get_false_image(self, rep, image_key="image"): + random_index = random.randint(0, len(self.metadata) - 1) + sample = self.metadata[random_index] + image = self.get_raw_image(sample) + #image_tensor = [tr(image).unsqueeze(0) for tr in self.transforms] + image_tensor = self.image_aug(image, self.transforms) + return {f"false_video_{rep}": image_tensor} + + def get_text(self, raw_index, sample): 
+ text = sample[0] + encoding = self.tokenizer( + text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return { + "text": (text, encoding), + "vid_index": sample[1], + "cap_index": raw_index, + "raw_index": raw_index, + } + + def get_false_text(self, rep): + random_index = random.randint(0, len(self.metadata) - 1) + sample = self.metadata[random_index] + text = sample[0] + encoding = self.tokenizer( + text, + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return {f"false_text_{rep}": (text, encoding)} + + def get_suite(self, index): + result = None + # print(self.draw_false_image) # 1 + while result is None: + sample = self.metadata[index] + # print(sample) + try: + ret = dict() + ret.update(self.get_image(index, sample)) + if not self.image_only: + txt = self.get_text(index, sample) + ret.update({"replica": True if txt["cap_index"] > 0 else False}) + ret.update(txt) + + for i in range(self.draw_false_image): + ret.update(self.get_false_image(i)) + for i in range(self.draw_false_text): + ret.update(self.get_false_text(i)) + result = True + except Exception as e: + print(f"Error while read file idx {sample[1]} in {self.names[0]} -> {e}") + index = random.randint(0, len(self.metadata) - 1) + # ret["image"] = ret["image"].unsqueeze(1) + return ret + + def __len__(self): + return len(self.metadata) + + def __getitem__(self, index): + return self.get_suite(index) + diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/__init__.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/activitynet.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/activitynet.py new file mode 100644 index 0000000..e8cf3b3 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/activitynet.py @@ -0,0 +1,119 @@ +from .video_base_dataset import BaseDataset, read_frames_from_img_dir +import random +import os +import pandas as pd + + +class ActivityNetDataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + self.metadata = None + if split == "train": + names = ["activitynet_train"] + elif split == "val": + names = ["activitynet_val"] + elif split == "test": + names = ["activitynet_val"] + self._load_metadata() + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + + def _load_metadata(self): + metadata_dir = './meta_data/activitynet' + split_files = { + 'train': 'train.jsonl', + 'val': 'val1.jsonl', + 'test': 'val2.jsonl' + } + target_split_fp = split_files[self.split] + metadata = pd.read_json(os.path.join(metadata_dir, target_split_fp), lines=True) + self.metadata = metadata + + def _get_video_path(self, sample): + rel_video_fp = sample['clip_name'] + full_video_fp = os.path.join(self.data_dir, 'activitynet_frames', rel_video_fp) + return full_video_fp, rel_video_fp + + def get_raw_video(self, sample): + abs_fp, rel_fp = self._get_video_path(sample) + imgs, idxs, vlen = read_frames_from_img_dir(abs_fp, self.num_frames, mode=self.split) + if imgs is None: + raise Exception("Invalid img!", rel_fp) + else: + return imgs + + def get_video(self, index, sample, image_key="image"): + videos = self.get_raw_video(sample) + videos_tensor = self.video_aug(videos, self.video_transform) + return { + "video": 
videos_tensor, + "vid_index": index, + "cap_index": index, + "raw_index": index, + } + + def get_false_video(self, rep, image_key="image"): + random_index = random.randint(0, len(self.metadata) - 1) + sample = self.metadata.iloc[random_index] + videos = self.get_raw_video(sample) + videos_tensor = self.video_aug(videos, self.video_transform) + return {f"false_video_{rep}": videos_tensor} + + def get_text(self, raw_index, sample): + text = sample['caption'] + # print(text) + encoding = self.tokenizer( + text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + # print(encoding.size()) + return { + "text": (text, encoding), + "img_index": raw_index, + "cap_index": raw_index, + "raw_index": raw_index, + } + + def get_false_text(self, rep): + random_index = random.randint(0, len(self.metadata) - 1) + sample = self.metadata.iloc[random_index] + text = sample['caption'] + encoding = self.tokenizer( + text, + # padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return {f"false_text_{rep}": (text, encoding)} + + def get_suite(self, index): + result = None + while result is None: + sample = self.metadata.iloc[index] + try: + ret = dict() + ret.update(self.get_video(index, sample)) + if not self.image_only: + txt = self.get_text(index, sample) + ret.update({"replica": True if txt["cap_index"] > 0 else False}) + ret.update(txt) + + for i in range(self.draw_false_image): + ret.update(self.get_false_video(i)) + for i in range(self.draw_false_text): + ret.update(self.get_false_text(i)) + result = True + except Exception as e: + print(f"Error while read file idx {sample.name} in {self.names[0]} -> {e}") + index = random.randint(0, len(self.metadata) - 1) + return ret + + def __len__(self): + return len(self.metadata) + + def __getitem__(self, index): + return self.get_suite(index) \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/activitynetqa.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/activitynetqa.py new file mode 100644 index 0000000..995911e --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/activitynetqa.py @@ -0,0 +1,110 @@ +import numpy as np +from .video_base_dataset import BaseDataset +import os +import json +import pandas as pd + + +class ACTIVITYNETQADataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + # if split == "test": + # split = "val" + self.split = split + self.metadata = None + self.ans_lab_dict = None + if split == "train": + names = ["activitynet_qa_train"] + # names = ["msrvtt_qa_train", "msrvtt_qa_val"] + elif split == "val": + names = ["activitynet_qa_test"] # ["msrvtt_qa_val"] + elif split == "test": + names = ["activitynet_qa_test"] # vqav2_test-dev for test-dev + + super().__init__( + *args, + **kwargs, + names=names, + text_column_name="questions", + remove_duplicate=False, + ) + self.names = names + # self.num_frames = 4 + self._load_metadata() + + def _load_metadata(self): + metadata_dir = './meta_data/activitynet_qa' + split_files = { + 'train': 'msrvtt_qa_train.jsonl', + 'val': 'msrvtt_qa_test.jsonl', + 'test': 'msrvtt_qa_test.jsonl' + } + answer_fp = os.path.join(metadata_dir, 'msrvtt_train_ans2label.json') # 1500 in total (all classes in train) + # answer_fp = os.path.join(metadata_dir, 'msrvtt_qa_ans2label.json') # 4539 in total (all classes in train+val+test) + answer_clip_id 
= os.path.join(metadata_dir, 'msrvtt_clip_id.json')
+        with open(answer_fp, 'r') as JSON:
+            self.ans_lab_dict = json.load(JSON)
+        with open(answer_clip_id, 'r') as JSON:
+            self.ans_clip_id = json.load(JSON)
+        for name in self.names:
+            split = name.split('_')[-1]
+            target_split_fp = split_files[split]
+            # path_or_buf=os.path.join(metadata_dir, target_split_fp)
+            metadata = pd.read_json(os.path.join(metadata_dir, target_split_fp), lines=True)
+            if self.metadata is None:
+                self.metadata = metadata
+            else:
+                self.metadata.update(metadata)
+        print("total {} samples for {}".format(len(self.metadata), self.names))
+        # data1.update(data2)
+
+    def get_text(self, sample):
+        text = sample['question']
+        encoding = self.tokenizer(
+            text,
+            padding="max_length",
+            truncation=True,
+            max_length=self.max_text_len,
+            return_special_tokens_mask=True,
+        )
+        return (text, encoding)
+
+    def get_answer_label(self, sample):
+        text = sample['answer']
+        ans_total_len = len(self.ans_lab_dict) + 1  # one additional class
+        try:
+            ans_label = self.ans_lab_dict[text]  #
+        except KeyError:
+            ans_label = -100  # ignore classes
+            # ans_label = 1500 # other classes
+        scores = np.zeros(ans_total_len).astype(int)
+        scores[ans_label] = 1
+        return text, ans_label, scores
+        # return text, ans_label_vector, scores
+
+    def __getitem__(self, index):
+        sample = self.metadata.iloc[index]
+        video_tensor = self.get_video(sample)
+        text = self.get_text(sample)
+        # index, question_index = self.index_mapper[index]
+        qid = index
+        if self.split != "test":
+            answers, labels, scores = self.get_answer_label(sample)
+        else:
+            answers = list()
+            labels = list()
+            scores = list()
+
+        return {
+            "video": video_tensor,
+            "text": text,
+            "vqa_answer": answers,
+            "vqa_labels": labels,
+            "vqa_scores": scores,
+            "qid": qid,
+            "ans_clip_id": self.ans_clip_id,
+        }
+
+    def __len__(self):
+        return len(self.metadata)
\ No newline at end of file
diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/didemo.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/didemo.py
new file mode 100644
index 0000000..79fc95c
--- /dev/null
+++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/didemo.py
@@ -0,0 +1,41 @@
+from .video_base_dataset import BaseDataset
+import os
+import pandas as pd
+
+# Some videos are missing; handle IO exceptions when loading for better results.
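The note above flags that some DiDeMo clips are missing on disk, so loading should tolerate IO errors rather than crash. A minimal sketch of that resample-on-failure pattern, in the spirit of the get_suite loops used by the other loaders in this diff (metadata and load_clip are illustrative placeholders, not names from the repository):

import random

def fetch_with_fallback(metadata, load_clip, index, max_tries=10):
    """Return a loaded clip; if the video is missing or unreadable, retry with a random other index."""
    for _ in range(max_tries):
        sample = metadata[index]
        try:
            return load_clip(sample)  # may raise IOError/OSError for missing files
        except (IOError, OSError) as err:
            print(f"skipping unreadable video {sample}: {err}")
            index = random.randint(0, len(metadata) - 1)
    raise RuntimeError(f"gave up after {max_tries} failed reads")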
+ + +class DIDEMODataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + self.metadata = None + if split == "train": + names = ["didemo_train"] + elif split == "val": + names = ["didemo_val"] + elif split == "test": + names = ["didemo_val"] + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + self._load_metadata() + + def _load_metadata(self): + metadata_dir = './meta_data/didemo' + split_files = { + 'train': 'DiDeMo_train.tsv', + 'val': 'DiDeMo_val.tsv', # there is no test + 'test': 'DiDeMo_test.tsv' + } + target_split_fp = split_files[self.split] + metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t') + self.metadata = metadata + print("load split {}, {} samples".format(self.split, len(metadata))) + + def _get_video_path(self, sample): + rel_video_fp = sample[1] + full_video_fp = os.path.join(self.data_dir, '', rel_video_fp) + return full_video_fp, rel_video_fp + + def _get_caption(self, sample): + return sample[0] + diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/ego4d.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/ego4d.py new file mode 100644 index 0000000..4b05ed9 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/ego4d.py @@ -0,0 +1,57 @@ +from .video_base_dataset import BaseDataset, read_large_frames_decord +import pandas as pd +import os + + +# {'timestamp_sec': 221.29666, 'narration_text': '#C C walks on the ground'} + + +class Ego4DDataset(BaseDataset): + """EGO4D Video-Text loader.""" + + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + + if split == "train": + names = ["ego4d_train"] + elif split == "val": + names = ["ego4d_val"] + elif split == "test": + names = ["ego4d_test"] + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + + self._load_metadata() + + def _load_metadata(self): + metadata_dir = './meta_data/ego4d' + split_files = { + 'train': 'ego4d_train_subset.csv', + 'val': 'ego4d_val_ts_clean.csv', + 'test': 'ego4d_val_ts_clean.csv' # there is no test + } + target_split_fp = split_files[self.split] + self.metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t', header=None, error_bad_lines=False) + + def _get_video_path(self, sample): + rel_video_fp = sample[0] + '.mp4' + full_video_fp = os.path.join(self.data_dir, 'videos', rel_video_fp) + if not os.path.exists(full_video_fp): + Exception(IOError) + return full_video_fp, rel_video_fp + + def _get_caption(self, sample): + return sample[6] + + def get_raw_video(self, sample): + abs_fp, rel_fp = self._get_video_path(sample) + # if int(sample[2]) > 600: + # raise Exception("Video is longer than 10m!", rel_fp) + frame_end, frame_loc = int(sample[3]), int(sample[5]) + # imgs = video_reader(abs_fp, frame_loc, frame_end, self.num_frames) + imgs = read_large_frames_decord(abs_fp, frame_loc, frame_end, self.num_frames) + if imgs is None: + raise Exception("Invalid video!", rel_fp) + else: + return imgs + diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/ego4d_choice.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/ego4d_choice.py new file mode 100644 index 0000000..c6f3185 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/ego4d_choice.py @@ -0,0 +1,101 @@ +from .video_base_dataset import BaseDataset, read_large_frames_decord, get_video_len 
+import os +import pandas as pd + + +class EGO4DChoiceDataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + if self.split == "train": + Exception("no train data provided") + self.metadata = None + self.ans_lab_dict = None + if split == "train": + names = ["ego4d_choice_train"] + elif split == "val": + names = ["ego4d_choice_val"] + elif split == "test": + names = ["ego4d_choice_test"] # vqav2_test-dev for test-dev + + super().__init__( + *args, + **kwargs, + names=names, + text_column_name="unknown", + remove_duplicate=False, + ) + self._load_metadata() + + def _load_metadata(self): + metadata_dir = './meta_data/ego4d' + split_files = { + 'train': 'mc_val.csv', # no train and test available, only for zero-shot testing + 'val': 'mc_val.csv', + 'test': 'mc_val.csv' + } + target_split_fp = split_files[self.split] + self.metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep=',', header=0, on_bad_lines='skip') + + def _get_video_path(self, sample): + rel_video_fp = eval(sample["question"])[0] + '.mp4' + full_video_fp = os.path.join(self.data_dir, 'videos', rel_video_fp) + if not os.path.exists(full_video_fp): + Exception(IOError) + return full_video_fp, rel_video_fp + + def get_raw_video(self, sample): + abs_fp, rel_fp = self._get_video_path(sample) + frame_loc = eval(sample["question"])[1] + frame_end = get_video_len(abs_fp) + imgs = read_large_frames_decord(abs_fp, frame_loc, frame_end, self.num_frames, mode=self.split) + if imgs is None: + raise Exception("Invalid video!", rel_fp) + else: + return imgs + + def get_text(self, sample): + texts = [] + for answer in eval(sample["answers"]): + text = answer[-1] + encoding = self.tokenizer( + text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + texts.append((text, encoding)) + return texts + + def get_answer_label(self, sample): + gt_text = eval(sample["question"])[-1] + answer_label = 0 + for index, answer in enumerate(eval(sample["answers"])): + if answer[-1] == gt_text: + answer_label = index + return answer_label + + def __getitem__(self, index): + sample = self.metadata.iloc[index] + # print(sample) + video_tensor = self.get_video(sample) + # index, question_index = self.index_mapper[index] + qid = index + answer = self.get_answer_label(sample) + ret = { + "video": video_tensor, + "vid_index": index, + "cap_index": index, + "raw_index": index, + 'answer': answer + } + texts = self.get_text(sample) + ret["text"] = texts[0] + # print(len(texts)) + for i in range(self.draw_false_text - 1): + ret.update({f"false_text_{i}": texts[i+1]}) + return ret + + def __len__(self): + return len(self.metadata) \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/ego4d_v2.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/ego4d_v2.py new file mode 100644 index 0000000..c43246b --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/ego4d_v2.py @@ -0,0 +1,267 @@ +from .video_base_dataset import BaseDataset +import torch as th +import os +import numpy as np +import random +import ffmpeg +import json +from transforms.video.videoaug import VideoTransform +import subprocess + +# {'timestamp_sec': 221.29666, 'narration_text': '#C C walks on the ground'} + + +class Ego4DDataset(BaseDataset): + """EGO4D Video-Text loader.""" + + def __init__(self, *args, split="", **kwargs): + # def __init__( + # self, + # 
csv, + # video_root='', + # caption_root='', + # min_time=4.0, + # fps=16, + # num_frames=16, + # size=224, + # crop_only=False, + # center_crop=True, + # benchmark=False, + # token_to_word_path='data/dict.npy', + # max_words=20, + # num_candidates=1, + # random_left_right_flip=False, + # ): + # """ + # Args: + # """ + assert split in ["train", "val", "test"] + self.split = split + + if split == "train": + names = ["ego4d_train"] + elif split == "val": + names = ["ego4d_val"] + elif split == "test": + names = ["ego4d_test"] + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + + self._load_metadata() + # for howto100 + self.min_time = 2.0 + self.size = 224 + self.fps = 5 + self.num_sec = self.video_num_frames / float(self.fps) + self.crop_only = False + self.center_crop = False + self.benchmark = False + self.num_candidates = 1 + self.random_flip = True + # print(self.data_dir) + # for howto caption dir + self._load_metadata() + # print(kwargs) + # self.num_frames = kwargs['num_frames'] + self.video_transform = VideoTransform(mode=self.split, num_frames=self.num_frames) # train or val model + + def _load_metadata(self): + metadata_dir = './meta_data' + split_files = { + 'train': 'ego4d/narration.json', + 'val': 'ego4d/narration.json', # there is no test + 'test': 'ego4d/narration.json' + } + target_split_fp = split_files[self.split] + with open(os.path.join(metadata_dir, target_split_fp), 'r') as jsonfile: + metadata = json.load(jsonfile) + # metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t') + self.metadata = metadata + self.meta_keys = list(metadata.keys()) + + def __len__(self): + return len(self.meta_keys) + + def get_video_len(self, video_path): + duration = subprocess.check_output( + ['ffprobe', '-i', video_path, '-show_entries', 'format=duration', '-v', 'quiet', '-of', + 'csv=%s' % ("p=0")]) + # print("ffmpeg duration is: {}".format(duration)) + duration = float(str(duration)[2:-3]) # b'1027.806000\n' -> 1027.806 + return duration + + def read_frames_ffmpeg(self, video_path, center, video_len): + if center > video_len: + center = video_len - 2 * self.num_sec + start = int(max(0, center-self.min_time)) + end = int(min(video_len, center+self.min_time)) + start_seek = random.randint(start, int(max(start, end - self.num_sec))) + # video is too short + if video_len < 1: + start_seek = 0 + if start_seek + self.num_sec + 0.1 > video_len: + start_seek = video_len - self.num_sec - 0.1 + start_seek = max(start_seek, 0) + cmd = ( + ffmpeg + .input(video_path, ss=start_seek, t=self.num_sec + 0.01) + .filter('fps', fps=self.fps) + ) + if self.center_crop: + aw, ah = 0.5, 0.5 + else: + aw, ah = random.uniform(0, 1), random.uniform(0, 1) + if self.crop_only: + cmd = ( + cmd.crop('(iw - {})*{}'.format(self.size, aw), + '(ih - {})*{}'.format(self.size, ah), + str(self.size), str(self.size)) + ) + else: + cmd = ( + cmd.crop('(iw - min(iw,ih))*{}'.format(aw), + '(ih - min(iw,ih))*{}'.format(ah), + 'min(iw,ih)', + 'min(iw,ih)') + .filter('scale', self.size, self.size) + ) + if self.random_flip and random.uniform(0, 1) > 0.5: + cmd = cmd.hflip() + out, _ = ( + cmd.output('pipe:', format='rawvideo', pix_fmt='rgb24') + .run(capture_stdout=True, quiet=True) + ) + video = np.frombuffer(out, np.uint8).reshape([-1, self.size, self.size, 3]) + video_tensor = th.from_numpy(np.copy(video)) + video_tensor = video_tensor.permute(3, 0, 1, 2) + 0.01 + if video_tensor.size()[1] != self.num_frames: + print(video_tensor.size(), start, end, start_seek, video_len) + # 
print("video length: {}".format(self.get_video_len_from_timestammp())) + # add gausian noise here to prevent all blank boxez + if video_tensor.shape[1] < self.num_frames: + zeros = th.ones((3, self.num_frames - video_tensor.shape[1], self.size, self.size), dtype=th.uint8) + video_tensor = th.cat((video_tensor, zeros), axis=1) + return video_tensor[:, :self.num_frames] + + def _zero_pad_tensor_token(self, tensor, size): + if len(tensor) >= size: + return tensor[:size] + else: + zero = th.zeros(size - len(tensor)).long() + return th.cat((tensor, zero), dim=0) + + def get_text(self, sample, index): + text = sample['narration_text'] + # TODO: May need to be improved for edge cases. + encoding = self.tokenizer( + text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return { + "text": (text, encoding), + "img_index": index, + "cap_index": index, + "raw_index": index, + } + + def get_false_text(self, rep): + random_index = random.randint(0, len(self.metadata) - 1) + # two annotations + if random.random() < 0.5: + meta = self.metadata[self.meta_keys[random_index]]['narration_pass_1'] + else: + meta = self.metadata[self.meta_keys[random_index]]['narration_pass_2'] + sample = meta[random.randint(0, len(meta) - 1)] # random choice one sample + text = sample['narration_text'] + encoding = self.tokenizer( + text, + # padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return {f"false_text_{rep}": (text, encoding)} + + def _get_video_path(self, sample): + rel_video_fp = sample["video_path"] + '.mp4' + full_video_fp = os.path.join(self.data_dir, rel_video_fp) + if not os.path.exists(full_video_fp): + Exception(IOError) + return full_video_fp, rel_video_fp + + def get_raw_video(self, sample): + abs_fp, rel_fp = self._get_video_path(sample) + # in four seconds + # print(sample) + sample["video_len"] = self.get_video_len(abs_fp) + center = sample['timestamp_sec'] + imgs = self.read_frames_ffmpeg(abs_fp, center, sample["video_len"]).permute(1, 0, 2, 3) + # print(imgs.size()) + if imgs is None: + raise Exception("Invalid video!", rel_fp) + else: + return imgs + + def get_video(self, sample): + imgs_tensor = self.get_raw_video(sample) + return imgs_tensor + + def get_false_video(self, rep): + random_index = random.randint(0, len(self.metadata) - 1) + # two annotations + if random.random() < 0.5: + meta = self.metadata[self.meta_keys[random_index]]['narration_pass_1'] + else: + meta = self.metadata[self.meta_keys[random_index]]['narration_pass_2'] + if len(meta) < 1: + return self.get_false_video(rep) + sample = meta[random.randint(0, len(meta) - 1)] # random choice one sample + sample["video_path"] = self.meta_keys[random_index] # video path + imgs_tensor = self.get_raw_video(sample) + return {f"false_image_{rep}": imgs_tensor} + + def get_suite(self, index): + result = None + while result is None: + # two annotations + try: + if random.random() < 0.5: + meta = self.metadata[self.meta_keys[index]]['narration_pass_1'] + else: + meta = self.metadata[self.meta_keys[index]]['narration_pass_2'] + if len(meta) < 2: + random_index = random.randint(0, len(self.metadata) - 1) + return self.get_suite(random_index) + sample = meta[random.randint(0, len(meta)-1)] # random choice one sample + sample["video_path"] = self.meta_keys[index] # video path + # print(sample) + ret = dict() + text = self.get_text(sample, index) + ret.update({"replica": True if text["cap_index"] > 0 else False}) + 
ret.update(text) + imgs_tensor = self.get_video(sample) + # print(imgs_tensor.size()) + ret.update({ + "image": imgs_tensor, + "img_index": index, + "cap_index": index, + "raw_index": index, + }) + ret.update({"replica": True if ret["cap_index"] > 0 else False}) + for i in range(self.draw_false_image): + ret.update(self.get_false_video(i)) + for i in range(self.draw_false_text): + ret.update(self.get_false_text(i)) + result = True + except Exception as e: + # print(e) + index = random.randint(0, len(self.metadata) - 1) + return ret + + def __len__(self): + return len(self.metadata) + + def __getitem__(self, index): + return self.get_suite(index) diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/hmdb51.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/hmdb51.py new file mode 100644 index 0000000..505539b --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/hmdb51.py @@ -0,0 +1,88 @@ +import numpy as np +from .video_base_dataset import BaseDataset +import os + + +class HMDB51Dataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + self.metadata = None + self.ans_lab_dict = dict() + if split == "train": + names = ["hmdb51_train"] + elif split == "val": + names = ["hmdb51_val"] + elif split == "test": + names = ["hmdb51_test"] + super().__init__( + *args, + **kwargs, + names=names, + text_column_name="questions", + remove_duplicate=False, + ) + self._load_metadata() + + def _load_metadata(self): + metadata_dir = './meta_data/hmdb51' + split_files = { + 'train': 'hmdb51_rgb_train_split_1.txt', + 'val': 'hmdb51_rgb_val_split_1.txt', + 'test': 'hmdb51_rgb_val_split_1.txt' + } + target_split_fp = split_files[self.split] + self.metadata = [x.strip().split(' ') for x in open(os.path.join(metadata_dir, target_split_fp))] + answer_fp = os.path.join(metadata_dir, 'hmdb51_classInd.txt') + with open(answer_fp, 'r') as f: + lines = f.readlines() + for line in lines: + self.ans_lab_dict[str(int(line.strip().split(' ')[0]) - 1)] = line.strip().split(' ')[1] + + def _get_video_path(self, sample): + # self.ans_lab_dict[sample[2]], + return os.path.join(self.data_dir, sample[0].split('/')[-1]) + '.avi', sample[0].split('/')[-1] + '.avi' + + def get_text(self, sample): + text = "A person is doing [MASK]" + encoding = self.tokenizer( + text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return (text, encoding) + + def get_answer_label(self, sample): + text = "None" + ans_total_len = len(self.ans_lab_dict) + 1 # one additional class + ans_label = int(sample[2]) + scores = np.zeros(ans_total_len).astype(int) + scores[ans_label] = 1 + return text, ans_label, scores + # return text, ans_label_vector, scores + + def __getitem__(self, index): + sample = self.metadata[index] # .split(' ') + video_tensor = self.get_video(sample) + text = self.get_text(sample) + qid = index + if self.split != "test": + answers, labels, scores = self.get_answer_label(sample) + else: + answers = list() + labels = list() + scores = list() + + return { + "video": video_tensor, + "text": text, + "vqa_answer": answers, + "vqa_labels": labels, + "vqa_scores": scores, + "qid": qid, + } + + def __len__(self): + return len(self.metadata) \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/howto100m.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/howto100m.py new 
file mode 100644 index 0000000..e266f88 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/howto100m.py @@ -0,0 +1,254 @@ +from .video_base_dataset import BaseDataset +import torch as th +import pandas as pd +import os +import numpy as np +import random +import ffmpeg + +import io +import decord +decord.bridge.set_bridge('torch') +from CoTrain.datasets import client + + +class HT100MDataset(BaseDataset): + """HowTo100M Video-Text loader.""" + + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + + if split == "train": + names = ["howto100m_train"] + elif split == "val": + names = ["howto100m_val"] + elif split == "test": + names = ["howto100m_test"] + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + + self.metadata = None + self._load_metadata() + # for howto100 + self.min_time = 4.0 + self.size = 256 + self.fps = 2 + self.num_sec = self.num_frames / float(self.fps) + self.crop_only = True + if self.split == 'train': + self.center_crop = False + else: + self.center_crop = True + self.benchmark = False + self.num_candidates = 1 + self.random_flip = True + if "s3://" in self.data_dir: + # Read from local + self.caption_dir = os.path.join("./meta_data/howto100m", 'howto100m_csv') + # self.caption_dir = os.path.join("./meta_data/howto100m", 'howto100m_csv') + else: + self.caption_dir = os.path.join(self.data_dir, 'howto100m_csv') + print(names, ": ", len(self.metadata), "samples in total.") + + def _load_metadata(self): + metadata_dir = './meta_data/howto100m_our' + split_files = { + 'train': 'ht100_videos_split.csv', + 'val': 'ht100_videos_split_val.csv', # there is no test + 'test': 'ht100_videos_split_val.csv' + } + target_split_fp = split_files[self.split] + metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t') + self.metadata = metadata["Name"] + + def read_frames_ffmpeg(self, video_path, start, end): + start_seek = random.randint(int(start), int(max(start, end - self.num_sec))) + cmd = ( + ffmpeg + .input(video_path, ss=start_seek, t=self.num_sec + 0.01) + .filter('fps', fps=self.fps) + ) + if self.center_crop: + aw, ah = 0.5, 0.5 + else: + aw, ah = random.uniform(0, 1), random.uniform(0, 1) + if self.crop_only: + cmd = ( + cmd.crop('(iw - {})*{}'.format(self.size, aw), + '(ih - {})*{}'.format(self.size, ah), + str(self.size), str(self.size)) + ) + else: + cmd = ( + cmd.crop('(iw - min(iw,ih))*{}'.format(aw), + '(ih - min(iw,ih))*{}'.format(ah), + 'min(iw,ih)', + 'min(iw,ih)') + .filter('scale', self.size, self.size) + ) + if self.random_flip and random.uniform(0, 1) > 0.5: + cmd = cmd.hflip() + out, _ = ( + cmd.output('pipe:', format='rawvideo', pix_fmt='rgb24').run(capture_stdout=True, quiet=True) + ) + # print(np.frombuffer(out, np.uint8).shape) + video = np.frombuffer(out, np.uint8).reshape([-1, self.size, self.size, 3]) + video_tensor = th.from_numpy(np.copy(video)) + video_tensor = video_tensor.permute(3, 0, 1, 2) + 0.01 # prevent all dark vide + if video_tensor.shape[1] < self.num_frames: + zeros = th.ones((3, self.num_frames - video_tensor.shape[1], self.size, self.size), dtype=th.uint8) + video_tensor = th.cat((video_tensor, zeros), axis=1) + return video_tensor[:, :self.num_frames] + + def read_frames_decord(self, video_path, start, end): + # Build Decord conntainer + video_bytes = client.get(video_path) + assert video_bytes is not None, "Get video failed from {}".format(video_path) + if isinstance(video_bytes, bytes): + video_bytes = 
io.BytesIO(video_bytes) + container = decord.VideoReader(video_bytes, ctx=decord.cpu(0)) + + real_fps = container.get_avg_fps() + start_frame, end_frame = real_fps * start, real_fps * end + num_real_frames = self.num_frames / self.fps * real_fps + start_seek = random.randint(int(start_frame), int(max(start_frame, end_frame - num_real_frames))) + indexes = np.linspace(start_seek, start_seek + num_real_frames, self.num_frames, endpoint=False, dtype=int) + indexes = [x for x in indexes if x < len(container)] + assert len(indexes) > 0, "Failed to decode from {}".format(video_path) + frames = container.get_batch(indexes) # [T, H, W, C] + _, H, W, _ = frames.shape + assert self.crop_only + if self.center_crop: + aw, ah = 0.5, 0.5 + else: + aw, ah = random.uniform(0, 1), random.uniform(0, 1) + + top = int((H - self.size) * ah) + left = int((W - self.size) * aw) + bottom, right = top + self.size, left + self.size + frames = frames[:, top:bottom, left:right, :] + + if self.random_flip and random.uniform(0, 1) > 0.5: + frames = frames.flip([-2]) + + video_tensor = frames.permute(3, 0, 1, 2) + 0.01 # prevent all dark vide + if video_tensor.shape[1] < self.num_frames: + zeros = th.ones((3, self.num_frames - video_tensor.shape[1], self.size, self.size), dtype=th.uint8) + video_tensor = th.cat((video_tensor, zeros), axis=1) + # assert False, (video_tensor.min(), video_tensor.max()) + # assert False, video_tensor.shape + return video_tensor[:, :self.num_frames] + + # time consuming > load video, where is the csv file? (cfs->ceph) + def get_caption(self, caption): + cap = pd.read_csv(caption) + ind = random.randint(0, len(cap) - 1) + text = cap['text'].values[ind] + start, end = cap['start'].values[ind], cap['end'].values[ind] + if end - start < self.min_time: + diff = self.min_time - end + start + start = max(0, start - diff / 2) + end = start + self.min_time + return text, start, end + + def get_text(self, sample): + caption_csv = self.get_caption_path(sample) + text, start, end = self.get_caption(caption_csv) + # print(text) + # TODO: May need to be improved for edge cases. 
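`read_frames_decord` above converts a start/end window given in seconds into frame indices at the container's average fps and decodes only those frames. A minimal standalone sketch of that sampling step follows; it is separate from the patch, assumes decord is installed, and reads a local file rather than bytes fetched from ceph.

```python
# Illustrative sketch only: sample `num_frames` frames evenly from a time window
# with decord. The 'torch' bridge is enabled, as in this module.
import decord
import numpy as np
decord.bridge.set_bridge('torch')


def sample_window(video_path: str, start_sec: float, end_sec: float, num_frames: int = 8):
    vr = decord.VideoReader(video_path, ctx=decord.cpu(0))
    fps = vr.get_avg_fps()
    start_f, end_f = int(start_sec * fps), int(end_sec * fps)
    idxs = np.linspace(start_f, max(start_f, end_f - 1), num_frames, dtype=int)
    idxs = np.clip(idxs, 0, len(vr) - 1)            # stay inside the container
    frames = vr.get_batch(list(idxs))               # torch.Tensor, [T, H, W, C], uint8
    return frames.permute(3, 0, 1, 2)               # [C, T, H, W]
```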
+ encoding = self.tokenizer( + text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return {"text": (text, encoding)}, int(start), int(end) + + def get_caption_path(self, sample): + # example xx/xx/xx.mp4 -> xx.csv + return os.path.join(self.caption_dir, sample.split('/')[-1].split('.')[0] + '.csv') + + def get_false_text(self, rep): + random_index = random.randint(0, len(self.metadata) - 1) + sample = self.metadata.iloc[random_index] + caption_csv = self.get_caption_path(sample) + text, start, end = self.get_caption(caption_csv) + encoding = self.tokenizer( + text, + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return {f"false_text_{rep}": (text, encoding)} + + def _get_video_path(self, sample): + rel_video_fp = sample + if "s3://" in self.data_dir: + rel_video_fp = sample.split("/")[-1] + rel_video_fp = rel_video_fp.split(".")[0] + ".mp4" # All video on ceph is mp4 + full_video_fp = os.path.join(self.data_dir, rel_video_fp) + return full_video_fp, rel_video_fp + + def get_raw_video(self, sample, begin, end): + abs_fp, rel_fp = self._get_video_path(sample) + if "s3://" in abs_fp: + videos = self.read_frames_decord(abs_fp, begin, end).permute(1, 0, 2, 3) + else: + videos = self.read_frames_ffmpeg(abs_fp, begin, end).permute(1, 0, 2, 3) + if videos is None: + raise Exception("Invalid img!", rel_fp) + else: + return videos + + def get_video(self, sample, start, end): + videos = self.get_raw_video(sample, start, end) + videos_tensor = self.video_aug(videos, self.video_transform, byte=True) + return videos_tensor + + def get_false_video(self, rep): + random_index = random.randint(0, len(self.metadata) - 1) + sample = self.metadata.iloc[random_index] + caption_csv = self.get_caption_path(sample) + _, start, end = self.get_caption(caption_csv) + videos = self.get_raw_video(sample, start, end) + videos_tensor = self.video_aug(videos, self.video_transform, byte=True) + return {f"false_video_{rep}": videos_tensor} + + def get_suite(self, index): + result = None + max_try = 10 + try_time = 0 + while result is None: + try_time += 1 + sample = self.metadata.iloc[index] + try: + ret = dict() + text, start, end = self.get_text(sample) + ret.update(text) + videos_tensor = self.get_video(sample, start, end) + ret.update({ + "video": videos_tensor, + "vid_index": index, + "cap_index": index, + "raw_index": index, + }) + ret.update({"replica": True if ret["cap_index"] > 0 else False}) + for i in range(self.draw_false_video): + ret.update(self.get_false_video(i)) + for i in range(self.draw_false_text): + ret.update(self.get_false_text(i)) + result = True + except Exception as e: + index = random.randint(0, len(self.metadata) - 1) + exc = e + if try_time > max_try: + print(f"Exceed max time Error while read file idx {sample} in {self.names[0]} with error {exc}") + try_time = 0 + return ret + + def __len__(self): + return len(self.metadata) + + def __getitem__(self, index): + return self.get_suite(index) diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/k400.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/k400.py new file mode 100644 index 0000000..6e5aac3 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/k400.py @@ -0,0 +1,115 @@ +import numpy as np +from .video_base_dataset import BaseDataset +import os +import random +from CoTrain.transforms.video.videoaug import VideoTransform + + +class K400Dataset(BaseDataset): + def 
__init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + self.metadata = None + self.ans_lab_dict = dict() + if split == "train": + names = ["k400_train"] + elif split == "val": + names = ["k400_val"] + elif split == "test": + names = ["k400_test"] + super().__init__( + *args, + **kwargs, + names=names, + text_column_name="questions", + remove_duplicate=False, + ) + self.video_transform = VideoTransform(mode=self.split) # train or val model + self._load_metadata() + + def _load_metadata(self): + metadata_dir = './meta_data/k400' + split_files = { + 'train': 'k400_train_tsm.list', + 'val': 'k400_test_tsm.list', + 'test': 'k400_test_tsm.list' + } + target_split_fp = split_files[self.split] + with open(os.path.join(metadata_dir, target_split_fp)) as f: + self.metadata = f.readlines() + answer_fp = os.path.join(metadata_dir, 'kinetics_label_map.txt') + count = 0 + with open(answer_fp, 'r') as f: + lines = f.readlines() + for line in lines: + self.ans_lab_dict[str(line.strip())] = count + count += 1 + + def _get_video_path(self, sample): + # find the name is os.listdir() e.g. abseiling/0wR5jVB-WPk.mp4 + # /data/algceph/arcdata/Kinetics-400/train_zips/snowboarding/MCgJO4s1qBA_000129_000139.zip + # -> snowboarding/MCgJO4s1qBA_000129_000139.mp4 + if self.split == 'train': + rel_path = sample[0][46:-4] + '.mp4' + else: + # val maybe mkv. webm etc. + fake_path = sample[0][44:-4] + sub_dir, video_name = fake_path.split('/') + rel_path = sub_dir + for video in os.listdir(os.path.join(self.data_dir, self.split, sub_dir)): + if video_name in video: + rel_path = os.path.join(rel_path, video) + break + full_path = os.path.join(self.data_dir, self.split, rel_path) + # print(full_path) + return full_path, rel_path + + def get_text(self, sample): + text = "A persion is doing [MASK]" + encoding = self.tokenizer( + text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return (text, encoding) + + def get_answer_label(self, sample): + text = "None" + # print(len(self.ans_lab_dict)) + ans_total_len = len(self.ans_lab_dict) + 1 # one additional class + ans_label = int(sample[1]) + scores = np.zeros(ans_total_len).astype(int) + scores[ans_label] = 1 + return text, ans_label, scores + + def __getitem__(self, index): + result = None + while result is None: + sample = self.metadata[index].split('\t') + try: + video_tensor = self.get_video(sample) + text = self.get_text(sample) + qid = index + if self.split != "test": + answers, labels, scores = self.get_answer_label(sample) + else: + answers = list() + labels = list() + scores = list() + result = True + except Exception as e: + print(f"Error while read file idx {sample[0]} -> {e}") + index = random.randint(0, len(self.metadata) - 1) + return { + "video": video_tensor, + "text": text, + "vqa_answer": answers, + "vqa_labels": labels, + "vqa_scores": scores, + "qid": qid, + } + + def __len__(self): + return len(self.metadata) \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/k400_video.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/k400_video.py new file mode 100644 index 0000000..9eb5d06 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/k400_video.py @@ -0,0 +1,130 @@ +from docutils import DataError +from importlib_metadata import metadata +from .video_base_dataset import BaseDataset, read_frames_decord +import os +import pandas as pd + + +class 
K400VideoDataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + if self.split == "train": + Exception("no train data provided") + self.metadata = None + self.ans_texts = dict() + if split == "train": + names = ["k400_video_train"] + elif split == "val": + names = ["k400_video_val"] + elif split == "test": + names = ["k400_video_test"] # vqav2_test-dev for test-dev + + super().__init__( + *args, + **kwargs, + names=names, + text_column_name="unknown", + remove_duplicate=False, + ) + self._load_metadata() + self.data_dir = "s3://video_pub/K400_videos" # TODO: Remove this piece of shit + + def _load_metadata(self): + metadata_dir = './meta_data/k400' + split_files = { + 'train': 'k400_test_tsm.list', + 'val': 'k400_test_tsm.list', + 'test': 'k400_test_tsm.list', + } + target_split_fp = split_files[self.split] + with open(os.path.join(metadata_dir, target_split_fp)) as f: + self.metadata = f.readlines() + self.metadata = [x.strip() for x in self.metadata] + self.metadata = [x.split("\t") for x in self.metadata] + self.metadata = [[x[0].split("/")[-1][:11], int(x[1])] for x in self.metadata] + + def _build_ans(self): + metadata_dir = './meta_data/k400' + answer_fp = os.path.join(metadata_dir, 'kinetics_label_map.txt') + ans_texts = open(answer_fp).readlines() + assert len(set(ans_texts)) == 400 + ans_texts = [x.strip() for x in ans_texts] + ans_texts = ["A person is doing {}".format(x) for x in ans_texts] + ans_texts = [ + ( + x, + self.tokenizer( + x, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ), + ) + for x in ans_texts + ] + self.ans_texts = {"text": ans_texts[0]} + self.ans_texts.update( + { + "false_text_{}".format(i): ans_texts[i + 1] + for i in range(len(ans_texts) - 1) + } + ) + + @staticmethod + def classes(): + metadata_dir = './meta_data/k400' + answer_fp = os.path.join(metadata_dir, 'kinetics_label_map.txt') + ans_texts = open(answer_fp).readlines() + assert len(set(ans_texts)) == 400 + ans_texts = [x.strip() for x in ans_texts] + return ans_texts + + def _get_video_path(self, sample): + rel_video_fp = sample[0] + '.mp4' + if "s3://" in self.data_dir: + full_video_fp = os.path.join(self.data_dir, rel_video_fp) + else: + full_video_fp = os.path.join(self.data_dir, self.split, rel_video_fp) + return full_video_fp, rel_video_fp + + def get_raw_video(self, sample): + abs_fp, rel_fp = self._get_video_path(sample) + videos, idxs, vlen = read_frames_decord(abs_fp, self.num_frames, mode=self.split) + if videos is None: + raise Exception("Invalid img!", rel_fp) + else: + return videos + + def get_video(self, sample): + videos = self.get_raw_video(sample) + videos_tensor = self.video_aug(videos, self.video_transform) + return videos_tensor + + def __getitem__(self, index): + ret = None + max_try = 10 + try_time = 0 + while ret is None: + try_time += 1 + try: + sample = self.metadata[index] + image_tensor = self.get_video(sample) + answer = sample[1] + ret = { + "video": image_tensor, + "img_index": index, + 'answer': answer, + } + except Exception as e: + index = (index + 1) % len(self.metadata) + exc = e + if try_time > max_try: + raise DataError( + f"Exceed max time Error while read file idx {sample[0]} with error {exc}" + ) + return ret + + def __len__(self): + return len(self.metadata) \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/lsmdc_choice.py 
b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/lsmdc_choice.py new file mode 100644 index 0000000..45b3ffe --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/lsmdc_choice.py @@ -0,0 +1,121 @@ +from .video_base_dataset import BaseDataset +import os +import pandas as pd +import random +from .pack_meta import pack_metadata, unpack_metadata + + +class LSMDCChoiceDataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + self.metadata = None + self.ans_lab_dict = None + if split == "train": + names = ["lsmdc_choice_train"] + elif split == "val": + names = ["lsmdc_choice_val"] + elif split == "test": + names = ["lsmdc_choice_test"] # vqav2_test-dev for test-dev + + super().__init__( + *args, + **kwargs, + names=names, + text_column_name="unknown", + remove_duplicate=False, + ) + self._load_metadata() + if "s3://" in self.data_dir: + # Remove this fucking auto dir name + self.data_dir = os.path.dirname(self.data_dir) + # Add the real path + self.data_dir = os.path.join(self.data_dir, "LSMDC") + + def _load_metadata(self): + metadata_dir = './meta_data/lsmdc' + split_files = { + 'train': 'LSMDC16_multiple_choice_train.csv', + 'val': 'LSMDC16_multiple_choice_test_randomized.csv', # 'LSMDC16_multiple_choice_valid.csv', + 'test': 'LSMDC16_multiple_choice_test_randomized.csv' + } + target_split_fp = split_files[self.split] + print(os.path.join(metadata_dir, target_split_fp)) + metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t', header=None, on_bad_lines='skip') + datalist = [] + data_to_ignore = [ + "3056_PUBLIC_ENEMIES_01.24.29.351-01.24.32.274", + "3035_INSIDE_MAN_02.02.18.839-02.02.25.201", + "3064_SPARKLE_2012_00.14.12.000-00.14.22.429", + ] + for raw_id in range(len(metadata)): + raw_d = metadata.iloc[raw_id] + video_fp = raw_d[0] # 3001_21_JUMP_STREET_00.03.07.077-00.03.07.559 + if video_fp.strip() in data_to_ignore: + continue + sub_path = video_fp.split('.')[0] # 3001_21_JUMP_STREET_00 + remove = sub_path.split('_')[-1] # 00 + sub_path = sub_path.replace('_'+remove,'/') # 3001_21_JUMP_STREET/ + rel_video_fp = sub_path + video_fp + '.avi' # + options = [raw_d[idx] for idx in range(5, 10)] + d = dict( + id=video_fp, + vid_id=rel_video_fp, + answer=raw_d[10] - 1 if self.split in ['val', 'test'] else 0, + options=options, + ) + datalist.append(d) + self.metadata = pack_metadata(self, pd.DataFrame(datalist)) + print("load split {}, {} samples".format(self.split, len(self.metadata))) + + def _get_video_path(self, sample): + rel_video_fp = sample['vid_id'] + full_video_fp = os.path.join(self.data_dir, rel_video_fp) + # print(full_video_fp) + # assert os.path.exists(full_video_fp) + return full_video_fp, rel_video_fp + + def get_text(self, sample): + texts = [] + for text in sample['options']: + encoding = self.tokenizer( + text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + texts.append((text, encoding)) + return texts + + def get_answer_label(self, sample): + answer = sample['answer'] + return answer + + def __getitem__(self, index): + result = False + while not result: + try: + sample = unpack_metadata(self, index) + video_tensor = self.get_video(sample) + qid = index + answer = self.get_answer_label(sample) + ret = { + "video": video_tensor, + "vid_index": index, + "cap_index": index, + "raw_index": index, + 'answer': answer + } + texts = self.get_text(sample) + ret["text"] = 
texts[0] + for i in range(self.draw_false_text - 1): + ret.update({f"false_text_{i}": texts[i+1]}) + result = True + except Exception as e: + print(f"Error while read file idx {sample['vid_id']} in {self.names[0]} -> {e}") + index = random.randint(0, len(self.metadata) - 1) + return ret + + def __len__(self): + return len(self.metadata) \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/lsmdc_dataset.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/lsmdc_dataset.py new file mode 100644 index 0000000..a32dda5 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/lsmdc_dataset.py @@ -0,0 +1,53 @@ +from .video_base_dataset import BaseDataset +import random +import os +import pandas as pd + + +class LSMDCDataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + self.metadata = None + if split == "train": + names = ["lsmdc_train"] + elif split == "val": + names = ["lsmdc_val"] + elif split == "test": + names = ["lsmdc_test"] + self._load_metadata() + # self.num_frames = kwargs['num_frames'] + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + + def _load_metadata(self): + metadata_dir = './meta_data/lsmdc' + split_files = { + 'train': 'LSMDC16_annos_training.csv', + 'val': 'LSMDC16_challenge_1000_publictect.csv', # LSMDC16_annos_val.csv + 'test': 'LSMDC16_challenge_1000_publictect.csv' + } + target_split_fp = split_files[self.split] + metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t', header=None, error_bad_lines=False) + self.metadata = metadata + print("load split {}, {} samples".format(self.split, len(metadata))) + + def _get_video_path(self, sample): + # e.g. 
3009_BATTLE_LOS_ANGELES_00.03.07.170-00.03.09.675 -> 3009_BATTLE_LOS_ANGELES/3009_BATTLE_LOS_ANGELES_00.03.07.170-00.03.09.675 + sub_dir = '_'.join(sample[0].split('_')[:-1]) + rel_video_fp = sample[0] + '.avi' + full_video_fp = os.path.join(self.data_dir, sub_dir, rel_video_fp) + return full_video_fp, rel_video_fp + + def _get_caption(self, sample): + if self.split == 'train': + words = sample[0].split(',') + num_word = len(words) + index = random.randint(0, num_word - 1) + caption = words[index] + else: + # caption = sample[0] + words = sample[0].split(',') + num_word = len(words) + index = random.randint(0, num_word - 1) + caption = words[index] + return caption diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/msrvtt.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/msrvtt.py new file mode 100644 index 0000000..9eee244 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/msrvtt.py @@ -0,0 +1,84 @@ +from .video_base_dataset import BaseDataset +import random +import os +import pandas as pd +import json +import numpy as np + + +class MSRVTTDataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + self.metadata = None + self.cut = "jsfusion" + if split == "train": + names = ["msrvtt_train"] + elif split == "val": + names = ["msrvtt_val"] + elif split == "test": + names = ["msrvtt_val"] + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + + self._load_metadata() + + def _load_metadata(self): + json_fp = os.path.join(self.data_dir, 'annotation', 'MSR_VTT.json') + with open(json_fp, 'r') as fid: + data = json.load(fid) + df = pd.DataFrame(data['annotations']) + + split_dir = os.path.join(self.data_dir, 'high-quality', 'structured-symlinks') + js_test_cap_idx_path = None + challenge_splits = {"val", "public_server_val", "public_server_test"} + if self.cut == "miech": # 7k + train_list_path = "train_list_miech.txt" + test_list_path = "test_list_miech.txt" # 1k + elif self.cut == "jsfusion": # 9k + train_list_path = "train_list_jsfusion.txt" + test_list_path = "val_list_jsfusion.txt" # 1k + js_test_cap_idx_path = "jsfusion_val_caption_idx.pkl" + elif self.cut in {"full-val", "full-test"}: + train_list_path = "train_list_full.txt" + if self.cut == "full-val": + test_list_path = "val_list_full.txt" + else: + test_list_path = "test_list_full.txt" + elif self.cut in challenge_splits: + train_list_path = "train_list.txt" + if self.cut == "val": + test_list_path = f"{self.cut}_list.txt" + else: + test_list_path = f"{self.cut}.txt" + else: + msg = "unrecognised MSRVTT split: {}" + raise ValueError(msg.format(self.cut)) + + train_df = pd.read_csv(os.path.join(split_dir, train_list_path), names=['videoid']) + test_df = pd.read_csv(os.path.join(split_dir, test_list_path), names=['videoid']) + self.split_sizes = {'train': len(train_df), 'val': len(test_df), 'test': len(test_df)} + + if self.split == 'train': + df = df[df['image_id'].isin(train_df['videoid'])] + else: + df = df[df['image_id'].isin(test_df['videoid'])] + + self.metadata = df.groupby(['image_id'])['caption'].apply(list) + if js_test_cap_idx_path is not None and self.split != 'train': + caps = pd.Series(np.load(os.path.join(split_dir, js_test_cap_idx_path), allow_pickle=True)) + new_res = pd.DataFrame({'caps': self.metadata, 'cap_idx': caps}) + new_res['test_caps'] = new_res.apply(lambda x: [x['caps'][x['cap_idx']]], axis=1) + self.metadata = new_res['test_caps'] + + 
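The MSR-VTT split handling above reduces to one pattern: collapse the flat annotation table into one row per video whose cell holds the list of that video's captions, then sample a single caption at access time (see `_get_caption` below). A toy, self-contained illustration with synthetic rows (not the real annotation files):

```python
# Toy illustration of the caption-grouping pattern used here (synthetic data).
import random
import pandas as pd

ann = pd.DataFrame({
    'image_id': ['video0', 'video0', 'video1'],
    'caption': ['a dog runs', 'a dog is running outside', 'a man cooks'],
})

# one row per video, each holding the list of its captions
per_video = ann.groupby(['image_id'])['caption'].apply(list)
metadata = pd.DataFrame({'captions': per_video})

sample = metadata.iloc[0]
caption = random.choice(sample['captions'])   # train/val: pick one caption at random
print(caption)
```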
self.metadata = pd.DataFrame({'captions': self.metadata}) + print("load split {}, {} samples".format(self.split, len(self.metadata))) + + # random choice or fixed? + def _get_caption(self, sample): + caption_sample = "rand" + if self.split in ['train', 'val'] and caption_sample == "rand": + caption = random.choice(sample['captions']) + else: + caption = sample['captions'][0] + return caption + diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/msrvtt_choice.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/msrvtt_choice.py new file mode 100644 index 0000000..56ef3c8 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/msrvtt_choice.py @@ -0,0 +1,90 @@ +from .video_base_dataset import BaseDataset +import os +import pandas as pd +from .pack_meta import pack_metadata, unpack_metadata + + +class MSRVTTChoiceDataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + if self.split == "train": + Exception("no train data provided") + self.metadata = None + self.ans_lab_dict = None + if split == "train": + names = ["msrvtt_choice_train"] + elif split == "val": + names = ["msrvtt_choice_val"] + elif split == "test": + names = ["msrvtt_choice_test"] # vqav2_test-dev for test-dev + + # Since the data is distribued like everywhere + # We manully change data_dir + args = ("./meta_data", *args[1:]) + + super().__init__( + *args, + **kwargs, + names=names, + text_column_name="unknown", + remove_duplicate=False, + ) + self._load_metadata() + + def _load_metadata(self): + metadata_dir = './meta_data/msrvtt' + split_files = { + 'train': 'msrvtt_mc_test.jsonl', # no train and test available, only for zero-shot + 'val': 'msrvtt_mc_test.jsonl', + 'test': 'msrvtt_mc_test.jsonl' + } + target_split_fp = split_files[self.split] + metadata = pd.read_json(os.path.join(metadata_dir, target_split_fp), lines=True) + self.metadata = pack_metadata(self, metadata) + + def _get_video_path(self, sample): + return os.path.join(self.data_dir, 'videos', 'all', sample['clip_name'] + '.mp4'), sample['clip_name'] + '.mp4' + + def get_text(self, sample): + texts = [] + for text in sample['options']: + encoding = self.tokenizer( + text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + texts.append((text, encoding)) + return texts + + def get_answer_label(self, sample): + answer = sample['answer'] + return answer + + def __getitem__(self, index): + sample = unpack_metadata(self, index) + video_tensor = self.get_video(sample) + # index, question_index = self.index_mapper[index] + qid = index + answer = self.get_answer_label(sample) + ret = { + "video": video_tensor, + "vid_index": index, + "cap_index": index, + "raw_index": index, + 'answer': answer + } + texts = self.get_text(sample) + ret["text"] = texts[0] + # print(len(texts)) + for i in range(self.draw_false_text - 1): + ret.update({f"false_text_{i}": texts[i+1]}) + # for i in range(self.draw_false_text-1): + # ret[f"false_text_{i}"] = texts[i+1] + # print(ret.keys()) + return ret + + def __len__(self): + return len(self.metadata) \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/msrvttqa.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/msrvttqa.py new file mode 100644 index 0000000..8f081b3 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/msrvttqa.py @@ -0,0 +1,110 
@@ +import numpy as np +from .video_base_dataset import BaseDataset +import os +import json +import pandas as pd + + +class MSRVTTQADataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + # if split == "test": + # split = "val" + self.split = split + self.metadata = None + self.ans_lab_dict = None + if split == "train": + names = ["msrvtt_qa_train"] + # names = ["msrvtt_qa_train", "msrvtt_qa_val"] + elif split == "val": + names = ["msrvtt_qa_test"] # ["msrvtt_qa_val"] + elif split == "test": + names = ["msrvtt_qa_test"] # vqav2_test-dev for test-dev + + super().__init__( + *args, + **kwargs, + names=names, + text_column_name="questions", + remove_duplicate=False, + ) + self.names = names + # self.num_frames = 4 + self._load_metadata() + + def _load_metadata(self): + metadata_dir = './meta_data/msrvtt' + split_files = { + 'train': 'msrvtt_qa_train.jsonl', + 'val': 'msrvtt_qa_test.jsonl', + 'test': 'msrvtt_qa_test.jsonl' + } + answer_fp = os.path.join(metadata_dir, 'msrvtt_train_ans2label.json') # 1500 in total (all classes in train) + # answer_fp = os.path.join(metadata_dir, 'msrvtt_qa_ans2label.json') # 4539 in total (all classes in train+val+test) + answer_clip_id = os.path.join(metadata_dir, 'msrvtt_clip_id.json') + with open(answer_fp, 'r') as JSON: + self.ans_lab_dict = json.load(JSON) + with open(answer_clip_id, 'r') as JSON: + self.ans_clip_id = json.load(JSON) + for name in self.names: + split = name.split('_')[-1] + target_split_fp = split_files[split] + # path_or_buf=os.path.join(metadata_dir, target_split_fp) + metadata = pd.read_json(os.path.join(metadata_dir, target_split_fp), lines=True) + if self.metadata is None: + self.metadata = metadata + else: + self.metadata.update(metadata) + print("total {} samples for {}".format(len(self.metadata), self.names)) + # data1.update(data2) + + def get_text(self, sample): + text = sample['question'] + encoding = self.tokenizer( + text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return (text, encoding) + + def get_answer_label(self, sample): + text = sample['answer'] + ans_total_len = len(self.ans_lab_dict) + 1 # one additional class + try: + ans_label = self.ans_lab_dict[text] # + except KeyError: + ans_label = -100 # ignore classes + # ans_label = 1500 # other classes + scores = np.zeros(ans_total_len).astype(int) + scores[ans_label] = 1 + return text, ans_label, scores + # return text, ans_label_vector, scores + + def __getitem__(self, index): + sample = self.metadata.iloc[index] + video_tensor = self.get_video(sample) + text = self.get_text(sample) + # index, question_index = self.index_mapper[index] + qid = index + if self.split != "test": + answers, labels, scores = self.get_answer_label(sample) + else: + answers = list() + labels = list() + scores = list() + answers, labels, scores = self.get_answer_label(sample) + + return { + "video": video_tensor, + "text": text, + "vqa_answer": answers, + "vqa_labels": labels, + "vqa_scores": scores, + "qid": qid, + "ans_clip_id": self.ans_clip_id, + } + + def __len__(self): + return len(self.metadata) \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/msvd.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/msvd.py new file mode 100644 index 0000000..7561449 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/msvd.py @@ -0,0 +1,51 @@ +from .video_base_dataset import 
BaseDataset +import random +import os +import pandas as pd + + +class MSVDDataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + self.metadata = None + if split == "train": + names = ["msvd_train"] + elif split == "val": + names = ["msvd_val"] + elif split == "test": + names = ["msvd_test"] + self._load_metadata() + # self.num_frames = kwargs['num_frames'] + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + + def _load_metadata(self): + metadata_dir = './meta_data/msvd' + split_files = { + 'train': 'MSVD_train.tsv', + 'val': 'MSVD_test.tsv', # MSVD_val.tsv + 'test': 'MSVD_test.tsv' + } + target_split_fp = split_files[self.split] + metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t') + self.metadata = metadata + print("load split {}, {} samples".format(self.split, len(metadata))) + + def _get_video_path(self, sample): + rel_video_fp = sample[1] + '.avi' + full_video_fp = os.path.join(self.data_dir, 'YouTubeClips', rel_video_fp) + return full_video_fp, rel_video_fp + + def _get_caption(self, sample): + if self.split == 'train': + words = sample[0].split(',') + num_word = len(words) + index = random.randint(0, num_word - 1) + caption = words[index] + else: + # caption = sample[0] + words = sample[0].split(',') + num_word = len(words) + index = random.randint(0, num_word - 1) + caption = words[index] + return caption diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/msvdqa.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/msvdqa.py new file mode 100644 index 0000000..559719a --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/msvdqa.py @@ -0,0 +1,121 @@ +import numpy as np +from .video_base_dataset import BaseDataset +import os +import json +import pandas as pd + + +class MSVDQADataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + self.metadata = None + self.ans_lab_dict = None + if split == "train": + names = ["msvd_qa_train"] + elif split == "val": + names = ["msvd_qa_test"] # test: directly output test result + # ["msvd_qa_val"] + elif split == "test": + names = ["msvd_qa_test"] # vqav2_test-dev for test-dev + + super().__init__( + *args, + **kwargs, + names=names, + text_column_name="questions", + remove_duplicate=False, + ) + self._load_metadata() + self.data_dir = "s3://video_pub/MSVD/" # TODO: Remove this piece of shit + + def _load_metadata(self): + metadata_dir = './meta_data/msvd' + split_files = { + 'train': 'msvd_train_qa_encode.json', + 'val': 'msvd_val_qa_encode.json', + 'test': 'msvd_test_qa_encode.json' + } + # read ans dict + self.ans_lab_dict = {} + answer_fp = os.path.join(metadata_dir, 'msvd_answer_set.txt') + answer_clip_id = os.path.join(metadata_dir, 'msvd_clip_id.json') + self.youtube_mapping_dict = dict() + with open(os.path.join(metadata_dir, 'msvd_youtube_mapping.txt')) as f: + lines = f.readlines() + for line in lines: + info = line.strip().split(' ') + self.youtube_mapping_dict[info[1]] = info[0] + with open(answer_fp, 'r') as f: + lines = f.readlines() + count = 0 + for line in lines: + self.ans_lab_dict[str(line.strip())] = count + count += 1 + with open(answer_clip_id, 'r') as JSON: + self.ans_clip_id = json.load(JSON) + for name in self.names: + split = name.split('_')[-1] + target_split_fp = split_files[split] + metadata = pd.read_json(os.path.join(metadata_dir, 
target_split_fp), lines=True) + if self.metadata is None: + self.metadata = metadata + else: + self.metadata.update(metadata) + print("total {} samples for {}".format(sum(1 for line in self.metadata), self.names)) + + def _get_video_path(self, sample): + rel_video_fp = self.youtube_mapping_dict['vid' + str(sample["video_id"])] + '.avi' + # print(rel_video_fp) + full_video_fp = os.path.join(self.data_dir, 'MSVD_Videos', rel_video_fp) + return full_video_fp, rel_video_fp + + def get_text(self, sample): + text = sample['question'] + encoding = self.tokenizer( + text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return (text, encoding) + + def get_answer_label(self, sample): + text = sample['answer'] + ans_total_len = len(self.ans_lab_dict) + 1 # one additional class + try: + ans_label = self.ans_lab_dict[text] # + except KeyError: + ans_label = -100 # ignore classes + # ans_label = 1500 # other classes + scores = np.zeros(ans_total_len).astype(int) + scores[ans_label] = 1 + return text, ans_label, scores + # return text, ans_label_vector, scores + + def __getitem__(self, index): + sample = self.metadata[index].iloc[0] + video_tensor = self.get_video(sample) + text = self.get_text(sample) + # index, question_index = self.index_mapper[index] + qid = index + if self.split != "test": + answers, labels, scores = self.get_answer_label(sample) + else: + answers = list() + labels = list() + scores = list() + + return { + "video": video_tensor, + "text": text, + "vqa_answer": answers, + "vqa_labels": labels, + "vqa_scores": scores, + "qid": qid, + "ans_clip_id": self.ans_clip_id, + } + + def __len__(self): + return sum(1 for line in self.metadata) # count # json lines \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/pack_meta.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/pack_meta.py new file mode 100644 index 0000000..ce7bd3b --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/pack_meta.py @@ -0,0 +1,91 @@ +import pandas as pd +import numpy as np +from typing import Union + +SEP = "<<>>" + + +class DummyMeta(object): + def __init__(self, l): + self._len = l + + def __len__(self): + return self._len + + +def string_to_sequence(s: Union[str, list], dtype=np.int32) -> np.ndarray: + if isinstance(s, list): + assert not any(SEP in x for x in s) + s = SEP.join(s) + return np.array([ord(c) for c in s], dtype=dtype) + + +def sequence_to_string(seq: np.ndarray) -> Union[str, list]: + s = ''.join([chr(c) for c in seq]) + if SEP in s: + return s.split(SEP) + return s + + +def pack_sequences(seqs: Union[np.ndarray, list]) -> (np.ndarray, np.ndarray): + values = np.concatenate(seqs, axis=0) + offsets = np.cumsum([len(s) for s in seqs]) + return values, offsets + + +def unpack_sequence(values: np.ndarray, offsets: np.ndarray, index: int) -> np.ndarray: + off1 = offsets[index] + if index > 0: + off0 = offsets[index - 1] + elif index == 0: + off0 = 0 + else: + raise ValueError(index) + return values[off0:off1] + + +def pack_metadata(obj: object, df: pd.DataFrame): + assert not hasattr(obj, "metadata_keys") + assert not hasattr(obj, "metadata_is_str") + + df = df.dropna() + + metadata_keys = list(df.columns) + metadata_is_str = {c: df[c].dtype == pd.StringDtype for c in df.columns} + for c in df.columns: + if df[c].dtype == pd.StringDtype: + assert not hasattr(obj, "metadata_{}_v".format(c)) + assert not hasattr(obj, "metadata_{}_o".format(c)) 
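The helpers in this file encode each string column as one flat array of character codes plus an array of cumulative offsets, presumably so that forked DataLoader workers index into shared numpy buffers instead of each holding large numbers of Python string objects. A quick standalone round-trip check of those helpers (it assumes the `CoTrain` package is on `PYTHONPATH`; otherwise the functions above can be pasted in directly):

```python
# Standalone round-trip check of the packing helpers defined in this module.
from CoTrain.datasets.video.pack_meta import (
    string_to_sequence, sequence_to_string, pack_sequences, unpack_sequence,
)

captions = ["a dog runs", "a man is cooking", "two people talk"]

# pack: per-string character codes -> one values array + cumulative end offsets
values, offsets = pack_sequences([string_to_sequence(s) for s in captions])

# every string survives the encode/concatenate/slice/decode round trip
for i, original in enumerate(captions):
    assert sequence_to_string(unpack_sequence(values, offsets, i)) == original
```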
+ + seq_v, seq_o = pack_sequences([string_to_sequence(s) for s in df[c]]) + setattr(obj, "metadata_{}_v".format(c), seq_v) + setattr(obj, "metadata_{}_o".format(c), seq_o) + else: + assert not hasattr(obj, "metadata_{}".format(c)) + + seq = df[c].to_numpy() + setattr(obj, "metadata_{}".format(c), seq) + + setattr(obj, "metadata_keys", metadata_keys) + setattr(obj, "metadata_is_str", metadata_is_str) + + return DummyMeta(len(df)) + + +def unpack_metadata(obj: object, i: int): + ret = [] + for c in getattr(obj, "metadata_keys"): + if not getattr(obj, "metadata_is_str")[c]: + ret.append(getattr(obj, "metadata_{}".format(c))[i]) + else: + ret.append( + sequence_to_string( + unpack_sequence( + getattr(obj, "metadata_{}_v".format(c)), + getattr(obj, "metadata_{}_o".format(c)), + i, + ) + ) + ) + + return pd.Series(dict(zip(obj.metadata_keys, ret))) diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/tgif.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/tgif.py new file mode 100644 index 0000000..4feb87f --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/tgif.py @@ -0,0 +1,120 @@ +import numpy as np +from .video_base_dataset import BaseDataset, read_frames_gif +import os +import json +import pandas as pd +import random + +# 2022.1.28 read gif is too slow, may be need to speedup by convert gif -> video +# https://stackify.dev/833655-python-convert-gif-to-videomp4 + + +class TGIFDataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + self.metadata = None + self.ans_lab_dict = None + if split == "train": + names = ["tgif_train"] + elif split == "val": + names = ["tgif_val"] + elif split == "test": + names = ["tgif_test"] + + super().__init__( + *args, + **kwargs, + names=names, + text_column_name="questions", + remove_duplicate=False, + ) + # self.num_frames = 4 + self._load_metadata() + self.data_dir = "{your_tgif_dir}" + + def _load_metadata(self): + metadata_dir = './meta_data/tgif' + split_files = { + 'train': 'frameqa_train.jsonl', + 'val': 'frameqa_test.jsonl', # frameqa_val.jsonl + 'test': 'frameqa_test.jsonl' + } + target_split_fp = split_files[self.split] + answer_fp = os.path.join(metadata_dir, 'frameqa_trainval_ans2label.json') + # answer_fp = os.path.join(metadata_dir, 'msrvtt_qa_ans2label.json') + with open(answer_fp, 'r') as JSON: + self.ans_lab_dict = json.load(JSON) + # path_or_buf=os.path.join(metadata_dir, target_split_fp) + metadata = pd.read_json(os.path.join(metadata_dir, target_split_fp), lines=True) + self.metadata = metadata + + def _get_video_path(self, sample): + return os.path.join(self.data_dir, 'gifs', sample['gif_name']) + '.gif', sample['gif_name'] + '.gif' + + def get_raw_video(self, sample): + abs_fp, rel_fp = self._get_video_path(sample) + imgs, idxs, vlen = read_frames_gif(abs_fp, self.num_frames, mode=self.split) + if imgs is None: + raise Exception("Invalid img!", rel_fp) + else: + return imgs + + def get_text(self, sample): + text = sample['question'] + encoding = self.tokenizer( + text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return (text, encoding) + + def get_answer_label(self, sample): + text = sample['answer'] + ans_total_len = len(self.ans_lab_dict) + 1 # one additional class + try: + ans_label = self.ans_lab_dict[text] # + except KeyError: + ans_label = -100 # ignore classes + # ans_label = 1500 # other classes + scores = 
np.zeros(ans_total_len).astype(int) + scores[ans_label] = 1 + return text, ans_label, scores + # return text, ans_label_vector, scores + + def __getitem__(self, index): + result = None + while result is None: + sample = self.metadata.iloc[index] + try: + video_tensor = self.get_video(sample) + text = self.get_text(sample) + # index, question_index = self.index_mapper[index] + qid = index + result = True + except Exception as e: + gif_name = sample["gif_name"] + print(f"Error while read file idx {gif_name}") + assert self.split != "test" + index = random.randint(0, len(self.metadata) - 1) + + if self.split != "test": + answers, labels, scores = self.get_answer_label(sample) + else: + answers = list() + labels = list() + scores = list() + + return { + "video": video_tensor, + "text": text, + "vqa_answer": answers, + "vqa_labels": labels, + "vqa_scores": scores, + "qid": qid, + } + + def __len__(self): + return len(self.metadata) diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/tgifqa.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/tgifqa.py new file mode 100644 index 0000000..3b9f3ba --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/tgifqa.py @@ -0,0 +1,136 @@ +from .video_base_dataset import BaseDataset, read_frames_gif +import random +import os +import pandas as pd + +# action and transition: { +# "gif_name": "tumblr_nk172bbdPI1u1lr18o1_250", +# "question": "What does the butterfly do 10 or more than 10 times ?", +# "options": ["stuff marshmallow", "holds a phone towards face", +# "fall over", "talk", "flap wings"], +# "answer": 4 +# } + + +class TGIFQADataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + self.data_split = "action" # transition/action + self.metadata = None + self._load_metadata() + if split == "train": + names = ["tgifqa_train"] + elif split == "val": + names = ["tgifqa_val"] + elif split == "test": + names = ["tgifqa_test"] + + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + # for appear objects + self.only_use_relevant_dets = True + if self.only_use_relevant_dets: + self.relevant_dets = [] # resort the detection numbers + self.relevant_dets_classes = [] + self.fps = 3 # tgif sample 3 frames per second + self.data_dir = "/mnt/lustre/share_data/heyinan/data/tgif" # TODO: Remove this piece of shit + + def _load_metadata(self): + # download specific + metadata_dir = './meta_data/tgif' + if self.data_split == "action": + split_files = { + 'train': 'action_train.jsonl', + 'val': 'action_test.jsonl', # action_val.jsonl + 'test': 'action_test.jsonl' # no GT label for test set + } + elif self.data_split == "transition": + split_files = { + 'train': 'transition_train.jsonl', + 'val': 'transition_test.jsonl', # transition_val.jsonl + 'test': 'transition_test.jsonl' # no GT label for test set + } + else: + Exception("not support split") + target_split_fp = split_files[self.split] + metadata = pd.read_json(os.path.join(metadata_dir, target_split_fp), lines=True) + self.metadata = metadata + + # def _get_image_path(self, sample): + # # for example: tvqa/frames/raw_frames/frames_hq/met_frames/met_s06e22_seg01_clip_02 + # dir_name = sample['vid_name'].split('_')[0] + # if dir_name not in ['bbt', 'castle', 'friends', 'grey', 'house', 'met']: + # dir_name = 'bbt' + # rel_fp = os.path.join('frames/raw_frames/frames_hq/', dir_name + '_frames', sample['vid_name']) + # return os.path.join(self.data_dir, 
rel_fp), rel_fp + + def _get_caption(self, sample): + return sample[0] + + def _get_video_path(self, sample): + return os.path.join(self.data_dir, 'gifs', sample['gif_name']) + '.gif', sample['gif_name'] + '.gif' + + def get_raw_video(self, sample): + abs_fp, rel_fp = self._get_video_path(sample) + imgs, idxs, vlen = read_frames_gif(abs_fp, self.num_frames, mode=self.split) + if imgs is None: + raise Exception("Invalid img!", rel_fp) + else: + return imgs + + def get_text(self, sample): + question = self.get_question(sample) + qa_texts = [] + # 5 choices # ClipBERT: " ", Ours: [SEP] + options = " ".join(sample["options"][i] for i in range(5)) + for i in range(5): + raw_text = question + "Options: " + options + "Answer: " + sample["options"][i] + # raw_text = question + "[SEP]" + sample["options"][i] + # print(raw_text) + qa_encoding = self.tokenizer( + raw_text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + qa_texts.append((raw_text, qa_encoding)) + return qa_texts + + def get_answer_label(self, sample): + answer = int(sample['answer']) + return answer + + def get_question(self, sample): + return sample["question"] + + def __len__(self): + return len(self.metadata) + + def __getitem__(self, index): + result = None + while result is None: + sample = self.metadata.iloc[index] + try: + self.relevant_dets = [] # initalize + self.relevant_dets_classes = [] + answer = self.get_answer_label(sample) + ret = { + "vid_index": index, + "cap_index": index, + "raw_index": index, + 'answer': answer + } + qa_texts = self.get_text(sample) + ret["text"] = qa_texts[0] + for i in range(self.draw_options_text - 1): + ret.update({f"options_text_{i}": qa_texts[i+1]}) + video_tensor = self.get_video(sample) + ret["image"] = video_tensor + result = True + except Exception as e: + print(f"Error while read file idx {sample.name} in {self.names[0]} -> {e}") + print("time stamp is: {}".format(sample['ts'])) + index = random.randint(0, len(self.metadata) - 1) + return ret + diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/tvqa.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/tvqa.py new file mode 100644 index 0000000..50b4f72 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/tvqa.py @@ -0,0 +1,161 @@ +from .video_base_dataset import BaseDataset +import random +import os +import pandas as pd +import cv2 +import torch +from CoTrain.datasets.video.video_base_dataset import sample_frames + +# each sample: https://tvqa.cs.unc.edu/download_tvqa.html +# { +# "a0": "A martini glass", +# "a1": "Nachos", +# "a2": "Her purse", +# "a3": "Marshall's book", +# "a4": "A beer bottle", +# "answer_idx": 4, +# "q": "What is Robin holding in her hand when she is talking to Ted about Zoey?", +# "qid": 7, +# "ts": "1.21-8.49", +# "vid_name": "met_s06e22_seg01_clip_02", +# "show_name": +# } + + +class TVQADataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + self.metadata = None + self._load_metadata() + if split == "train": + names = ["tvqa_train"] + elif split == "val": + names = ["tvqa_val"] + elif split == "test": + names = ["tvqa_test"] + + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + # for appear objects + self.only_use_relevant_dets = True + if self.only_use_relevant_dets: + self.relevant_dets = [] # resort the detection numbers + self.relevant_dets_classes = [] + self.fps = 3 
# tvqa sample 3 frames per second + + def _load_metadata(self): + # download specific + metadata_dir = './meta_data/tvqa' + split_files = { + 'train': 'tvqa_train.jsonl', + 'val': 'tvqa_val.jsonl', + 'test': 'tvqa_test_public.jsonl' # no GT label for test set + } + target_split_fp = split_files[self.split] + metadata = pd.read_json(os.path.join(metadata_dir, target_split_fp), lines=True) + self.metadata = metadata + + def _get_image_path(self, sample): + # for example: tvqa/frames/raw_frames/frames_hq/met_frames/met_s06e22_seg01_clip_02 + dir_name = sample['vid_name'].split('_')[0] + if dir_name not in ['bbt', 'castle', 'friends', 'grey', 'house', 'met']: + dir_name = 'bbt' + rel_fp = os.path.join('frames/raw_frames/frames_hq/', dir_name + '_frames', sample['vid_name']) + return os.path.join(self.data_dir, rel_fp), rel_fp + + def _get_caption(self, sample): + return sample[0] + + # need to speed up + def _get_video_len(self, dir): + return len(os.listdir(dir)) + + # tvqa provide sampled frames + def get_raw_video(self, sample): + abs_fp, rel_fp = self._get_image_path(sample) + [beg_time, end_time] = sample['ts'].split('-') + clip_len = int((float(end_time) - float(beg_time)) * self.fps) + # try: + # clip_len = int((float(end_time) - float(beg_time)) * self.fps) + # except ValueError: + # clip_len = 1 + # prevent short than 1 second + clip_len = max(clip_len, 2*self.num_frames) + rel_frame_index = sample_frames(self.num_frames, clip_len) + begin_frame_index = max(1, int(float(beg_time) * self.fps)) + video_len = self._get_video_len(abs_fp) + # sample N frames here + frames = [] + for index in rel_frame_index: + abs_index = begin_frame_index + index + abs_index = min(video_len, abs_index) + image_rel_path = f'{abs_index:05}' + img = cv2.imread(os.path.join(abs_fp, '{}.jpg'.format(image_rel_path))) + # print(img) + # print(os.path.join(abs_fp, '{}.jpg'.format(image_rel_path))) + if img is None: + print(sample['vid_name']) + print(os.path.join(abs_fp, '{}.jpg'.format(image_rel_path))) + frame = torch.from_numpy(img).byte() + frame = frame.permute(2, 0, 1) + frames.append(frame) + frames = torch.stack(frames).permute(1, 0, 2, 3) + return frames + + def get_text(self, sample): + question = self.get_question(sample) + qa_texts = [] + # 5 choices # ClipBERT: " ", Ours: [SEP] + # if the length suppress than 40 ? 
+ options = " ".join(sample["a{}".format(i)] for i in range(5)) + for i in range(5): + raw_text = question + "Options: " + options + "Answer: " + sample["a{}".format(i)] + # raw_text = question + "[SEP]" + sample["a{}".format(i)] + # print(raw_text) + qa_encoding = self.tokenizer( + raw_text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + qa_texts.append((raw_text, qa_encoding)) + return qa_texts + + def get_answer_label(self, sample): + answer = int(sample['answer_idx']) + return answer + + def get_question(self, sample): + return sample["q"] + + def __len__(self): + return len(self.metadata) + + def __getitem__(self, index): + result = None + while result is None: + sample = self.metadata.iloc[index] + try: + self.relevant_dets = [] # initalize + self.relevant_dets_classes = [] + answer = self.get_answer_label(sample) + ret = { + "vid_index": index, + "cap_index": index, + "raw_index": index, + 'answer': answer + } + qa_texts = self.get_text(sample) + ret["text"] = qa_texts[0] + for i in range(self.draw_options_text - 1): + ret.update({f"options_text_{i}": qa_texts[i+1]}) + video_tensor = self.get_video(sample) + ret["image"] = video_tensor + result = True + except Exception as e: + print(f"Error while read file idx {sample.name} in {self.names[0]} -> {e}") + print("time stamp is: {}".format(sample['ts'])) + index = random.randint(0, len(self.metadata) - 1) + return ret + diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/tvqaplus.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/tvqaplus.py new file mode 100644 index 0000000..61906b4 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/tvqaplus.py @@ -0,0 +1,148 @@ +from .video_base_dataset import BaseDataset +import os +import pandas as pd +import cv2 +import torch +from CoTrain.datasets.video.video_base_dataset import sample_frames + +# each sample: https://tvqa.cs.unc.edu/download_tvqa_plus.html +# { +# "answer_idx": "1", +# "qid": 134094, +# "ts": [5.99, 11.98], +# "a1": "Howard is talking to Raj and Leonard", +# "a0": "Howard is talking to Bernadette", +# "a3": "Howard is talking to Leonard and Penny", +# "a2": "Howard is talking to Sheldon , and Raj", +# "q": "Who is Howard talking to when he is in the lab room ?", +# "vid_name": "s05e02_seg02_clip_00", +# "a4": "Howard is talking to Penny and Bernadette", +# "bbox": { +# "14": [ +# { +# "img_id": 14, +# "top": 153, +# "label": "Howard", +# "width": 180, +# "height": 207, +# "left": 339 +# }, +# { +# "img_id": 14, +# "top": 6, +# "label": "lab", +# "width": 637, +# "height": 354, +# "left": 3 +# }, +# ... +# ], +# "20": [ ... ], +# "26": [ ... ], +# "32": [ ... ], +# "38": [ ... 
] +# } +# } + + +class TVQAPLUSDataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + self.metadata = None + self._load_metadata() + if split == "train": + names = ["tvqaplus_train"] + elif split == "val": + names = ["tvqaplus_val"] + elif split == "test": + names = ["tvqaplus_test"] + + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + # for appear objects + self.only_use_relevant_dets = True + if self.only_use_relevant_dets: + self.relevant_dets = [] # resort the detection numbers + self.relevant_dets_classes = [] + + def _load_metadata(self): + # download specific + metadata_dir = './meta_data/tvqa' + split_files = { + 'train': 'tvqa_plus_train.jsonl', + 'val': 'tvqa_plus_val.jsonl', + 'test': 'tvqa_plus_test_public.jsonl' # no GT label for test set + } + target_split_fp = split_files[self.split] + metadata = pd.read_json(os.path.join(metadata_dir, target_split_fp), lines=True) + self.metadata = metadata + + def _get_image_path(self, sample): + rel_fp = sample['vid_name'] + return os.path.join(self.data_dir, rel_fp), rel_fp + + def _get_caption(self, sample): + return sample[0] + + # tvqaplus provide sampled frames (3 fps) + # To Do: considering sample one frame with bounding box + def get_raw_video(self, sample): + abs_fp, rel_fp = self._get_image_path(sample) + [beg_time, end_time] = sample['ts'] + clip_len = int((float(end_time) - float(beg_time)) * 3) + rel_frame_index = sample_frames(self.num_frames, clip_len) + # sample N frames here + frames = [] + for index in rel_frame_index: + img = cv2.imread(abs_fp + '{}.jpg'.format(index)) + frame = torch.from_numpy(img).byte() + frame = frame.permute(2, 0, 1) + frames.append(frame) + frames = torch.stack(frames).permute(1, 0, 2, 3) + return frames + + def get_text(self, sample): + question = self.get_question(sample) + qa_texts = [] + # 5 choices + for i in range(5): + raw_text = question + "[SEP]" + sample["a{}".format(i)] + qa_encoding = self.tokenizer( + raw_text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + qa_texts.append((raw_text, qa_encoding)) + return qa_texts + + def get_answer_label(self, sample): + answer = int(sample['answer_idx']) + return answer + + def get_question(self, sample): + return sample["q"] + + def __len__(self): + return len(self.metadata) + + def __getitem__(self, index): + sample = self.metadata.iloc[index] + self.relevant_dets = [] # initalize + self.relevant_dets_classes = [] + answer = self.get_answer_label(sample) + ret = { + "vid_index": index, + "cap_index": index, + "raw_index": index, + 'answer': answer + } + qa_texts = self.get_text(sample) + ret["text"] = qa_texts[0] + for i in range(self.draw_options_text - 1): + ret.update({f"options_text_{i}": qa_texts[i+1]}) + video_tensor = self.get_video(sample) + ret["video"] = video_tensor + return ret + diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/ucf101.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/ucf101.py new file mode 100644 index 0000000..d498657 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/ucf101.py @@ -0,0 +1,88 @@ +import numpy as np +from .video_base_dataset import BaseDataset +import os + + +class UCF101Dataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + self.metadata = None + self.ans_lab_dict = 
dict() + if split == "train": + names = ["ucf101_train"] + elif split == "val": + names = ["ucf101_val"] + elif split == "test": + names = ["ucf101_test"] + super().__init__( + *args, + **kwargs, + names=names, + text_column_name="questions", + remove_duplicate=False, + ) + self._load_metadata() + + def _load_metadata(self): + metadata_dir = './meta_data/ucf101' + split_files = { + 'train': 'hmdb51_rgb_train_split_1.txt', + 'val': 'hmdb51_rgb_val_split_1.txt', + 'test': 'hmdb51_rgb_val_split_1.txt' + } + target_split_fp = split_files[self.split] + self.metadata = [x.strip().split(' ') for x in open(os.path.join(metadata_dir, target_split_fp))] + answer_fp = os.path.join(metadata_dir, 'hmdb51_classInd.txt') + with open(answer_fp, 'r') as f: + lines = f.readlines() + for line in lines: + self.ans_lab_dict[str(int(line.strip().split(' ')[0]) - 1)] = line.strip().split(' ')[1] + + def _get_video_path(self, sample): + # self.ans_lab_dict[sample[2]], + return os.path.join(self.data_dir, sample[0].split('/')[-1]) + '.avi', sample[0].split('/')[-1] + '.avi' + + def get_text(self, sample): + text = "A person is doing [MASK]" + encoding = self.tokenizer( + text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return (text, encoding) + + def get_answer_label(self, sample): + text = "None" + ans_total_len = len(self.ans_lab_dict) + 1 # one additional class + ans_label = int(sample[2]) + scores = np.zeros(ans_total_len).astype(int) + scores[ans_label] = 1 + return text, ans_label, scores + # return text, ans_label_vector, scores + + def __getitem__(self, index): + sample = self.metadata[index] # .split(' ') + video_tensor = self.get_video(sample) + text = self.get_text(sample) + qid = index + if self.split != "test": + answers, labels, scores = self.get_answer_label(sample) + else: + answers = list() + labels = list() + scores = list() + + return { + "video": video_tensor, + "text": text, + "vqa_answer": answers, + "vqa_labels": labels, + "vqa_scores": scores, + "qid": qid, + } + + def __len__(self): + return len(self.metadata) \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/video_base_dataset.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/video_base_dataset.py new file mode 100644 index 0000000..e258a8f --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/video_base_dataset.py @@ -0,0 +1,678 @@ +import random +import torch +import io +import os +import cv2 +import numpy as np +from PIL import Image +from CoTrain.transforms import keys_to_transforms +import decord +from decord import cpu +import imageio +# add for ytt asr clean +import ftfy +import regex as re +import demoji +import editdistance +import tslearn.metrics +import string +from CoTrain.transforms.video.videoaug import VideoTransform, video_aug +from CoTrain.datasets import client +import CoTrain.modules.InternVideo as internvideo + + +class BaseDataset(torch.utils.data.Dataset): + def __init__( + self, + data_dir: str, + transform_keys: list, + image_size: int, + names: list, + text_column_name: str = "", + remove_duplicate=True, + max_text_len=40, + draw_false_image=0, + draw_false_video=0, + draw_false_text=0, + image_only=False, + video_only=False, + num_frames=1, + draw_options_text=0, + backend='v100' + ): + """ + data_dir : where dataset file *.arrow lives; existence should be guaranteed via DataModule.prepare_data + transform_keys : keys for generating augmented 
views of videos + text_column_name : pyarrow table column name that has list of strings as elements + """ + assert len(transform_keys) >= 1 + super().__init__() + + self.transforms = keys_to_transforms(transform_keys, size=image_size) + self.text_column_name = text_column_name + self.names = names + self.max_text_len = max_text_len + self.draw_false_video = draw_false_video + self.draw_false_text = draw_false_text + self.video_only = video_only + self.data_dir = data_dir + if len(names) != 0: + dataset_name = names[0].split('_')[0] + if dataset_name in ['tgif', 'tgifqa']: + dataset_name = 'tgif' + self.data_dir = os.path.join(self.data_dir, dataset_name) # e.g. webvid_train -> webvid + split_name = dataset_name + if torch.distributed.get_rank() == 0: + print('*'*100) + print("video datasets: {}".format(names)) + self.draw_options_text = draw_options_text + self.num_frames = num_frames + if torch.distributed.get_rank() == 0: + print("# frames for base dataset is: {}".format(self.num_frames)) + if split_name in ['msrvtt', 'cc3m', 'webvid', 'msvd', 'vcr', 'howto100m', 'ego4d', 'yttemporal', 'tgif', 'hmdb51', 'k400']: + if torch.distributed.get_rank() == 0: + print("no arrow available for {}, load from disk".format(names[0])) + else: + print("not support video dataset") + self.video_transform = VideoTransform(mode=self.split, crop_size=image_size, backend=backend) + self.video_aug = video_aug + + @property + def corpus(self): + return [text for texts in self.all_texts for text in texts] + + def __len__(self): + return len(self.metadata) + + def __getitem__(self, index): + return self.get_suite(index) + + def get_raw_image(self, index, image_key="image"): + index, caption_index = self.index_mapper[index] + image_bytes = io.BytesIO(self.table[image_key][index].as_py()) + image_bytes.seek(0) + return Image.open(image_bytes).convert("RGB") + + def _get_video_path(self, sample): + if self.names[0] in ['msrvtt_train', 'msrvtt_test', 'msrvtt_val']: + return os.path.join(self.data_dir, 'videos', 'all', sample.name + '.mp4'), sample.name + '.mp4' + else: + return os.path.join(self.data_dir, 'videos', 'all', str(sample['video_id']) + '.mp4'), str(sample['video_id']) + '.mp4' + + def get_raw_video(self, sample): + abs_fp, rel_fp = self._get_video_path(sample) + imgs, idxs, vlen = read_frames_decord(abs_fp, self.num_frames, mode=self.split) + if imgs is None: + raise Exception("Invalid img!", rel_fp) + else: + return imgs + + def get_video(self, sample): + videos = self.get_raw_video(sample) + videos_tensor = self.video_aug(videos, self.video_transform) + return videos_tensor + + def get_false_video(self, rep): + random_index = random.randint(0, len(self.metadata) - 1) + sample = self.metadata.iloc[random_index] + videos = self.get_raw_video(sample) + videos_tensor = self.video_aug(videos, self.video_transform) + return {f"false_video_{rep}": videos_tensor} + + def _get_caption(self, sample): + if self.names[0] in ['msrvtt_train']: + caption = random.choice(sample['captions']) + else: + caption = sample['captions'][0] + return caption + + def get_text(self, raw_index, sample): + text = self._get_caption(sample) + encoding = self.tokenizer( + text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + # print(encoding.size()) + return { + "text": (text, encoding), + "vid_index": raw_index, + "cap_index": raw_index, + "raw_index": raw_index, + } + + def get_false_text(self, rep): + random_index = random.randint(0, len(self.metadata) - 1) + sample 
= self.metadata.iloc[random_index] + text = self._get_caption(sample) + encoding = self.tokenizer( + text, + # padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return {f"false_text_{rep}": (text, encoding)} + + def get_suite(self, index): + result = None + while result is None: + # retry_times += 1 + sample = self.metadata.iloc[index] + # print(sample[1]) + try: + video_tensor = self.get_video(sample) + ret = { + "video": video_tensor, + "vid_index": index, + "cap_index": index, + "raw_index": index, + } + if not self.video_only: + txt = self.get_text(index, sample) + ret.update({"replica": True if txt["cap_index"] > 0 else False}) + ret.update(txt) + + for i in range(self.draw_false_video): + ret.update(self.get_false_video(i)) + for i in range(self.draw_false_text): + ret.update(self.get_false_text(i)) + result = True + except Exception as e: + print(f"Error while read file idx {sample.name} in {self.names[0]} -> {e}") + index = random.randint(0, len(self.metadata) - 1) + return ret + + def collate(self, batch, mlm_collator): + batch_size = len(batch) + keys = set([key for b in batch for key in b.keys()]) + dict_batch = {k: [dic[k] if k in dic else None for dic in batch] for k in keys} + + video_keys = [k for k in list(dict_batch.keys()) if "video" in k] + video_sizes = list() + + # global & local video + for video_key in video_keys: + video_sizes += [ii.shape for i in dict_batch[video_key] if i is not None for ii in i] + # print(global_video_sizes, local_video_sizes) + + for size in video_sizes: + # print(size) + assert ( + len(size) == 4 + ), f"Collate error, an video should be in shape of (T, N, H, W), instead of given {size}" + + if len(video_keys) != 0: + global_max_height = max([i[2] for i in video_sizes]) + global_max_width = max([i[3] for i in video_sizes]) + global_min_height = min([i[2] for i in video_sizes]) + global_min_width = min([i[3] for i in video_sizes]) + for video_key in video_keys: + video = dict_batch[video_key] + view_size = len(video[0]) + if (view_size == 1 and + global_max_height == global_min_height and + global_max_width == global_min_width): + dict_batch[video_key] = [torch.stack([x[0] for x in video])] + continue + + assert False, (view_size, global_max_height, global_min_height, + global_max_width, global_min_width) + new_videos = [ + torch.zeros(batch_size, self.num_frames, 3, global_max_height, global_max_width) + for _ in range(view_size) + ] + for bi in range(batch_size): + orig_batch = video[bi] + for vi in range(view_size): + if orig_batch is None: + # new_videos[vi][bi] = None + # modify by alex + continue + else: + orig = video[bi][vi] + # print(orig.size()) + new_videos[vi][bi, :, :, : orig.shape[-2], : orig.shape[-1]] = orig + + dict_batch[video_key] = new_videos + + txt_keys = [k for k in list(dict_batch.keys()) if "text" in k] + # print(txt_keys) + if len(txt_keys) != 0: + texts = [[d[0] for d in dict_batch[txt_key]] for txt_key in txt_keys] + encodings = [[d[1] for d in dict_batch[txt_key]] for txt_key in txt_keys] + draw_text_len = len(encodings) + flatten_encodings = [e for encoding in encodings for e in encoding] + flatten_mlms = mlm_collator(flatten_encodings) + + for i, txt_key in enumerate(txt_keys): + texts, encodings = ( + [d[0] for d in dict_batch[txt_key]], + [d[1] for d in dict_batch[txt_key]], + ) + + mlm_ids, mlm_labels = ( + flatten_mlms["input_ids"][batch_size * (i) : batch_size * (i + 1)], + flatten_mlms["labels"][batch_size * (i) : batch_size * (i + 1)], + ) + + 
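                # The padding step that follows re-assembles fixed-size tensors from
                # the per-sample tokenizer encodings: input_ids / attention_mask start
                # as zeros shaped like the collated MLM batch and are filled row by
                # row up to each encoding's true length. "<txt_key>_labels" is filled
                # with -100 (the ignore index of the LM loss), while the masked ids /
                # labels produced by mlm_collator are kept separately under
                # "_ids_mlm" / "_labels_mlm".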
input_ids = torch.zeros_like(mlm_ids) + attention_mask = torch.zeros_like(mlm_ids) + for _i, encoding in enumerate(encodings): + _input_ids, _attention_mask = ( + torch.tensor(encoding["input_ids"]), + torch.tensor(encoding["attention_mask"]), + ) + input_ids[_i, : len(_input_ids)] = _input_ids + attention_mask[_i, : len(_attention_mask)] = _attention_mask + + dict_batch[txt_key] = texts + dict_batch[f"{txt_key}_ids"] = input_ids + dict_batch[f"{txt_key}_labels"] = torch.full_like(input_ids, -100) + dict_batch[f"{txt_key}_ids_mlm"] = mlm_ids + dict_batch[f"{txt_key}_labels_mlm"] = mlm_labels + dict_batch[f"{txt_key}_masks"] = attention_mask + + clip_text_ids, clip_special_tokens_mask = internvideo.tokenize( + dict_batch["text"], truncate=True, return_special_tokens_mask=True) + dict_batch["clip_text_ids"] = clip_text_ids + dict_batch["clip_special_tokens_mask"] = clip_special_tokens_mask + + return dict_batch + + +def sample_frames(num_frames, vlen, sample='rand', fix_start=None): + acc_samples = min(num_frames, vlen) + intervals = np.linspace(start=0, stop=vlen, num=acc_samples + 1).astype(int) + ranges = [] + for idx, interv in enumerate(intervals[:-1]): + ranges.append((interv, intervals[idx + 1] - 1)) + if sample == 'rand': + frame_idxs = [random.choice(range(x[0], x[1])) for x in ranges] + elif fix_start is not None: + frame_idxs = [x[0] + fix_start for x in ranges] + elif sample == 'uniform': + frame_idxs = [(x[0] + x[1]) // 2 for x in ranges] + else: + raise NotImplementedError + + return frame_idxs + + +def read_frames_gif(video_path, num_frames, mode='train', fix_start=None): + if mode == 'train': + sample = 'rand' + else: + sample = 'uniform' + gif = imageio.get_reader(video_path) + vlen = len(gif) + frame_idxs = sample_frames(num_frames, vlen, sample=sample, fix_start=fix_start) + frames = [] + for index, frame in enumerate(gif): + # for index in frame_idxs: + if index in frame_idxs: + frame = cv2.cvtColor(frame, cv2.COLOR_RGBA2RGB) + frame = torch.from_numpy(frame).byte() + # # (H x W x C) to (C x H x W) + frame = frame.permute(2, 0, 1) + # frame = Image.fromarray(frame) + frames.append(frame) + frames = torch.stack(frames) # .float() / 255 + # print(frames.size()) + return frames, frame_idxs, vlen + + +def read_frames_cv2(video_path, num_frames, sample='rand', fix_start=None): + # print(video_path) + cap = cv2.VideoCapture(video_path) + assert (cap.isOpened()) + # for decord + # cap.set(3, 256) + # cap.set(4, 256) + vlen = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + # get indexes of sampled frames + frame_idxs = sample_frames(num_frames, vlen, sample=sample, fix_start=fix_start) + frames = [] + success_idxs = [] + for index in frame_idxs: + cap.set(cv2.CAP_PROP_POS_FRAMES, index - 1) + ret, frame = cap.read() + if ret: + frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + frame = torch.from_numpy(frame).byte() + # # (H x W x C) to (C x H x W) + frame = frame.permute(2, 0, 1) + # frame = Image.fromarray(frame) + frames.append(frame) + success_idxs.append(index) + else: + pass + # print(frame_idxs, ' fail ', index, f' (vlen {vlen})') + # return frames tensor + # convert cv to PIL + # img = Image.fromarray(imgs[0]) + frames = torch.stack(frames) # .float() / 255 + # print(frames.size()) + cap.release() + return frames, success_idxs, vlen + + +def read_frames_decord(video_path, num_frames, mode='train', fix_start=None, width=256, height=256): + if "s3://" in video_path: + video_bytes = client.get(video_path) + assert video_bytes is not None, "Get video failed from {}".format(video_path) 
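    # For "s3://" paths the raw bytes are fetched through the storage client and
    # then wrapped in an in-memory io.BytesIO object just below, which is handed
    # to decord.VideoReader in place of an on-disk path, so no temporary file is
    # written.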
+ video_path = video_bytes + if isinstance(video_path, bytes): # create a readable and writable binary data stream object in memory + video_path = io.BytesIO(video_bytes) + if mode in ['train']: + sample = 'rand' + else: + sample = 'uniform' + video_reader = decord.VideoReader(video_path, width=width, height=height, num_threads=1, ctx=cpu(0)) + decord.bridge.set_bridge('torch') + vlen = len(video_reader) + frame_idxs = sample_frames(num_frames, vlen, sample=sample, fix_start=fix_start) + frames = video_reader.get_batch(frame_idxs).byte() + frames = frames.permute(0, 3, 1, 2).cpu() + return frames, frame_idxs, vlen + + +def read_frames_from_img_dir(video_path, num_frames, mode='train', fix_start=None, suffix='.jpg'): + if mode in ['train', 'val']: + sample = 'rand' + else: + sample = 'uniform' + vlen = len(os.listdir(video_path)) + frame_idxs = sample_frames(num_frames, vlen, sample=sample, fix_start=fix_start) + frames = [] + for idx in frame_idxs: + frame = cv2.imread(os.path.join(video_path, string(idx).zfill(3) + suffix)) + frame = torch.from_numpy(frame).byte() + frame = frame.permute(2, 0, 1) + frames.append(frame) + frames = frames.permute(0, 3, 1, 2) + return frames, frame_idxs, vlen + + +def sample_frames_2(frame_loc, vlen, frame_end): + assert frame_loc <= frame_end + frame_idxs = [frame_loc] + return frame_idxs + + +def read_large_frames_decord(video_path, frame_loc, frame_end, num_frames, mode='train', fix_start=None): + # print('*'*100) + # print(mode) + if mode == 'train': + sample = 'rand' + else: + sample = 'uniform' + # video_reader = decord.VideoReader(video_path, width=256, height=256, num_threads=1, ctx=cpu(0)) + video_reader = decord.VideoReader(video_path, width=512, height=512, num_threads=1, ctx=cpu(0)) + decord.bridge.set_bridge('torch') + # vlen = len(video_reader) + frame_idxs = sample_frames(num_frames, 120, sample=sample, fix_start=fix_start) + for i in range(len(frame_idxs)): + # if random.random() < 0.5: + # frame_idxs[i] += frame_loc - 60 + # else: + # frame_idxs[i] += frame_loc + frame_idxs[i] += frame_loc - 60 + frame_idxs[i] = min(frame_idxs[i], frame_end-1) + frame_idxs[i] = max(0, frame_idxs[i]) + # print(frame_loc, frame_end, frame_idxs) + frames = video_reader.get_batch(frame_idxs).byte() + frames = frames.permute(0, 3, 1, 2) + return frames + + +def video_clip_reader(video_path, begin_time, end_time, duration, num_frames): + cap = cv2.VideoCapture(video_path) + # assert (cap.isOpened()) + vlen = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + # print(video_path, begin_time, end_time, duration, num_frames, vlen) + average_fps = vlen / duration + clip_len = (end_time - begin_time) * average_fps + frame_idxs = sample_frames(num_frames, int(clip_len), sample='rand') + frames = [] + success_idxs = [] + rel_index = int(begin_time * average_fps) + rel_index = max(rel_index, 0) + rel_index = min(rel_index, vlen-1) + for index in frame_idxs: + cap.set(cv2.CAP_PROP_POS_FRAMES, rel_index+index) + ret, frame = cap.read() + if ret: + frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + frame = torch.from_numpy(frame).byte() + # # (H x W x C) to (C x H x W) + frame = frame.permute(2, 0, 1) + # frame = Image.fromarray(frame) + frames.append(frame) + success_idxs.append(index) + else: + pass + # print(frame_idxs, ' fail ', index, f' (vlen {vlen})') + frames = torch.stack(frames) + cap.release() + if frames.size(0) < num_frames: + zeros = torch.ones((num_frames - frames.size(1), 3, frames.size(-2), frames.size(-1)), dtype=torch.uint8).byte() + frames = torch.cat((frames, zeros), 
axis=1) + if frames.size(0) != num_frames: + Exception(RuntimeError) + return frames + + +def video_reader(video_path, frame_loc, frame_end, num_frames): + cap = cv2.VideoCapture(video_path) + assert (cap.isOpened()) + vlen = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + # get indexes of sampled frames fps is 30, 4s + frame_idxs = sample_frames(num_frames, 120, sample='rand') + # frame_idxs = sample_frames_2(frame_loc, vlen, frame_end) + frames = [] + success_idxs = [] + + for index in frame_idxs: + if random.random() < 0.5: + rel_index = index + frame_loc - 120 + else: + rel_index = index + frame_loc + rel_index = max(rel_index, 0) + rel_index = min(rel_index, frame_end) + cap.set(cv2.CAP_PROP_POS_FRAMES, rel_index) + ret, frame = cap.read() + if ret: + frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + frame = torch.from_numpy(frame).byte() + # # (H x W x C) to (C x H x W) + frame = frame.permute(2, 0, 1) + # frame = Image.fromarray(frame) + frames.append(frame) + success_idxs.append(index) + else: + pass + # print(frame_idxs, ' fail ', index, f' (vlen {vlen})') + frames = torch.stack(frames) + cap.release() + return frames + + +def fast_decode(video_path, num_frames, mode='train', fix_start=None, fps=30): + cap = cv2.VideoCapture(video_path) + vlen = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + cap.release() + max_len = vlen/30 + num_sec = num_frames / float(fps) + size = 224 + crop_only = True + random_flip = True + start_seek = random.randint(0, int(max(max_len, max_len - num_sec))) + cmd = ( + ffmpeg + .input(video_path, ss=start_seek, t=num_sec + 0.1) + .filter('fps', fps=fps) + ) + if mode=='train': + aw, ah = random.uniform(0, 1), random.uniform(0, 1) + else: + aw, ah = 0.5, 0.5 + if crop_only: + cmd = ( + cmd.crop('(iw - {})*{}'.format(size, aw), + '(ih - {})*{}'.format(size, ah), + str(size), str(size)) + ) + else: + cmd = ( + cmd.crop('(iw - min(iw,ih))*{}'.format(aw), + '(ih - min(iw,ih))*{}'.format(ah), + 'min(iw,ih)', + 'min(iw,ih)') + .filter('scale', size, size) + ) + if random_flip and random.uniform(0, 1) > 0.5: + cmd = cmd.hflip() + out, _ = ( + cmd.output('pipe:', format='rawvideo', pix_fmt='rgb24') + .run(capture_stdout=True, quiet=True) + ) + video = np.frombuffer(out, np.uint8).reshape([-1, size, size, 3]) + video = torch.from_numpy(video) + video = video.permute(3, 0, 1, 2) + return video, _, _ + + +# 21 +colormaps = [(255, 0, 0), (0, 255, 0), +(0, 0, 255), (61, 145, 64), (127, 255, 212), (0, 201, 87), +(218, 112, 214), (255, 0, 255), (112, 128, 105), (250, 235, 215), +(240, 255, 255), (252, 230, 201), (255, 255, 0), (235, 142, 85), +(255, 97, 0), (176, 224, 230), (65, 106, 225,), (0, 255, 255), +(56, 94, 15), (8, 46, 84), (255, 192, 203)] + + +# only_use_relevant_dets ? 
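# A minimal, self-contained sketch of the ffmpeg-python "rawvideo over a pipe"
# pattern that fast_decode() above relies on: seek into the file, resample to a
# fixed fps, center-crop a square, and reshape the raw RGB24 bytes into a
# (C, T, H, W) uint8 tensor. The helper name and the fixed center crop are
# illustrative assumptions for this sketch; note that fast_decode() itself also
# needs `import ffmpeg` (ffmpeg-python), which is missing from the imports at
# the top of this file.
import ffmpeg  # ffmpeg-python


def decode_clip_rawvideo(video_path, start_sec, num_sec, fps=30, size=224):
    out, _ = (
        ffmpeg
        .input(video_path, ss=start_sec, t=num_sec)            # seek + duration
        .filter('fps', fps=fps)                                 # constant frame rate
        .crop('(iw - {})*0.5'.format(size),                     # center crop (x, y, w, h)
              '(ih - {})*0.5'.format(size),
              str(size), str(size))
        .output('pipe:', format='rawvideo', pix_fmt='rgb24')    # raw RGB bytes on stdout
        .run(capture_stdout=True, quiet=True)
    )
    frames = np.frombuffer(out, np.uint8).reshape([-1, size, size, 3])
    return torch.from_numpy(frames.copy()).permute(3, 0, 1, 2)  # (C, T, H, W)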
+ +def color_img(im, object_meta, relevant_dets, only_use_relevant_dets=True): + # mask detected region + # only_use_relevant_dets: if true, we only mask regions that mentioned in question & answers + # print(relevant_dets) + if only_use_relevant_dets: + boxes = [] + for index in relevant_dets: + boxes.append(object_meta['boxes'][index]) + # print(object_meta['names'][index]) + # object_index = relevant_dets + else: + boxes = object_meta['boxes'] + # print(len(boxes)) + # range(len(boxes)) + for i in range(len(boxes)): + if i > 20: + break + bbox = boxes[i] + # white_rect = cv2.applyColorMap(white_rect, i) + # only considering bounding box here (wo fine-grained segmentation) + sub_img = im[int(bbox[1]):int(bbox[3]), int(bbox[0]):int(bbox[2])] + white_rect = np.ones(sub_img.shape, dtype=np.uint8) * 255 + white_rect[:, :, 0] = colormaps[i][0] + white_rect[:, :, 1] = colormaps[i][1] + white_rect[:, :, 2] = colormaps[i][2] + res = cv2.addWeighted(sub_img, 0.7, white_rect, 0.3, 1.0) + im[int(bbox[1]):int(bbox[3]), int(bbox[0]):int(bbox[2])] = res + cv2.rectangle(im, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), colormaps[i], 3) + return im + + +def get_video_len(video_src): + cap = cv2.VideoCapture(video_src) + vlen = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + cap.release() + return vlen + + +def align_using_dtw(input_asr, grover_output, radius_perc=0.1, radius_abs=32): + """ + :param input_asr: List of words + :param grover_output: List of words also, could be different size + :param radius_perc: Percent of input ASR + :param radius_abs: Absolute ntokens + :return: + """ + max_radius = int(max(len(input_asr) * radius_perc, radius_abs)) + # sometimes grover just keeps going + if len(grover_output) > len(input_asr): + grover_output = grover_output[:len(input_asr) + max_radius] + + # DONT give the alignment freedom if it's at the end of a sequence to just "give up" by padding with zeros + # Default value is high + auto2other = np.zeros((len(input_asr), len(grover_output)), dtype=np.float32) + 9999.0 + + def _preprocess_text(x): + return x.translate(str.maketrans('', '', string.punctuation)).strip().lower() + + input_asr_pre = [_preprocess_text(x) for x in input_asr] + input_gro_pre = [_preprocess_text(x) for x in grover_output] + for a_idx, a in enumerate(input_asr_pre): + start = max(a_idx - max_radius, 0) + end = min(a_idx + max_radius, len(input_gro_pre)) + for o_idx in range(start, end): + o = input_gro_pre[o_idx] + auto2other[a_idx, o_idx] = editdistance.eval(a, o) + + idxs, score = tslearn.metrics.dtw_path_from_metric(auto2other, metric='precomputed') + denoised_out = [[] for x in input_asr] + has_seen = -1 + for idx1, idx2 in idxs: + if (idx1 >= len(input_asr)) or (idx2 >= len(grover_output)): + break + if idx2 > has_seen: + # Basically don't add if it's a duplicate -- a grover output that matches to 2 things + # This often leads to slightly weird results because we really should match the next thing, but we instead matched the first thing + # e.g. 
+ # input_asr_pre = ['much', 'of', 'a', 'pancake', 'waffle', 'person', 'so', 'i', 'love', 'a'] + # input_gro_pre = ['much', 'of', 'a', 'pancakewaffle', 'person', 'so', 'i', 'love', 'a', 'good'] + # but we align pancakewaffle-> pancake and person -> waffle AND person -> person + denoised_out[idx1].append(grover_output[idx2]) + has_seen = idx2 + return [' '.join(x) for x in denoised_out] + + +def clean_subtitles(subtitle_dicts): + """ + :param subtitle_dicts: {'word': X, 'time': Y} + :return: + """ + # Remove >> maybe using ftfy or something + new_dicts = [] + for x in subtitle_dicts: + if x['word'].startswith('&') or x['word'].endswith(';'): + continue + fixed_word = ftfy.ftfy(x['word']) + if len(fixed_word) == 0: + continue + x['word'] = fixed_word + new_dicts.append(x) + return new_dicts + + +def clean_description(text): + # Strip emojis first + all_emojis = demoji.findall(text) + for k, v in all_emojis.items(): + text = text.replace(k, f'[{v}]'.replace(' ', '')) + text = text.strip() + + # Remove URLs + # https://stackoverflow.com/questions/11331982/how-to-remove-any-url-within-a-string-in-python/11332580 + text = re.sub( + r'''(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’]))''', + "%", text) + + text = re.sub(' +', ' ', text) # Probably should have done + text = re.sub('\s*\n+', '\n', text) + text = text.strip() + return text diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/webvid.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/webvid.py new file mode 100644 index 0000000..4ef4170 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/webvid.py @@ -0,0 +1,139 @@ +from .video_base_dataset import BaseDataset, read_frames_decord +import random +import os +import pandas as pd +from .pack_meta import pack_metadata, unpack_metadata + + +class WEBVIDDataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + self.metadata = None + self.cut = "jsfusion" + if split == "train": + names = ["webvid_train"] + elif split == "val": + names = ["webvid_val"] + elif split == "test": + names = ["webvid_val"] + self._load_metadata() + + print(names, ": ", len(self.metadata), "samples in total.") + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + if "s3://" in self.data_dir: + self.data_dir = "s3://video_pub/WebVid2M/" + + def _load_metadata(self): + metadata_dir = '/mnt/cache/share_data/DSK_datasets/webvid/' + split_files = { + 'train': 'results_2M_train.csv', + 'val': 'results_2M_val.csv', # there is no test + 'test': 'results_2M_val.csv' + } + target_split_fp = split_files[self.split] + metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp)) + metadata = metadata[["name", "page_dir", "videoid"]] + self.metadata = pack_metadata(self, metadata) + + def _get_video_path(self, sample): + rel_video_fp = os.path.join(str(sample[1]), str(sample[2]) + '.mp4') + if "s3://" in self.data_dir: + full_video_fp = os.path.join(self.data_dir, rel_video_fp) + else: + full_video_fp = os.path.join(self.data_dir, self.split, rel_video_fp) + return full_video_fp, rel_video_fp + + def _get_caption(self, sample): + return sample[0] + + def get_raw_video(self, sample): + abs_fp, rel_fp = self._get_video_path(sample) + videos, idxs, vlen = read_frames_decord(abs_fp, self.num_frames, mode=self.split) + if videos is 
None: + raise Exception("Invalid video!", rel_fp) + else: + return videos + + def get_video(self, index, sample): + videos = self.get_raw_video(sample) + videos_tensor = self.video_aug(videos, self.video_transform) + return { + "video": videos_tensor, + "vid_index": index, + "cap_index": index, + "raw_index": index, + } + + def get_false_video(self, rep): + random_index = random.randint(0, len(self.metadata) - 1) + sample = unpack_metadata(self, random_index) + # can be different augmentation + videos = self.get_raw_video(sample) + videos_tensor = self.video_aug(videos, self.video_transform) + return {f"false_video_{rep}": videos_tensor} + + def get_text(self, raw_index, sample): + text = sample[0] + # print(text) + encoding = self.tokenizer( + text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + # print(encoding.size()) + return { + "text": (text, encoding), + "vid_index": raw_index, + "cap_index": raw_index, + "raw_index": raw_index, + } + + def get_false_text(self, rep): + random_index = random.randint(0, len(self.metadata) - 1) + sample = unpack_metadata(self, random_index) + text = sample[0] + encoding = self.tokenizer( + text, + # padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return {f"false_text_{rep}": (text, encoding)} + + def get_suite(self, index): + result = None + max_try = 10 + try_time = 0 + while result is None: + try_time += 1 + sample = unpack_metadata(self, index) + try: + ret = dict() + ret.update(self.get_video(index, sample)) + if not self.video_only: + txt = self.get_text(index, sample) + ret.update({"replica": True if txt["cap_index"] > 0 else False}) + ret.update(txt) + + for i in range(self.draw_false_video): + ret.update(self.get_false_video(i)) + for i in range(self.draw_false_text): + ret.update(self.get_false_text(i)) + result = True + except Exception as e: + index = random.randint(0, len(self.metadata) - 1) + exc = e + if try_time > max_try: + print(f"Exceed max time Error while read file idx {sample} in {self.names[0]} with error {exc}") + try_time=0 + return ret + + def __len__(self): + return len(self.metadata) + + def __getitem__(self, index): + return self.get_suite(index) \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/webvid10m.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/webvid10m.py new file mode 100644 index 0000000..c25424f --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/webvid10m.py @@ -0,0 +1,140 @@ +from .video_base_dataset import BaseDataset, read_frames_decord +import random +import os +import pandas as pd +from .pack_meta import pack_metadata, unpack_metadata + + +class WEBVID10MDataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + self.metadata = None + self.cut = "jsfusion" + if split == "train": + names = ["webvid10m_train"] + elif split == "val": + names = ["webvid10m_val"] + elif split == "test": + names = ["webvid10m_val"] + self._load_metadata() + + print(names, ": ", len(self.metadata), "samples in total.") + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + if "s3://" in self.data_dir: + self.data_dir = "s3://video_pub_new/WebVid10M/" + + def _load_metadata(self): + metadata_dir = '/mnt/cache/share_data/DSK_datasets/webvid/' + split_files = { + 'train': 'results_10M_train.csv', + 
'val': 'results_10M_val.csv', # there is no test + 'test': 'results_10M_val.csv' + } + target_split_fp = split_files[self.split] + metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp)) + metadata = metadata[["name", "page_dir", "videoid"]] + # metadata = metadata[:102400] + self.metadata = pack_metadata(self, metadata) + + def _get_video_path(self, sample): + rel_video_fp = os.path.join(str(sample[1]), str(sample[2]) + '.mp4') + if "s3://" in self.data_dir: + full_video_fp = os.path.join(self.data_dir, rel_video_fp) + else: + full_video_fp = os.path.join(self.data_dir, self.split, rel_video_fp) + return full_video_fp, rel_video_fp + + def _get_caption(self, sample): + return sample[0] + + def get_raw_video(self, sample): + abs_fp, rel_fp = self._get_video_path(sample) + videos, idxs, vlen = read_frames_decord(abs_fp, self.num_frames, mode=self.split) + if videos is None: + raise Exception("Invalid video!", rel_fp) + else: + return videos + + def get_video(self, index, sample): + videos = self.get_raw_video(sample) + videos_tensor = self.video_aug(videos, self.video_transform) + return { + "video": videos_tensor, + "vid_index": index, + "cap_index": index, + "raw_index": index, + } + + def get_false_video(self, rep): + random_index = random.randint(0, len(self.metadata) - 1) + sample = unpack_metadata(self, random_index) + # can be different augmentation + videos = self.get_raw_video(sample) + videos_tensor = self.video_aug(videos, self.video_transform) + return {f"false_video_{rep}": videos_tensor} + + def get_text(self, raw_index, sample): + text = sample[0] + # print(text) + encoding = self.tokenizer( + text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + # print(encoding.size()) + return { + "text": (text, encoding), + "vid_index": raw_index, + "cap_index": raw_index, + "raw_index": raw_index, + } + + def get_false_text(self, rep): + random_index = random.randint(0, len(self.metadata) - 1) + sample = unpack_metadata(self, random_index) + text = sample[0] + encoding = self.tokenizer( + text, + # padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return {f"false_text_{rep}": (text, encoding)} + + def get_suite(self, index): + result = None + max_try = 10 + try_time = 0 + while result is None: + try_time += 1 + sample = unpack_metadata(self, index) + try: + ret = dict() + ret.update(self.get_video(index, sample)) + if not self.video_only: + txt = self.get_text(index, sample) + ret.update({"replica": True if txt["cap_index"] > 0 else False}) + ret.update(txt) + + for i in range(self.draw_false_video): + ret.update(self.get_false_video(i)) + for i in range(self.draw_false_text): + ret.update(self.get_false_text(i)) + result = True + except Exception as e: + index = random.randint(0, len(self.metadata) - 1) + exc = e + if try_time > max_try: + print(f"Exceed max time Error while read file idx {sample} in {self.names[0]} with error {exc}") + try_time=0 + return ret + + def __len__(self): + return len(self.metadata) + + def __getitem__(self, index): + return self.get_suite(index) \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/webvid_old.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/webvid_old.py new file mode 100644 index 0000000..63f6aaa --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/webvid_old.py @@ -0,0 +1,140 @@ +from .video_base_dataset 
import BaseDataset, read_frames_decord +import random +import os +import pandas as pd + + +class WEBVIDDataset(BaseDataset): + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + self.metadata = None + self.cut = "jsfusion" + if split == "train": + names = ["webvid_train"] + elif split == "val": + names = ["webvid_val"] + elif split == "test": + names = ["webvid_val"] + self._load_metadata() + + print(names, ": ", len(self.metadata), "samples in total.") + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + if "s3://" in self.data_dir: + # Remove this fucking auto dir name + self.data_dir = os.path.dirname(self.data_dir) + # Add the real path + self.data_dir = os.path.join(self.data_dir, "WebVid2M") + + def _load_metadata(self): + metadata_dir = './meta_data/webvid' + split_files = { + 'train': 'webvid_training_success_full.tsv', + 'val': 'webvid_validation_success_full.tsv', # there is no test + 'test': 'webvid_validation_success_full.tsv' + } + target_split_fp = split_files[self.split] + metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t') + self.metadata = metadata + + def _get_video_path(self, sample): + rel_video_fp = sample[1] + '.mp4' + if "s3://" in self.data_dir: + full_video_fp = os.path.join(self.data_dir, rel_video_fp) + else: + full_video_fp = os.path.join(self.data_dir, self.split, rel_video_fp) + return full_video_fp, rel_video_fp + + def _get_caption(self, sample): + return sample[0] + + def get_raw_video(self, sample): + abs_fp, rel_fp = self._get_video_path(sample) + videos, idxs, vlen = read_frames_decord(abs_fp, self.num_frames, mode=self.split) + if videos is None: + raise Exception("Invalid video!", rel_fp) + else: + return videos + + def get_video(self, index, sample): + videos = self.get_raw_video(sample) + videos_tensor = self.video_aug(videos, self.video_transform) + return { + "video": videos_tensor, + "vid_index": index, + "cap_index": index, + "raw_index": index, + } + + def get_false_video(self, rep): + random_index = random.randint(0, len(self.metadata) - 1) + sample = self.metadata.iloc[random_index] + # can be different augmentation + videos = self.get_raw_video(sample) + videos_tensor = self.video_aug(videos, self.video_transform) + return {f"false_video_{rep}": videos_tensor} + + def get_text(self, raw_index, sample): + text = sample[0] + # print(text) + encoding = self.tokenizer( + text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + # print(encoding.size()) + return { + "text": (text, encoding), + "vid_index": raw_index, + "cap_index": raw_index, + "raw_index": raw_index, + } + + def get_false_text(self, rep): + random_index = random.randint(0, len(self.metadata) - 1) + sample = self.metadata.iloc[random_index] + text = sample[0] + encoding = self.tokenizer( + text, + # padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return {f"false_text_{rep}": (text, encoding)} + + def get_suite(self, index): + result = None + max_try = 10 + try_time = 0 + while result is None: + try_time += 1 + sample = self.metadata.iloc[index] + try: + ret = dict() + ret.update(self.get_video(index, sample)) + if not self.video_only: + txt = self.get_text(index, sample) + ret.update({"replica": True if txt["cap_index"] > 0 else False}) + ret.update(txt) + + for i in range(self.draw_false_video): + ret.update(self.get_false_video(i)) + for i 
in range(self.draw_false_text): + ret.update(self.get_false_text(i)) + result = True + except Exception as e: + index = random.randint(0, len(self.metadata) - 1) + exc = e + if try_time > max_try: + print(f"Exceed max time Error while read file idx {sample} in {self.names[0]} with error {exc}") + try_time=0 + return ret + + def __len__(self): + return len(self.metadata) + + def __getitem__(self, index): + return self.get_suite(index) \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/youtube.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/youtube.py new file mode 100644 index 0000000..72d5102 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/youtube.py @@ -0,0 +1,255 @@ +from .video_base_dataset import BaseDataset +import torch as th +import pandas as pd +import os +import numpy as np +import random +import ffmpeg + +import io +import decord +import re +decord.bridge.set_bridge('torch') +from CoTrain.datasets import client + + +class YOUTUBEDataset(BaseDataset): + """Youtube Video-Text loader.""" + + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + + if split == "train": + names = ["youtube_train"] + elif split == "val": + names = ["youtube_val"] + elif split == "test": + names = ["youtube_test"] + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + + self.metadata = None + self._load_metadata() + # for howto100 + self.min_time = 4.0 + self.size = 256 + self.fps = 2 + self.num_sec = self.num_frames / float(self.fps) + self.crop_only = True + if self.split == 'train': + self.center_crop = False + else: + self.center_crop = True + self.benchmark = False + self.num_candidates = 1 + self.random_flip = True + + self.data_dir = "s3://youtubeBucket/videos/" + self.caption_dir = "s3://liyizhuo/youtubeProcessed/" # TODO: Remove this piece of shit + + print(names, ": ", len(self.metadata), "samples in total.") + + def _load_metadata(self): + metadata_dir = './meta_data/youtube' + split_files = { + 'train': 'youtube_train.csv', + 'val': 'youtube_val.csv', # there is no test + 'test': 'youtube_val.csv' + } + target_split_fp = split_files[self.split] + metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t') + self.metadata = metadata["Name"] + + def read_frames_ffmpeg(self, video_path, start, end): + start_seek = random.randint(int(start), int(max(start, end - self.num_sec))) + cmd = ( + ffmpeg + .input(video_path, ss=start_seek, t=self.num_sec + 0.01) + .filter('fps', fps=self.fps) + ) + if self.center_crop: + aw, ah = 0.5, 0.5 + else: + aw, ah = random.uniform(0, 1), random.uniform(0, 1) + if self.crop_only: + cmd = ( + cmd.crop('(iw - {})*{}'.format(self.size, aw), + '(ih - {})*{}'.format(self.size, ah), + str(self.size), str(self.size)) + ) + else: + cmd = ( + cmd.crop('(iw - min(iw,ih))*{}'.format(aw), + '(ih - min(iw,ih))*{}'.format(ah), + 'min(iw,ih)', + 'min(iw,ih)') + .filter('scale', self.size, self.size) + ) + if self.random_flip and random.uniform(0, 1) > 0.5: + cmd = cmd.hflip() + out, _ = ( + cmd.output('pipe:', format='rawvideo', pix_fmt='rgb24').run(capture_stdout=True, quiet=True) + ) + # print(np.frombuffer(out, np.uint8).shape) + video = np.frombuffer(out, np.uint8).reshape([-1, self.size, self.size, 3]) + video_tensor = th.from_numpy(np.copy(video)) + video_tensor = video_tensor.permute(3, 0, 1, 2) + 0.01 # prevent all dark vide + if video_tensor.shape[1] < 
self.num_frames: + zeros = th.ones((3, self.num_frames - video_tensor.shape[1], self.size, self.size), dtype=th.uint8) + video_tensor = th.cat((video_tensor, zeros), axis=1) + return video_tensor[:, :self.num_frames] + + def read_frames_decord(self, video_path, start, end): + # Build Decord conntainer + video_bytes = client.get(video_path) + assert video_bytes is not None, "Get video failed from {}".format(video_path) + if isinstance(video_bytes, bytes): + video_bytes = io.BytesIO(video_bytes) + container = decord.VideoReader(video_bytes, ctx=decord.cpu(0)) + + real_fps = container.get_avg_fps() + start_frame, end_frame = real_fps * start, real_fps * end + num_real_frames = self.num_frames / self.fps * real_fps + start_seek = random.randint(int(start_frame), int(max(start_frame, end_frame - num_real_frames))) + indexes = np.linspace(start_seek, start_seek + num_real_frames, self.num_frames, endpoint=False, dtype=int) + indexes = [x for x in indexes if x < len(container)] + assert len(indexes) > 0, "Failed to decode from {}".format(video_path) + frames = container.get_batch(indexes) # [T, H, W, C] + _, H, W, _ = frames.shape + assert self.crop_only + if self.center_crop: + aw, ah = 0.5, 0.5 + else: + aw, ah = random.uniform(0, 1), random.uniform(0, 1) + + top = int((H - self.size) * ah) + left = int((W - self.size) * aw) + bottom, right = top + self.size, left + self.size + frames = frames[:, top:bottom, left:right, :] + + if self.random_flip and random.uniform(0, 1) > 0.5: + frames = frames.flip([-2]) + + video_tensor = frames.permute(3, 0, 1, 2) + 0.01 # prevent all dark vide + if video_tensor.shape[1] < self.num_frames: + zeros = th.ones((3, self.num_frames - video_tensor.shape[1], self.size, self.size), dtype=th.uint8) + video_tensor = th.cat((video_tensor, zeros), axis=1) + # assert False, (video_tensor.min(), video_tensor.max()) + # assert False, video_tensor.shape + return video_tensor[:, :self.num_frames] + + # time consuming > load video, where is the csv file? (cfs->ceph) + def get_caption(self, caption): + lines = client.get(caption).decode().split("\n") + ind = random.randint(0, len(lines) - 1) + line = lines[ind + 1].split(",") + start, end = float(line[0]), float(line[1]) + text = ",".join(line[2:]) + # text = cap['text'].values[ind] + # start, end = cap['start'].values[ind], cap['end'].values[ind] + if end - start < self.min_time: + diff = self.min_time - end + start + start = max(0, start - diff / 2) + end = start + self.min_time + return text, start, end + + def get_text(self, sample): + caption_csv = self.get_caption_path(sample) + text, start, end = self.get_caption(caption_csv) + # print(text) + # TODO: May need to be improved for edge cases. 
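        # get_caption() picks a random data line of the per-video caption CSV
        # (each line is "start,end,text"; the `ind + 1` offset means the first
        # line, presumably a header, is never sampled) and widens spans shorter
        # than self.min_time (4 s) by shifting the start back and extending the
        # end. start/end are float seconds and are truncated to ints on return,
        # so the video readers receive whole-second boundaries.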
+ encoding = self.tokenizer( + text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return {"text": (text, encoding)}, int(start), int(end) + + def get_caption_path(self, sample): + # example xx/xx/xx.mp4 -> xx.csv + return os.path.join(self.caption_dir, sample.split('.')[0] + '.csv') + + def get_false_text(self, rep): + random_index = random.randint(0, len(self.metadata) - 1) + sample = self.metadata.iloc[random_index] + caption_csv = self.get_caption_path(sample) + text, start, end = self.get_caption(caption_csv) + encoding = self.tokenizer( + text, + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return {f"false_text_{rep}": (text, encoding)} + + def _get_video_path(self, sample): + rel_video_fp = sample + if "s3://" in self.data_dir: + rel_video_fp = sample.strip() + full_video_fp = os.path.join(self.data_dir, rel_video_fp) + return full_video_fp, rel_video_fp + + def get_raw_video(self, sample, begin, end): + abs_fp, rel_fp = self._get_video_path(sample) + if "s3://" in abs_fp: + videos = self.read_frames_decord(abs_fp, begin, end).permute(1, 0, 2, 3) + else: + videos = self.read_frames_ffmpeg(abs_fp, begin, end).permute(1, 0, 2, 3) + if videos is None: + raise Exception("Invalid img!", rel_fp) + else: + return videos + + def get_video(self, sample, start, end): + videos = self.get_raw_video(sample, start, end) + videos_tensor = self.video_aug(videos, self.video_transform, byte=True) + return videos_tensor + + def get_false_video(self, rep): + random_index = random.randint(0, len(self.metadata) - 1) + sample = self.metadata.iloc[random_index] + caption_csv = self.get_caption_path(sample) + _, start, end = self.get_caption(caption_csv) + videos = self.get_raw_video(sample, start, end) + videos_tensor = self.video_aug(videos, self.video_transform, byte=True) + return {f"false_video_{rep}": videos_tensor} + + def get_suite(self, index): + result = None + max_try = 10 + try_time = 0 + while result is None: + try_time += 1 + sample = self.metadata.iloc[index] + try: + ret = dict() + text, start, end = self.get_text(sample) + ret.update(text) + videos_tensor = self.get_video(sample, start, end) + ret.update({ + "video": videos_tensor, + "vid_index": index, + "cap_index": index, + "raw_index": index, + }) + ret.update({"replica": True if ret["cap_index"] > 0 else False}) + for i in range(self.draw_false_video): + ret.update(self.get_false_video(i)) + for i in range(self.draw_false_text): + ret.update(self.get_false_text(i)) + result = True + except Exception as e: + index = random.randint(0, len(self.metadata) - 1) + exc = e + if try_time > max_try: + print(f"Exceed max time Error while read file idx {sample} in {self.names[0]} with error {exc}") + try_time = 0 + return ret + + def __len__(self): + return len(self.metadata) + + def __getitem__(self, index): + return self.get_suite(index) diff --git a/Downstream/multi-modalities-downstream/CoTrain/datasets/video/yttemporal.py b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/yttemporal.py new file mode 100644 index 0000000..acd9255 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/datasets/video/yttemporal.py @@ -0,0 +1,267 @@ +from .video_base_dataset import BaseDataset, sample_frames, video_clip_reader, clean_subtitles, align_using_dtw +import torch as th +import pandas as pd +import os +import numpy as np +import random +import ffmpeg +import json +import ftfy + + +class 
YTTemporalDataset(BaseDataset): + """YTTemporal Video-Text loader.""" + + def __init__(self, *args, split="", **kwargs): + assert split in ["train", "val", "test"] + self.split = split + + if split == "train": + names = ["yttemporal_train"] + elif split == "val": + names = ["yttemporal_val"] + elif split == "test": + names = ["yttemporal_test"] + super().__init__(*args, **kwargs, names=names, text_column_name="caption") + + self.metadata = None + self._load_metadata() + self.min_time = 4.0 + self.size = 224 + self.fps = 2 + self.num_sec = self.num_frames / float(self.fps) + self.crop_only = True + if self.split == 'train': + self.center_crop = False + else: + self.center_crop = True + self.benchmark = False + self.num_candidates = 1 + self.random_flip = True + self._load_metadata() + + def _load_metadata(self): + metadata_dir = './meta_data/yttemporal' + split_files = { + 'train': 'train_success_2000000.csv', # _1000000 + 'val': 'val_success.csv', # there is no test + 'test': 'val_success.csv' + } + target_split_fp = split_files[self.split] + metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t') + self.metadata = metadata["Name"] + + def read_frames_ffmpeg(self, video_path, start, end): + start_seek = start + cmd = ( + ffmpeg + .input(video_path, ss=start_seek, t=end-start + 0.01) + .filter('fps', fps=self.fps) + ) + if self.center_crop: + aw, ah = 0.5, 0.5 + else: + aw, ah = random.uniform(0, 1), random.uniform(0, 1) + if self.crop_only: + cmd = ( + cmd.crop('(iw - {})*{}'.format(self.size, aw), + '(ih - {})*{}'.format(self.size, ah), + str(self.size), str(self.size)) + ) + else: + cmd = ( + cmd.crop('(iw - min(iw,ih))*{}'.format(aw), + '(ih - min(iw,ih))*{}'.format(ah), + 'min(iw,ih)', + 'min(iw,ih)') + .filter('scale', self.size, self.size) + ) + if self.random_flip and random.uniform(0, 1) > 0.5: + cmd = cmd.hflip() + out, _ = ( + cmd.output('pipe:', format='rawvideo', pix_fmt='rgb24') + .run(capture_stdout=True, quiet=True) + ) + video = np.frombuffer(out, np.uint8).reshape([-1, self.size, self.size, 3]) + video_tensor = th.from_numpy(np.copy(video)) + video_tensor = video_tensor.permute(3, 0, 1, 2) + 0.01 + # print(video_tensor.size()) + # print(video_tensor) + if video_tensor.shape[1] < self.num_frames: + zeros = th.ones((3, self.num_frames - video_tensor.shape[1], self.size, self.size), dtype=th.uint8) + video_tensor = th.cat((video_tensor, zeros), axis=1) + # # uniform n frames + # frame_indexs = sample_frames(self.num_frames, video_tensor.size(1)) + # out_tensors = th.ones((3, self.num_frames, self.size, self.size), dtype=th.uint8) + # for i in range(self.num_frames): + # out_tensors[:, i] = video_tensor[:, frame_indexs[i]] + # print(out_tensors) + # return out_tensors + return video_tensor[:, :self.num_frames] + + # # sample fix number of words + # def get_caption(self, caption_csv, words_len=32): + # with open(caption_csv, 'r') as f: + # cap = json.load(f) + # # random choice words_len words + # video_len = int(cap["info"]["duration"]) + # all_text = cap["subtitles"] # [{'word': 'hey', 'time': 0.0}, {'word': 'guys', 'time': 0.149}] + # word_count = len(all_text) + # + # # clean noisy asr text + # all_text = clean_subtitles(all_text) + # vtt = pd.DataFrame(all_text) + # denoised_word_by_word = [] + # for x in cap['denoised']: + # # Ftfy just in case + # cleanasr = ftfy.ftfy(x['cleanasr']) + # denoised_word_by_word += cleanasr.split(' ') + # # Align + # vtt['denoised'] = align_using_dtw(vtt['word'], denoised_word_by_word) + # max_word = min(word_count - 1, 
words_len) + # begin_word_index = random.randint(0, word_count - max_word) + # text = "" + # for i in range(max_word): + # text += vtt['denoised'][begin_word_index + i] + ' ' + # start = float(all_text[begin_word_index]['time']) + # end = float(all_text[min(word_count-1, begin_word_index + max_word)]['time']) + # # print(text, start, end) + # return text, start, end, video_len + + # sample fix video length + def get_caption(self, caption_csv): + with open(caption_csv, 'r') as f: + cap = json.load(f) + # random choice 10s-15s video clips + video_len = int(cap["info"]["duration"]) + start = random.randint(0, max(1, video_len-15)) + random.random() + clip_len = random.randint(10, 15) + end = min(video_len-1, start + clip_len) + all_text = cap["subtitles"] # [{'word': 'hey', 'time': 0.0}, {'word': 'guys', 'time': 0.149}] + # clean noisy asr text + all_text = clean_subtitles(all_text) + vtt = pd.DataFrame(all_text) + denoised_word_by_word = [] + for x in cap['denoised']: + # Ftfy just in case + cleanasr = ftfy.ftfy(x['cleanasr']) + denoised_word_by_word += cleanasr.split(' ') + # Align + vtt['denoised'] = align_using_dtw(vtt['word'], denoised_word_by_word) + text = "" + origin_text = "" + for index, item in enumerate(all_text): + if float(item['time']) > start and float(item['time']) < end: + text += vtt['denoised'][index] + " " + origin_text += item['word'] + " " + # print(text) + # print(origin_text) + if len(text) < 10: + Exception(IndexError) + if end - start < self.min_time: + diff = self.min_time - end + start + start = max(0, start - diff / 2) + end = start + self.min_time + return text, start, end, video_len + + def get_text(self, sample): + caption_csv = self.get_caption_path(sample) + text, start, end, duration = self.get_caption(caption_csv) + # print(text, start, end) + # print(text) + # TODO: May need to be improved for edge cases. 
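        # get_caption() samples a random 10-15 s window inside the video, cleans
        # the ASR subtitles, aligns them with the denoised transcript via
        # align_using_dtw(), and concatenates the aligned words whose timestamps
        # fall inside the window; windows shorter than self.min_time are widened.
        # Note that the `Exception(IndexError)` statement in get_caption() only
        # constructs an exception object without raising it, so captions shorter
        # than 10 characters still reach the tokenizer below.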
+ encoding = self.tokenizer( + text, + padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return {"text": (text, encoding)}, start, end, duration + + def get_caption_path(self, sample): + # YTTemporal/videos/subset_87/data/xx.mp4 -> YTTemporal/videos/subset_87/annotations/xx.csv + return os.path.join(self.data_dir, 'videos', sample.split('/')[0], 'annotations', sample.split('/')[-1][:-4] + '.json') + + def get_false_text(self, rep): + random_index = random.randint(0, len(self.metadata) - 1) + sample = self.metadata.iloc[random_index] + caption_csv = self.get_caption_path(sample) + text, start, end = self.get_caption(caption_csv) + encoding = self.tokenizer( + text, + # padding="max_length", + truncation=True, + max_length=self.max_text_len, + return_special_tokens_mask=True, + ) + return {f"false_text_{rep}": (text, encoding)} + + def _get_video_path(self, sample): + rel_video_fp = sample + full_video_fp = os.path.join(self.data_dir, 'videos', rel_video_fp) + return full_video_fp, rel_video_fp + + def get_raw_video(self, sample, begin, end, duration): + abs_fp, rel_fp = self._get_video_path(sample) + # print(abs_fp, rel_fp) + # imgs = self.read_frames_ffmpeg(abs_fp, begin, end).permute(1, 0, 2, 3) + videos = video_clip_reader(abs_fp, begin, end, duration, self.num_frames) + if videos.size(0) != self.num_frames: + raise Exception("video length not enough!", rel_fp) + if videos is None: + raise Exception("Invalid img!", rel_fp) + else: + return videos + + def get_video(self, sample, start, end, duration): + videos = self.get_raw_video(sample, start, end, duration) + videos_tensor = self.video_aug(videos, self.video_transform) + return videos_tensor + + def get_false_video(self, rep): + random_index = random.randint(0, len(self.metadata) - 1) + sample = self.metadata.iloc[random_index] + caption_csv = self.get_caption_path(sample) + _, start, end, duration = self.get_caption(caption_csv) + videos = self.get_raw_video(sample, start, end, duration) + videos_tensor = self.video_aug(videos, self.video_transform) + return {f"false_video_{rep}": videos_tensor} + + def get_suite(self, index): + result = None + max_try = 5 + try_time = 0 + while result is None: + try_time += 1 + sample = self.metadata.iloc[index] + # try: + ret = dict() + text, start, end, duration = self.get_text(sample) + ret.update(text) + videos_tensor = self.get_video(sample, start, end, duration) + # print(imgs_tensor.size()) + ret.update({ + "video": videos_tensor, + "vid_index": index, + "cap_index": index, + "raw_index": index, + }) + ret.update({"replica": True if ret["cap_index"] > 0 else False}) + for i in range(self.draw_false_video): + ret.update(self.get_false_video(i)) + for i in range(self.draw_false_text): + ret.update(self.get_false_text(i)) + result = True + # except Exception as e: + # # print(f"Error while read file idx {sample} in {self.names[0]} -> {e}") + # index = random.randint(0, len(self.metadata) - 1) + if try_time > max_try: + print(f"Exceed max time Error while read file idx {sample} in {self.names[0]}") + return ret + + def __len__(self): + return len(self.metadata) + + def __getitem__(self, index): + return self.get_suite(index) diff --git a/Downstream/multi-modalities-downstream/CoTrain/gadgets/__init__.py b/Downstream/multi-modalities-downstream/CoTrain/gadgets/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Downstream/multi-modalities-downstream/CoTrain/gadgets/my_metrics.py 
b/Downstream/multi-modalities-downstream/CoTrain/gadgets/my_metrics.py new file mode 100644 index 0000000..23c311e --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/gadgets/my_metrics.py @@ -0,0 +1,84 @@ +import torch +from torchmetrics import Metric +# import torchmetrics as Metric + +def order_class_index(order): + """Return the index of the order in its full permutation. + + Args: + order (tensor): e.g. [0,1,2] + """ + classes = list(itertools.permutations(list(range(len(order))))) + return classes.index(tuple(order.tolist())) + + +class Accuracy(Metric): + def __init__(self, dist_sync_on_step=True): + super().__init__(dist_sync_on_step=dist_sync_on_step) + self.add_state("correct", default=torch.tensor(0.0), dist_reduce_fx="sum") + self.add_state("total", default=torch.tensor(0.0), dist_reduce_fx="sum") + + def update(self, logits, target, unfilterd=False): + logits, target = ( + logits.detach().to(self.correct.device), + target.detach().to(self.correct.device), + ) + preds = logits.argmax(dim=-1) + preds = preds[target != -100] + unfilter_num = target.numel() + target = target[target != -100] + if target.numel() == 0: + return 1 + + assert preds.shape == target.shape + + self.correct += torch.sum(preds == target) + if unfilterd: + # print("no filter") + self.total += unfilter_num + else: + self.total += target.numel() + + def compute(self): + return self.correct / self.total + + +class Scalar(Metric): + def __init__(self, dist_sync_on_step=True): + super().__init__(dist_sync_on_step=dist_sync_on_step) + self.add_state("scalar", default=torch.tensor(0.0), dist_reduce_fx="sum") + self.add_state("total", default=torch.tensor(0.0), dist_reduce_fx="sum") + + def update(self, scalar): + if isinstance(scalar, torch.Tensor): + scalar = scalar.detach().to(self.scalar.device) + else: + scalar = torch.tensor(scalar).float().to(self.scalar.device) + self.scalar += scalar + self.total += 1 + + def compute(self): + return self.scalar / self.total + + +class VQAScore(Metric): + def __init__(self, dist_sync_on_step=True): + super().__init__(dist_sync_on_step=dist_sync_on_step) + self.add_state("score", default=torch.tensor(0.0), dist_reduce_fx="sum") + self.add_state("total", default=torch.tensor(0.0), dist_reduce_fx="sum") + + def update(self, logits, target): + logits, target = ( + logits.detach().float().to(self.score.device), + target.detach().float().to(self.score.device), + ) + logits = torch.max(logits, 1)[1] + one_hots = torch.zeros(*target.size()).to(target) + one_hots.scatter_(1, logits.view(-1, 1), 1) + scores = one_hots * target + + self.score += scores.sum() + self.total += len(logits) + + def compute(self): + return self.score / self.total diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/__init__.py b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/__init__.py new file mode 100644 index 0000000..d0130cc --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/__init__.py @@ -0,0 +1 @@ +from .internvideo import * \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/bpe_simple_vocab_16e6.txt.gz b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/bpe_simple_vocab_16e6.txt.gz new file mode 100644 index 0000000..7b5088a Binary files /dev/null and b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/bpe_simple_vocab_16e6.txt.gz differ diff --git 
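Editor's note: `Accuracy` above drops positions whose target is `-100` before comparing argmaxes, and `VQAScore` implements the standard VQA soft accuracy (one-hot of the predicted answer multiplied by the soft target scores). A small worked example of the `VQAScore.update` arithmetic, with made-up tensors:

```python
import torch

logits = torch.tensor([[2.0, 0.1, 0.3],     # predicts answer 0
                       [0.2, 0.1, 1.5]])    # predicts answer 2
target = torch.tensor([[1.0, 0.0, 0.3],     # answer 0 carries full agreement (score 1.0)
                       [0.0, 0.6, 0.0]])    # only answer 1 would have scored (0.6)

pred = logits.max(1)[1]                                            # [0, 2]
one_hots = torch.zeros_like(target).scatter_(1, pred.view(-1, 1), 1)
score = (one_hots * target).sum()                                  # 1.0 + 0.0
accuracy = score / len(logits)                                     # 0.5
```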
a/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/__init__.py b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/__init__.py new file mode 100644 index 0000000..dcc5619 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/__init__.py @@ -0,0 +1 @@ +from .clip import * diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/clip.py b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/clip.py new file mode 100644 index 0000000..f0972e2 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/clip.py @@ -0,0 +1,252 @@ +import hashlib +import os +import urllib +import warnings +from typing import Any, Union, List +from pkg_resources import packaging + +import torch +from PIL import Image +from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize +from tqdm import tqdm + +from .model import build_model +from .simple_tokenizer import SimpleTokenizer as _Tokenizer + +try: + from torchvision.transforms import InterpolationMode + BICUBIC = InterpolationMode.BICUBIC +except ImportError: + BICUBIC = Image.BICUBIC + + +if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"): + warnings.warn("PyTorch version 1.7.1 or higher is recommended") + + +__all__ = ["available_models", "load", "tokenize"] +# _tokenizer = _Tokenizer() + +_MODELS = { + "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", + "ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt", + "ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt", +} + + +def _download(url: str, root: str): + os.makedirs(root, exist_ok=True) + filename = os.path.basename(url) + + expected_sha256 = url.split("/")[-2] + download_target = os.path.join(root, filename) + + if os.path.exists(download_target) and not os.path.isfile(download_target): + raise RuntimeError(f"{download_target} exists and is not a regular file") + + if os.path.isfile(download_target): + if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256: + return download_target + else: + warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file") + + with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: + with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop: + while True: + buffer = source.read(8192) + if not buffer: + break + + output.write(buffer) + loop.update(len(buffer)) + + if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256: + raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match") + + return download_target + + +def _convert_image_to_rgb(image): + return image.convert("RGB") + + +def _transform(n_px): + return Compose([ + Resize(n_px, interpolation=BICUBIC), + CenterCrop(n_px), + _convert_image_to_rgb, + ToTensor(), + Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), + ]) + + +def available_models() -> List[str]: + """Returns the names of available CLIP models""" + return list(_MODELS.keys()) + + +def load( + name: 
str, + device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", + jit: bool = False, download_root: str = None, + # evl + n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.0, 0.0, 0.0, 0.0], cls_dropout=0.5, t_size=8, spatial_size=14, + use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True, dropout=0.0, no_pretrain=False, init_zero=True, + use_checkpoint=False, checkpoint_num=[0, 0, 0], + ): + """Load a CLIP model + + Parameters + ---------- + name : str + A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict + + device : Union[str, torch.device] + The device to put the loaded model + + jit : bool + Whether to load the optimized JIT model or more hackable non-JIT model (default). + + download_root: str + path to download the model files; by default, it uses "~/.cache/clip" + + Returns + ------- + model : torch.nn.Module + The CLIP model + + preprocess : Callable[[PIL.Image], torch.Tensor] + A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input + """ + if name in _MODELS: + model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip")) + elif os.path.isfile(name): + model_path = name + else: + raise RuntimeError(f"Model {name} not found; available models = {available_models()}") + + with open(model_path, 'rb') as opened_file: + try: + # loading JIT archive + model = torch.jit.load(opened_file, map_location=device if jit else "cpu").eval() + state_dict = None + except RuntimeError: + # loading saved state dict + if jit: + warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead") + jit = False + state_dict = torch.load(model_path, map_location="cpu") + + if not jit: + model = build_model( + state_dict or model.state_dict(), + n_layers=n_layers, n_dim=n_dim, n_head=n_head, mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, spatial_size=spatial_size, + use_t_conv=use_t_conv, use_image_attnmap=use_image_attnmap, use_t_pos_embed=use_t_pos_embed, no_pretrain=no_pretrain, + init_zero=init_zero, use_checkpoint=use_checkpoint, checkpoint_num=checkpoint_num, + ).to(device) + if str(device) == "cpu": + model.float() + return model, _transform(model.visual.input_resolution) + + # patch the device names + device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]) + device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1] + + def patch_device(module): + try: + graphs = [module.graph] if hasattr(module, "graph") else [] + except RuntimeError: + graphs = [] + + if hasattr(module, "forward1"): + graphs.append(module.forward1.graph) + + for graph in graphs: + for node in graph.findAllNodes("prim::Constant"): + if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"): + node.copyAttributes(device_node) + + model.apply(patch_device) + patch_device(model.encode_image) + patch_device(model.encode_text) + + # patch dtype to float32 on CPU + if str(device) == "cpu": + float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[]) + float_input = list(float_holder.graph.findNode("aten::to").inputs())[1] + float_node = float_input.node() + + def patch_float(module): + try: + graphs = [module.graph] if hasattr(module, "graph") else [] + except RuntimeError: + graphs = [] + + if 
hasattr(module, "forward1"): + graphs.append(module.forward1.graph) + + for graph in graphs: + for node in graph.findAllNodes("aten::to"): + inputs = list(node.inputs()) + for i in [1, 2]: # dtype can be the second or third argument to aten::to() + if inputs[i].node()["value"] == 5: + inputs[i].node().copyAttributes(float_node) + + model.apply(patch_float) + patch_float(model.encode_image) + patch_float(model.encode_text) + + model.float() + + return model, _transform(model.input_resolution.item()) + + +def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False, return_special_tokens_mask: bool = False) -> Union[torch.IntTensor, torch.LongTensor, torch.BoolTensor]: + """ + Returns the tokenized representation of given input string(s) + + Parameters + ---------- + texts : Union[str, List[str]] + An input string or a list of input strings to tokenize + + context_length : int + The context length to use; all CLIP models use 77 as the context length + + truncate: bool + Whether to truncate the text in case its encoding is longer than the context length + + Returns + ------- + A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]. + We return LongTensor when torch version is <1.8.0, since older index_select requires indices to be long. + """ + if isinstance(texts, str): + texts = [texts] + + sot_token = _tokenizer.encoder["<|startoftext|>"] + eot_token = _tokenizer.encoder["<|endoftext|>"] + all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts] + if packaging.version.parse(torch.__version__) < packaging.version.parse("1.8.0"): + result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) + else: + result = torch.zeros(len(all_tokens), context_length, dtype=torch.int) + + special_tokens_mask = torch.zeros(len(all_tokens), context_length, dtype=torch.bool) + + for i, tokens in enumerate(all_tokens): + if len(tokens) > context_length: + if truncate: + tokens = tokens[:context_length] + tokens[-1] = eot_token + else: + raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}") + result[i, :len(tokens)] = torch.tensor(tokens) + special_tokens_mask[i, len(tokens):] = 1 + + if return_special_tokens_mask: + return result, special_tokens_mask + + return result diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/model.py b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/model.py new file mode 100644 index 0000000..5dba2ad --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/model.py @@ -0,0 +1,368 @@ +from collections import OrderedDict +from typing import Tuple, Union + +import numpy as np +import torch +import torch.nn.functional as F +from torch import nn +from torch.utils.checkpoint import checkpoint_sequential + +from . 
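Editor's note: `clip.py` above is typically driven through `load()` and `tokenize()`. A minimal usage sketch is below; note that `tokenize()` references a module-level `_tokenizer`, whose instantiation (`_tokenizer = _Tokenizer()`) is left commented out in the file and would need to be re-enabled. The checkpoint name, text, and keyword values are placeholders.

```python
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
# `model` is the VideoIntern network built from the CLIP weights;
# `preprocess` is the torchvision transform for single frames.
model, preprocess = load("ViT-B/16", device=device, t_size=8, use_checkpoint=False)

tokens, special_mask = tokenize(["a person is cooking pasta"],
                                context_length=77,
                                return_special_tokens_mask=True)
text_feat = model.encode_text(tokens.long().to(device))   # .long() for older nn.Embedding
```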
import utils + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x: torch.Tensor): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None): + super().__init__() + + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential( + OrderedDict( + [ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)), + ] + ) + ) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + def attention(self, x: torch.Tensor): + self.attn_mask = ( + self.attn_mask.to(dtype=x.dtype, device=x.device) + if self.attn_mask is not None + else None + ) + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def forward(self, x: torch.Tensor): + x = x + self.attention(self.ln_1(x)) + x = x + self.mlp(self.ln_2(x)) + return x + + +class Transformer(nn.Module): + def __init__( + self, + width: int, + layers: int, + heads: int, + attn_mask: torch.Tensor = None, + use_checkpoint=False, + checkpoint_num=[0, 0], + ): + super().__init__() + self.width = width + self.layers = layers + self.resblocks = nn.Sequential( + *[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)] + ) + self.use_checkpoint = use_checkpoint + self.checkpoint_num = checkpoint_num + + def forward(self, x: torch.Tensor): + if self.use_checkpoint and self.checkpoint_num[1] > 0: + segments = min(len(self.resblocks), self.checkpoint_num[1]) + return checkpoint_sequential(self.resblocks, segments, x) + else: + return self.resblocks(x) + + +class VideoIntern(nn.Module): + def __init__( + self, + embed_dim: int, + # vision + vision_width: int, + # text + context_length: int, + vocab_size: int, + transformer_width: int, + transformer_heads: int, + transformer_layers: int, + # uni + n_layers=4, + n_dim=768, + n_head=12, + drop_path_rate=0.0, + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=8, + use_image_attnmap=True, + backbone='vit_2plus1d_dw_bias_b16', + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + use_checkpoint=False, + checkpoint_num=[0], + ): + super().__init__() + + self.vision_width = n_dim + + self.context_length = context_length + + self.visual = utils.__dict__[backbone]( + pretrained=False, + t_size=t_size, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + n_dim=n_dim, + n_head=n_head, + return_list=return_list, + drop_path_rate=drop_path_rate, + backbone_drop_path_rate=drop_path_rate, + use_checkpoint=use_checkpoint, + checkpoint_num=checkpoint_num, + ) + + self.visual_ln_post = nn.LayerNorm(n_dim) + scale = n_dim**-0.5 + self.visual_proj = nn.Parameter(scale * torch.randn(n_dim, embed_dim)) + self.return_qk = use_image_attnmap + self.return_num = n_layers + + self.transformer = Transformer( + width=transformer_width, + layers=transformer_layers, + heads=transformer_heads, + attn_mask=self.build_attention_mask(), + use_checkpoint=use_checkpoint, + checkpoint_num=checkpoint_num, + ) + + self.vocab_size = vocab_size + self.token_embedding = nn.Embedding(vocab_size, transformer_width) + self.positional_embedding = nn.Parameter( + torch.empty(self.context_length, transformer_width) + ) + self.ln_final = LayerNorm(transformer_width) + + 
self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim)) + self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) + + self.embed_dim = embed_dim + + # We seperate the mask embedding to load pretrained model + self.text_mask_embedding = nn.Parameter(torch.empty(1, 1, transformer_width)) + + # # To keep the num_embeddings unchanged, we add this to embedded text + # self.eot_token_embedding = nn.Parameter(torch.empty(1, transformer_width)) + + self.initialize_parameters() + + def initialize_parameters(self): + nn.init.normal_(self.token_embedding.weight, std=0.02) + nn.init.normal_(self.positional_embedding, std=0.01) + nn.init.normal_(self.text_mask_embedding, std=0.02) + # nn.init.constant_(self.eot_token_embedding, 0.0) + + proj_std = (self.transformer.width**-0.5) * ( + (2 * self.transformer.layers) ** -0.5 + ) + attn_std = self.transformer.width**-0.5 + fc_std = (2 * self.transformer.width) ** -0.5 + for block in self.transformer.resblocks: + nn.init.normal_(block.attn.in_proj_weight, std=attn_std) + nn.init.normal_(block.attn.out_proj.weight, std=proj_std) + nn.init.normal_(block.mlp.c_fc.weight, std=fc_std) + nn.init.normal_(block.mlp.c_proj.weight, std=proj_std) + + if self.text_projection is not None: + nn.init.normal_(self.text_projection, std=self.transformer.width**-0.5) + + nn.init.constant_(self.visual_ln_post.weight, 1.0) + nn.init.constant_(self.visual_ln_post.bias, 0.0) + + def build_attention_mask(self): + # lazily create causal attention mask, with full attention between the vision tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.empty(self.context_length, self.context_length) + mask.fill_(float("-inf")) + mask.triu_(1) # zero out the lower diagonal + return mask + + @property + def dtype(self): + return self.visual.conv1.weight.dtype + + def encode_video( + self, video, return_all_feats=False, masked_indices=None, mode="video" + ): + # video: [N, C, T, H, W] + feats = self.visual(video, return_all_feats=return_all_feats, mode=mode) + if return_all_feats: + x, feats = feats + else: + x = feats + x = self.visual_ln_post(x) + if self.visual_proj is not None: + x = x @ self.visual_proj + + if return_all_feats: + return x, feats # [N, C], [L, N, T, C] + + return x + + def encode_text(self, text, masked_indices=None, return_all_feats=False): + # assert (text.max(dim=-1)[0] + 1 == self.token_embedding.num_embeddings).all(), \ + # "The last token of each sentence should be eot_token, check the input" + + x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model] + # x[torch.arange(x.shape[0]), text.argmax(dim=-1)] += self.eot_token_embedding + + if masked_indices is not None: + x[masked_indices] = self.text_mask_embedding + + x = x + self.positional_embedding.type(self.dtype) + x = x.permute(1, 0, 2) # NLD -> LND + x = self.transformer(x) + x = x.permute(1, 0, 2) # LND -> NLD + x = self.ln_final(x).type(self.dtype) + + # x.shape = [batch_size, n_ctx, transformer.width] + # take features from the eot embedding (eot_token is the highest number in each sequence) + feats = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] + if self.text_projection is not None: + feats = feats @ self.text_projection + + if return_all_feats: + return feats, x + + return feats + + +def build_model( + state_dict: dict, + n_layers=4, + n_dim=768, + n_head=12, + mlp_factor=4.0, + drop_path_rate=0.0, + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=8, + spatial_size=14, + use_t_conv=True, + 
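Editor's note: `build_attention_mask` above produces the standard additive causal mask used by the text transformer. A small illustration (not part of the diff) for `context_length = 4`:

```python
import torch

mask = torch.empty(4, 4).fill_(float("-inf")).triu_(1)
# tensor([[0., -inf, -inf, -inf],
#         [0.,   0., -inf, -inf],
#         [0.,   0.,   0., -inf],
#         [0.,   0.,   0.,   0.]])
# zeros on/below the diagonal let a token attend to itself and earlier tokens;
# -inf above the diagonal blocks attention to future tokens once added to the logits.
```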
use_image_attnmap=True, + use_t_pos_embed=True, + no_pretrain=False, + init_zero=True, + use_checkpoint=False, + checkpoint_num=[0], +): + + if "visual.proj" in state_dict: + state_dict["visual_proj"] = state_dict["visual.proj"] + state_dict["visual_ln_post.weight"] = state_dict["visual.ln_post.weight"] + state_dict["visual_ln_post.bias"] = state_dict["visual.ln_post.bias"] + del state_dict["visual.proj"], state_dict["visual.ln_post.weight"], state_dict["visual.ln_post.bias"] + + vision_width = state_dict["visual.conv1.weight"].shape[0] + vision_layers = len( + [ + k + for k in state_dict.keys() + if k.startswith("visual.") and k.endswith(".attn.in_proj_weight") + ] + ) + vision_patch_size = state_dict["visual.conv1.weight"].shape[-1] + grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5) + image_resolution = vision_patch_size * grid_size + + embed_dim = state_dict["text_projection"].shape[1] + context_length = state_dict["positional_embedding"].shape[0] + vocab_size = state_dict["token_embedding.weight"].shape[0] + transformer_width = state_dict["ln_final.weight"].shape[0] + transformer_heads = transformer_width // 64 + transformer_layers = len( + set( + k.split(".")[2] + for k in state_dict + if k.startswith(f"transformer.resblocks") + ) + ) + + try: + vision_width = state_dict["visual_proj"].shape[0] + except: + vision_width = state_dict["visual.proj"].shape[0] + n_dim = vision_width + if vision_width == 768: + backbone = "vit_only_global_b16" + n_head = 12 + return_list = [8, 9, 10, 11] + elif vision_width == 1024: + backbone = "vit_only_global_l14" + n_head = 16 + return_list = [20, 21, 22, 23] + else: + raise NotImplementedError + + model = VideoIntern( + embed_dim, + vision_width, + context_length, + vocab_size, + transformer_width, + transformer_heads, + transformer_layers, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + t_size=t_size, + use_image_attnmap=use_image_attnmap, + backbone=backbone, + return_list=return_list, + use_checkpoint=use_checkpoint, + checkpoint_num=checkpoint_num, + ) + + for key in ["input_resolution", "context_length", "vocab_size"]: + if key in state_dict: + del state_dict[key] + + + def inflate_weight(weight_2d, time_dim, center=True): + if center: + weight_3d = torch.zeros(*weight_2d.shape) + weight_3d = weight_3d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1) + middle_idx = time_dim // 2 + weight_3d[:, :, middle_idx, :, :] = weight_2d + else: + weight_3d = weight_2d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1) + weight_3d = weight_3d / time_dim + return weight_3d + + + def load_state_dict_3d(model, state_dict): + state_dict_3d = model.state_dict() + for k in state_dict.keys(): + if state_dict[k].shape != state_dict_3d[k].shape: + if len(state_dict_3d[k].shape) <= 2: + print(f'Ignore: {k}') + continue + print(f'Inflate: {k}, {state_dict[k].shape} => {state_dict_3d[k].shape}') + time_dim = state_dict_3d[k].shape[2] + state_dict[k] = inflate_weight(state_dict[k], time_dim) + model.load_state_dict(state_dict, strict=False) + + load_state_dict_3d(model, state_dict) + + return model.eval() diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/simple_tokenizer.py b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/simple_tokenizer.py new file mode 100644 index 0000000..0a66286 --- /dev/null +++ 
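Editor's note: `inflate_weight` above (a helper nested inside `build_model`) turns a 2D patch-embedding kernel into a 3D one by placing it at the centre temporal index (or averaging it over time when `center=False`). A hypothetical check, assuming the helper were exposed at module level:

```python
import torch

w2d = torch.randn(8, 3, 16, 16)            # e.g. a conv1-style [C_out, C_in, k, k] weight
w3d = inflate_weight(w2d, time_dim=4)       # -> [8, 3, 4, 16, 16]
assert torch.equal(w3d[:, :, 2], w2d)                       # 2D kernel sits at the middle frame
assert torch.equal(w3d[:, :, 0], torch.zeros_like(w2d))     # other frames are zero-initialised
```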
b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/simple_tokenizer.py @@ -0,0 +1,132 @@ +import gzip +import html +import os +from functools import lru_cache + +import ftfy +import regex as re + + +@lru_cache() +def default_bpe(): + return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz") + + +@lru_cache() +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a corresponding list of unicode strings. + The reversible bpe codes work on unicode strings. + This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. + When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. + This is a signficant percentage of your normal, say, 32K bpe vocab. + To avoid that, we want lookup tables between utf-8 bytes and unicode strings. + And avoids mapping to whitespace/control characters the bpe code barfs on. + """ + bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8+n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def get_pairs(word): + """Return set of symbol pairs in a word. + Word is represented as tuple of symbols (symbols being variable-length strings). + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +def basic_clean(text): + text = ftfy.fix_text(text) + text = html.unescape(html.unescape(text)) + return text.strip() + + +def whitespace_clean(text): + text = re.sub(r'\s+', ' ', text) + text = text.strip() + return text + + +class SimpleTokenizer(object): + def __init__(self, bpe_path: str = default_bpe()): + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + merges = gzip.open(bpe_path).read().decode("utf-8").split('\n') + merges = merges[1:49152-256-2+1] + merges = [tuple(merge.split()) for merge in merges] + vocab = list(bytes_to_unicode().values()) + vocab = vocab + [v+'' for v in vocab] + for merge in merges: + vocab.append(''.join(merge)) + vocab.extend(['<|startoftext|>', '<|endoftext|>']) + self.encoder = dict(zip(vocab, range(len(vocab)))) + self.decoder = {v: k for k, v in self.encoder.items()} + self.bpe_ranks = dict(zip(merges, range(len(merges)))) + self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'} + self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE) + + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token[:-1]) + ( token[-1] + '',) + pairs = get_pairs(word) + + if not pairs: + return token+'' + + while True: + bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf'))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + new_word.extend(word[i:j]) + i = j + except: + new_word.extend(word[i:]) + break + + if word[i] == first and i < len(word)-1 and word[i+1] == second: + new_word.append(first+second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = ' '.join(word) + self.cache[token] = word + return word + + def encode(self, 
text): + bpe_tokens = [] + text = whitespace_clean(basic_clean(text)).lower() + for token in re.findall(self.pat, text): + token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8')) + bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')) + return bpe_tokens + + def decode(self, tokens): + text = ''.join([self.decoder[token] for token in tokens]) + text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('', ' ') + return text diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/utils/__init__.py b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/utils/__init__.py new file mode 100644 index 0000000..2dd97f0 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/utils/__init__.py @@ -0,0 +1,2 @@ +# from .evl_module import TransformerDecoder +from .clip_vit_only_global import vit_only_global_b32, vit_only_global_b16, vit_only_global_l14, vit_only_global_l14_336 \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/utils/attention.py b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/utils/attention.py new file mode 100644 index 0000000..28ce56d --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/utils/attention.py @@ -0,0 +1,203 @@ +#!/usr/bin/env python + +import warnings +from typing import Tuple, Optional + +import torch +from torch import Tensor +from torch.nn.modules.linear import Linear +from torch.nn.init import xavier_uniform_ +from torch.nn.init import constant_ +from torch.nn.init import xavier_normal_ +from torch.nn.parameter import Parameter +from torch.nn.modules.module import Module + +from .attention_module import multi_head_attention_forward + + +class _LinearWithBias(Linear): + bias: Tensor + + def __init__(self, in_features: int, out_features: int) -> None: + super().__init__(in_features, out_features, bias=True) + + +class MultiheadAttention(Module): + r"""Allows the model to jointly attend to information + from different representation subspaces. + See reference: Attention Is All You Need + + .. math:: + \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O + \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V) + + Args: + embed_dim: total dimension of the model. + num_heads: parallel attention heads. + dropout: a Dropout layer on attn_output_weights. Default: 0.0. + bias: add bias as module parameter. Default: True. + add_bias_kv: add bias to the key and value sequences at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + kdim: total number of features in key. Default: None. + vdim: total number of features in value. Default: None. + + Note: if kdim and vdim are None, they will be set to embed_dim such that + query, key, and value have the same number of features. 
+ + Examples:: + + >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads) + >>> attn_output, attn_output_weights = multihead_attn(query, key, value) + """ + bias_k: Optional[torch.Tensor] + bias_v: Optional[torch.Tensor] + + def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None): + super(MultiheadAttention, self).__init__() + self.embed_dim = embed_dim + self.kdim = kdim if kdim is not None else embed_dim + self.vdim = vdim if vdim is not None else embed_dim + self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim + + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" + + if self._qkv_same_embed_dim is False: + self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim)) + self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim)) + self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim)) + self.register_parameter('in_proj_weight', None) + else: + self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim)) + self.register_parameter('q_proj_weight', None) + self.register_parameter('k_proj_weight', None) + self.register_parameter('v_proj_weight', None) + + if bias: + self.in_proj_bias = Parameter(torch.empty(3 * embed_dim)) + else: + self.register_parameter('in_proj_bias', None) + self.out_proj = _LinearWithBias(embed_dim, embed_dim) + + if add_bias_kv: + self.bias_k = Parameter(torch.empty(1, 1, embed_dim)) + self.bias_v = Parameter(torch.empty(1, 1, embed_dim)) + else: + self.bias_k = self.bias_v = None + + self.add_zero_attn = add_zero_attn + + self._reset_parameters() + + def _reset_parameters(self): + if self._qkv_same_embed_dim: + xavier_uniform_(self.in_proj_weight) + else: + xavier_uniform_(self.q_proj_weight) + xavier_uniform_(self.k_proj_weight) + xavier_uniform_(self.v_proj_weight) + + if self.in_proj_bias is not None: + constant_(self.in_proj_bias, 0.) + constant_(self.out_proj.bias, 0.) + if self.bias_k is not None: + xavier_normal_(self.bias_k) + if self.bias_v is not None: + xavier_normal_(self.bias_v) + + def __setstate__(self, state): + # Support loading old MultiheadAttention checkpoints generated by v1.1.0 + if '_qkv_same_embed_dim' not in state: + state['_qkv_same_embed_dim'] = True + + super(MultiheadAttention, self).__setstate__(state) + + def forward(self, query, key, value, key_padding_mask=None, + need_weights=True, attn_mask=None, return_qk=False): + # type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor], bool) -> Tuple[Tensor, Optional[Tensor]] + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. When given a binary mask and a value is True, + the corresponding value on the attention layer will be ignored. When given + a byte mask and a value is non-zero, the corresponding value on the attention + layer will be ignored + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. 
+ + Shape: + - Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the position + with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensure that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` + is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + - return_qk: whether return Q and K. + + - Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. + """ + if return_qk: + if not self._qkv_same_embed_dim: + q, k, attn_output, attn_output_weights = multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, use_separate_proj_weight=True, + q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight, + v_proj_weight=self.v_proj_weight, return_qk=True) + else: + q, k, attn_output, attn_output_weights = multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, return_qk=True) + return q, k, attn_output, attn_output_weights + else: + if not self._qkv_same_embed_dim: + return multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, use_separate_proj_weight=True, + q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight, + v_proj_weight=self.v_proj_weight) + else: + return multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, 
self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask) diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/utils/attention_module.py b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/utils/attention_module.py new file mode 100644 index 0000000..277fd32 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/utils/attention_module.py @@ -0,0 +1,315 @@ +r"""Functional interface""" +import warnings +import math + +import torch +from torch import _VF +from torch._jit_internal import Optional, Tuple +from torch.overrides import has_torch_function, handle_torch_function + +from torch.nn.functional import pad, linear, softmax, dropout + +Tensor = torch.Tensor + + +def multi_head_attention_forward(query: Tensor, + key: Tensor, + value: Tensor, + embed_dim_to_check: int, + num_heads: int, + in_proj_weight: Tensor, + in_proj_bias: Tensor, + bias_k: Optional[Tensor], + bias_v: Optional[Tensor], + add_zero_attn: bool, + dropout_p: float, + out_proj_weight: Tensor, + out_proj_bias: Tensor, + training: bool = True, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + use_separate_proj_weight: bool = False, + q_proj_weight: Optional[Tensor] = None, + k_proj_weight: Optional[Tensor] = None, + v_proj_weight: Optional[Tensor] = None, + static_k: Optional[Tensor] = None, + static_v: Optional[Tensor] = None, + return_qk: bool = False + ) -> Tuple[Tensor, Optional[Tensor]]: + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + embed_dim_to_check: total dimension of the model. + num_heads: parallel attention heads. + in_proj_weight, in_proj_bias: input projection weight and bias. + bias_k, bias_v: bias of the key and value sequences to be added at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + dropout_p: probability of an element to be zeroed. + out_proj_weight, out_proj_bias: the output projection weight and bias. + training: apply dropout if is ``True``. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. This is an binary mask. When the value is True, + the corresponding value on the attention layer will be filled with -inf. + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. + use_separate_proj_weight: the function accept the proj. weights for query, key, + and value in different forms. If false, in_proj_weight will be used, which is + a combination of q_proj_weight, k_proj_weight, v_proj_weight. + q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias. + static_k, static_v: static key and value used for attention operators. + + + Shape: + Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. 
+ - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions + will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` + are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + - return_qk: whether return Q and K. + + Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. + """ + if not torch.jit.is_scripting(): + tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, + out_proj_weight, out_proj_bias) + if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops): + return handle_torch_function( + multi_head_attention_forward, tens_ops, query, key, value, + embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias, + bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight, + out_proj_bias, training=training, key_padding_mask=key_padding_mask, + need_weights=need_weights, attn_mask=attn_mask, + use_separate_proj_weight=use_separate_proj_weight, + q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight, + v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v) + tgt_len, bsz, embed_dim = query.size() + assert embed_dim == embed_dim_to_check + # allow MHA to have different sizes for the feature dimension + assert key.size(0) == value.size(0) and key.size(1) == value.size(1) + + head_dim = embed_dim // num_heads + assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads" + scaling = float(head_dim) ** -0.5 + + if not use_separate_proj_weight: + if torch.equal(query, key) and torch.equal(key, value): + # self-attention + q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1) + + elif torch.equal(key, value): + # encoder-decoder attention + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = linear(query, _w, _b) + + if key is None: + assert value is None + k = None + v = None + else: + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = None + _w = in_proj_weight[_start:, :] + if 
_b is not None: + _b = _b[_start:] + k, v = linear(key, _w, _b).chunk(2, dim=-1) + + else: + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = linear(query, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = embed_dim * 2 + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + k = linear(key, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim * 2 + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + v = linear(value, _w, _b) + else: + q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight) + len1, len2 = q_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == query.size(-1) + + k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight) + len1, len2 = k_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == key.size(-1) + + v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight) + len1, len2 = v_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == value.size(-1) + + if in_proj_bias is not None: + q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim]) + k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)]) + v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):]) + else: + q = linear(query, q_proj_weight_non_opt, in_proj_bias) + k = linear(key, k_proj_weight_non_opt, in_proj_bias) + v = linear(value, v_proj_weight_non_opt, in_proj_bias) + q = q * scaling + + if attn_mask is not None: + assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \ + attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \ + 'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype) + if attn_mask.dtype == torch.uint8: + warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.") + attn_mask = attn_mask.to(torch.bool) + + if attn_mask.dim() == 2: + attn_mask = attn_mask.unsqueeze(0) + if list(attn_mask.size()) != [1, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 2D attn_mask is not correct.') + elif attn_mask.dim() == 3: + if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 3D attn_mask is not correct.') + else: + raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim())) + # attn_mask's dim is 3 now. + + # convert ByteTensor key_padding_mask to bool + if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8: + warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.") + key_padding_mask = key_padding_mask.to(torch.bool) + + if bias_k is not None and bias_v is not None: + if static_k is None and static_v is None: + k = torch.cat([k, bias_k.repeat(1, bsz, 1)]) + v = torch.cat([v, bias_v.repeat(1, bsz, 1)]) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + else: + assert static_k is None, "bias cannot be added to static key." + assert static_v is None, "bias cannot be added to static value." 
+ else: + assert bias_k is None + assert bias_v is None + + # L, N, E + if return_qk: + return_q = q.clone() / scaling + return_k = k.clone() + + q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1) + if k is not None: + k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + if v is not None: + v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + + if static_k is not None: + assert static_k.size(0) == bsz * num_heads + assert static_k.size(2) == head_dim + k = static_k + + if static_v is not None: + assert static_v.size(0) == bsz * num_heads + assert static_v.size(2) == head_dim + v = static_v + + src_len = k.size(1) + + if key_padding_mask is not None: + assert key_padding_mask.size(0) == bsz + assert key_padding_mask.size(1) == src_len + + if add_zero_attn: + src_len += 1 + k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1) + v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + + attn_output_weights = torch.bmm(q, k.transpose(1, 2)) + assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len] + + if attn_mask is not None: + if attn_mask.dtype == torch.bool: + attn_output_weights.masked_fill_(attn_mask, float('-inf')) + else: + attn_output_weights += attn_mask + + + if key_padding_mask is not None: + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + attn_output_weights = attn_output_weights.masked_fill( + key_padding_mask.unsqueeze(1).unsqueeze(2), + float('-inf'), + ) + attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len) + + attn_output_weights = softmax( + attn_output_weights, dim=-1) + attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training) + + attn_output = torch.bmm(attn_output_weights, v) + assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim] + attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) + attn_output = linear(attn_output, out_proj_weight, out_proj_bias) + + if return_qk: + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return return_q, return_k, attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return return_q, return_k, attn_output, None + else: + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return attn_output, None diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/utils/attention_module_bias.py b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/utils/attention_module_bias.py new file mode 100644 index 0000000..969eedd --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/utils/attention_module_bias.py @@ -0,0 +1,318 @@ +r"""Functional interface""" +import warnings +import math + +import torch +from torch import _VF +from torch._jit_internal import Optional, Tuple +from torch.overrides import has_torch_function, handle_torch_function + +from torch.nn.functional import pad, linear, softmax, dropout + +Tensor = 
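Editor's note: stripped of projections, bias handling, and mask bookkeeping, the forward pass above reduces to per-head scaled dot-product attention. A bare-bones sketch (not the shipped function) of that core computation:

```python
import torch
from torch.nn.functional import softmax

def scaled_dot_product(q, k, v, attn_mask=None):
    # q, k, v: [batch * heads, seq_len, head_dim]
    scores = torch.bmm(q / q.size(-1) ** 0.5, k.transpose(1, 2))
    if attn_mask is not None:
        scores = scores + attn_mask        # float mask is additive; -inf blocks a position
    weights = softmax(scores, dim=-1)
    return torch.bmm(weights, v), weights
```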
torch.Tensor + + +def multi_head_attention_forward(query: Tensor, + key: Tensor, + value: Tensor, + embed_dim_to_check: int, + num_heads: int, + in_proj_weight: Tensor, + in_proj_bias: Tensor, + bias_k: Optional[Tensor], + bias_v: Optional[Tensor], + add_zero_attn: bool, + dropout_p: float, + out_proj_weight: Tensor, + out_proj_bias: Tensor, + training: bool = True, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + use_separate_proj_weight: bool = False, + q_proj_weight: Optional[Tensor] = None, + k_proj_weight: Optional[Tensor] = None, + v_proj_weight: Optional[Tensor] = None, + static_k: Optional[Tensor] = None, + static_v: Optional[Tensor] = None, + return_qk: bool = False, + rpb: Tensor = None, + ) -> Tuple[Tensor, Optional[Tensor]]: + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + embed_dim_to_check: total dimension of the model. + num_heads: parallel attention heads. + in_proj_weight, in_proj_bias: input projection weight and bias. + bias_k, bias_v: bias of the key and value sequences to be added at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + dropout_p: probability of an element to be zeroed. + out_proj_weight, out_proj_bias: the output projection weight and bias. + training: apply dropout if is ``True``. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. This is an binary mask. When the value is True, + the corresponding value on the attention layer will be filled with -inf. + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. + use_separate_proj_weight: the function accept the proj. weights for query, key, + and value in different forms. If false, in_proj_weight will be used, which is + a combination of q_proj_weight, k_proj_weight, v_proj_weight. + q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias. + static_k, static_v: static key and value used for attention operators. + + + Shape: + Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions + will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. 
If a BoolTensor is provided, positions with ``True`` + are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + - return_qk: whether return Q and K. + - rpb: relative postion bias + + Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. + """ + if not torch.jit.is_scripting(): + tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, + out_proj_weight, out_proj_bias) + if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops): + return handle_torch_function( + multi_head_attention_forward, tens_ops, query, key, value, + embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias, + bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight, + out_proj_bias, training=training, key_padding_mask=key_padding_mask, + need_weights=need_weights, attn_mask=attn_mask, + use_separate_proj_weight=use_separate_proj_weight, + q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight, + v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v) + tgt_len, bsz, embed_dim = query.size() + assert embed_dim == embed_dim_to_check + # allow MHA to have different sizes for the feature dimension + assert key.size(0) == value.size(0) and key.size(1) == value.size(1) + + head_dim = embed_dim // num_heads + assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads" + scaling = float(head_dim) ** -0.5 + + if not use_separate_proj_weight: + if torch.equal(query, key) and torch.equal(key, value): + # self-attention + q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1) + + elif torch.equal(key, value): + # encoder-decoder attention + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = linear(query, _w, _b) + + if key is None: + assert value is None + k = None + v = None + else: + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + k, v = linear(key, _w, _b).chunk(2, dim=-1) + + else: + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = linear(query, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = embed_dim * 2 + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + k = linear(key, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim * 2 + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + 
v = linear(value, _w, _b) + else: + q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight) + len1, len2 = q_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == query.size(-1) + + k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight) + len1, len2 = k_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == key.size(-1) + + v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight) + len1, len2 = v_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == value.size(-1) + + if in_proj_bias is not None: + q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim]) + k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)]) + v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):]) + else: + q = linear(query, q_proj_weight_non_opt, in_proj_bias) + k = linear(key, k_proj_weight_non_opt, in_proj_bias) + v = linear(value, v_proj_weight_non_opt, in_proj_bias) + q = q * scaling + + if attn_mask is not None: + assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \ + attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \ + 'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype) + if attn_mask.dtype == torch.uint8: + warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.") + attn_mask = attn_mask.to(torch.bool) + + if attn_mask.dim() == 2: + attn_mask = attn_mask.unsqueeze(0) + if list(attn_mask.size()) != [1, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 2D attn_mask is not correct.') + elif attn_mask.dim() == 3: + if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 3D attn_mask is not correct.') + else: + raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim())) + # attn_mask's dim is 3 now. + + # convert ByteTensor key_padding_mask to bool + if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8: + warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.") + key_padding_mask = key_padding_mask.to(torch.bool) + + if bias_k is not None and bias_v is not None: + if static_k is None and static_v is None: + k = torch.cat([k, bias_k.repeat(1, bsz, 1)]) + v = torch.cat([v, bias_v.repeat(1, bsz, 1)]) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + else: + assert static_k is None, "bias cannot be added to static key." + assert static_v is None, "bias cannot be added to static value." 
+ else: + assert bias_k is None + assert bias_v is None + + # L, N, E + if return_qk: + return_q = q.clone() / scaling + return_k = k.clone() + + q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1) + if k is not None: + k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + if v is not None: + v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + + if static_k is not None: + assert static_k.size(0) == bsz * num_heads + assert static_k.size(2) == head_dim + k = static_k + + if static_v is not None: + assert static_v.size(0) == bsz * num_heads + assert static_v.size(2) == head_dim + v = static_v + + src_len = k.size(1) + + if key_padding_mask is not None: + assert key_padding_mask.size(0) == bsz + assert key_padding_mask.size(1) == src_len + + if add_zero_attn: + src_len += 1 + k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1) + v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + + attn_output_weights = torch.bmm(q, k.transpose(1, 2)) + assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len] + + if attn_mask is not None: + if attn_mask.dtype == torch.bool: + attn_output_weights.masked_fill_(attn_mask, float('-inf')) + else: + attn_output_weights += attn_mask + + + if key_padding_mask is not None: + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + attn_output_weights = attn_output_weights.masked_fill( + key_padding_mask.unsqueeze(1).unsqueeze(2), + float('-inf'), + ) + attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len) + + if rpb is not None: + attn_output_weights = attn_output_weights + rpb + attn_output_weights = softmax(attn_output_weights, dim=-1) + attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training) + + attn_output = torch.bmm(attn_output_weights, v) + assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim] + attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) + attn_output = linear(attn_output, out_proj_weight, out_proj_bias) + + if return_qk: + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return return_q, return_k, attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return return_q, return_k, attn_output, None + else: + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return attn_output, None diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/utils/clip_vit_only_global.py b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/utils/clip_vit_only_global.py new file mode 100644 index 0000000..1817e4a --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/utils/clip_vit_only_global.py @@ -0,0 +1,483 @@ +#!/usr/bin/env python +import os +from collections import OrderedDict + +from timm.models.layers import DropPath +import torch +from torch import nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint + +from 
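For readers tracing the shape bookkeeping in `multi_head_attention_forward` above, the following is a minimal, self-contained sketch of its bmm-based core (heads folded into the batch dimension, scaled dot product, optional key padding mask). The helper name is my own; the in/out projections, `bias_k`/`bias_v`, `static_k`/`static_v`, `add_zero_attn`, and `rpb` handling are deliberately omitted.

```python
# Minimal sketch of the bmm-based core above (illustrative helper; no in/out
# projections, biases, static_k/static_v, add_zero_attn or rpb handling).
import torch
import torch.nn.functional as F

def sdp_attention_core(q, k, v, num_heads, key_padding_mask=None, dropout_p=0.0):
    # q: (L, N, E), k/v: (S, N, E) -- the (seq, batch, embed) layout documented above
    tgt_len, bsz, embed_dim = q.shape
    head_dim = embed_dim // num_heads
    q = q * head_dim ** -0.5
    # fold heads into the batch dimension: (N*num_heads, L, head_dim)
    q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
    k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
    v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
    attn = torch.bmm(q, k.transpose(1, 2))                    # (N*num_heads, L, S)
    if key_padding_mask is not None:                          # (N, S) bool, True = padded
        attn = attn.view(bsz, num_heads, tgt_len, -1).masked_fill(
            key_padding_mask[:, None, None, :], float('-inf')
        ).view(bsz * num_heads, tgt_len, -1)
    attn = F.dropout(F.softmax(attn, dim=-1), p=dropout_p)
    out = torch.bmm(attn, v)                                  # (N*num_heads, L, head_dim)
    return out.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)

q, kv = torch.randn(5, 2, 64), torch.randn(7, 2, 64)
print(sdp_attention_core(q, kv, kv, num_heads=8).shape)       # torch.Size([5, 2, 64])
```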
.attention import MultiheadAttention + +import logging +logger = logging.getLogger(__name__) + +MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model' +_MODELS = { + "ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"), + "ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"), + "ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"), + "ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"), +} + + +def conv_1x1x1(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (1, 1, 1), (1, 1, 1), (0, 0, 0), groups=groups) + +def conv_3x3x3(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (3, 3, 3), (1, 1, 1), (1, 1, 1), groups=groups) + +def conv_1x3x3(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (1, 3, 3), (1, 1, 1), (0, 1, 1), groups=groups) + +def bn_3d(dim): + return nn.BatchNorm3d(dim) + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def __init__( + self, d_model, n_head, attn_mask=None, drop_path=0.0, + ): + super().__init__() + + self.n_head = n_head + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + # logger.info(f'Drop path rate: {drop_path}') + + # spatial + self.attn = MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + def attention(self, x): + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def forward(self, x, T=8, use_checkpoint=False): + # x: 1+HW, NT, C + # MHSA + if use_checkpoint: + attn_out = checkpoint.checkpoint(self.attention, self.ln_1(x)) + x = x + self.drop_path(attn_out) + else: + x = x + self.drop_path(self.attention(self.ln_1(x))) + # FFN + if use_checkpoint: + mlp_out = checkpoint.checkpoint(self.mlp, self.ln_2(x)) + x = x + self.drop_path(mlp_out) + else: + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class Extractor(nn.Module): + def __init__( + self, d_model, n_head, attn_mask=None, + mlp_factor=4.0, dropout=0.0, drop_path=0.0, + ): + super().__init__() + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + # logger.info(f'Drop path rate: {drop_path}') + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = nn.LayerNorm(d_model) + d_mlp = round(mlp_factor * d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_mlp)), + ("gelu", QuickGELU()), + ("dropout", nn.Dropout(dropout)), + ("c_proj", nn.Linear(d_mlp, d_model)) + ])) + self.ln_2 = nn.LayerNorm(d_model) + self.ln_3 = nn.LayerNorm(d_model) + self.attn_mask = attn_mask + + # zero init + nn.init.xavier_uniform_(self.attn.in_proj_weight) + nn.init.constant_(self.attn.out_proj.weight, 0.) + nn.init.constant_(self.attn.out_proj.bias, 0.) + nn.init.xavier_uniform_(self.mlp[0].weight) + nn.init.constant_(self.mlp[-1].weight, 0.) + nn.init.constant_(self.mlp[-1].bias, 0.) 
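A quick check of the zero initialization above: with `out_proj` (and the MLP's `c_proj`) zeroed, both residual branches of the Extractor contribute nothing at step 0, so the learnable query token passes through unchanged. A minimal sketch with a plain `nn.MultiheadAttention`:

```python
# With the output projection zero-initialized, the cross-attention residual
# branch is exactly a no-op at initialization, so training starts from identity.
import torch
from torch import nn

attn = nn.MultiheadAttention(embed_dim=64, num_heads=8)
nn.init.xavier_uniform_(attn.in_proj_weight)
nn.init.constant_(attn.out_proj.weight, 0.)
nn.init.constant_(attn.out_proj.bias, 0.)

x = torch.randn(1, 2, 64)    # query token:  (L=1, N=2, C=64)
y = torch.randn(50, 2, 64)   # patch tokens: (S=50, N=2, C=64)
out = x + attn(x, y, y, need_weights=False)[0]
print(torch.allclose(out, x))  # True -- the branch contributes exactly zero
```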
+ + def attention(self, x: torch.Tensor, y: torch.Tensor): + #self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + # return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0] + assert self.attn_mask is None # not implemented + # manual forward to add position information + d_model = self.ln_1.weight.size(0) + q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model] + + k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model] + v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:] + Tx, Ty, N = q.size(0), k.size(0), q.size(1) + q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5)) + + aff = aff.softmax(dim=-1) + out = aff @ v + out = out.permute(2, 0, 1, 3).flatten(2) + out = self.attn.out_proj(out) + return out + + def forward(self, x: torch.Tensor, y: torch.Tensor): + x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y))) + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class Transformer(nn.Module): + def __init__( + self, width, layers, heads, attn_mask=None, backbone_drop_path_rate=0., + use_checkpoint=False, checkpoint_num=[0], t_size=8, + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, + ): + super().__init__() + self.T = t_size + self.return_list = return_list + # Backbone + b_dpr = [x.item() for x in torch.linspace(0, backbone_drop_path_rate, layers)] + self.resblocks = nn.ModuleList([ + ResidualAttentionBlock( + width, heads, attn_mask, + drop_path=b_dpr[i], + ) for i in range(layers) + ]) + # checkpoint + self.use_checkpoint = use_checkpoint + self.checkpoint_num = checkpoint_num + # logger.info(f'Use checkpoint: {self.use_checkpoint}') + # logger.info(f'Checkpoint number: {self.checkpoint_num}') + + # Extractor + assert n_layers == len(return_list) + self.temporal_cls_token = nn.Parameter(torch.zeros(1, 1, n_dim)) + self.dpe = nn.ModuleList([ + nn.Conv3d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim) + for i in range(n_layers) + ]) + for m in self.dpe: + nn.init.constant_(m.bias, 0.) 
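The depthwise `Conv3d` modules registered just above act as a dynamic position encoding: in the forward pass the patch tokens (CLS excluded) are viewed as an `(N, C, T, H, W)` volume, convolved per channel, and added back residually. A shape-only sketch with illustrative sizes (ViT-B/16 at 224, so a 14x14 patch grid):

```python
# Shape-only sketch of the depthwise Conv3d position encoding: patch tokens
# are viewed as an (N, C, T, H, W) volume, convolved per channel (groups=C),
# and added back to the tokens. Sizes are illustrative (ViT-B/16 at 224).
import torch
from torch import nn

N, C, T, H = 2, 768, 8, 14
L = 1 + H * H                                          # CLS token + 14*14 patches
dpe = nn.Conv3d(C, C, kernel_size=3, stride=1, padding=1, bias=True, groups=C)

tokens = torch.randn(L, N, T, C)
feats = tokens[1:].permute(1, 3, 2, 0).reshape(N, C, T, H, H)
pos = dpe(feats).view(N, C, T, L - 1).permute(3, 0, 2, 1)
tokens = torch.cat([tokens[:1], tokens[1:] + pos], dim=0)   # residual positional signal
print(tokens.shape)                                         # torch.Size([197, 2, 8, 768])
```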
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)] + self.dec = nn.ModuleList([ + Extractor( + n_dim, n_head, mlp_factor=mlp_factor, + dropout=mlp_dropout[i], drop_path=dpr[i], + ) for i in range(n_layers) + ]) + # # projection + # self.proj = nn.Sequential( + # nn.LayerNorm(n_dim), + # nn.Dropout(cls_dropout), + # nn.Linear(n_dim, num_classes), + # ) + self.balance = nn.Parameter(torch.zeros((n_dim))) + self.sigmoid = nn.Sigmoid() + + def forward(self, x, mode='video', return_all_feats=False): + if mode == 'video': + T_down = self.T + else: + T_down = 1 + L, NT, C = x.shape + N = NT // T_down + H = W = int((L - 1) ** 0.5) + cls_token = self.temporal_cls_token.repeat(1, N, 1) + + j = -1 + for i, resblock in enumerate(self.resblocks): + if self.use_checkpoint and i < self.checkpoint_num[0]: + x = resblock(x, T_down, use_checkpoint=True) + else: + x = resblock(x, T_down) + if i in self.return_list: + j += 1 + tmp_x = x.clone() + tmp_x = tmp_x.view(L, N, T_down, C) + # dpe + _, tmp_feats = tmp_x[:1], tmp_x[1:].clone() + tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T_down, H, W) + tmp_feats = self.dpe[j](tmp_feats).view(N, C, T_down, L - 1).permute(3, 0, 2, 1) + tmp_x[1:] = tmp_x[1:] + tmp_feats + # enhancer + tmp_x = tmp_x.permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C + cls_token = self.dec[j](cls_token, tmp_x) + + weight = self.sigmoid(self.balance) + residual = x.view(L, N, T_down, C)[0].mean(1) # L, N, T, C + # return self.proj((1 - weight) * cls_token[0, :, :] + weight * residual) + feats = (1 - weight) * cls_token[0, :, :] + weight * residual + if return_all_feats: + return feats, x.view(L, N, T_down, C) + + return feats + + +class VisionTransformer(nn.Module): + def __init__( + self, + # backbone + input_resolution, patch_size, width, layers, heads, output_dim, backbone_drop_path_rate=0., + use_checkpoint=False, checkpoint_num=[0], t_size=8, + # extractor + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, + + ): + super().__init__() + self.input_resolution = input_resolution + self.output_dim = output_dim + self.conv1 = nn.Conv3d(3, width, (1, patch_size, patch_size), (1, patch_size, patch_size), (0, 0, 0), bias=False) + + scale = width ** -0.5 + self.class_embedding = nn.Parameter(scale * torch.randn(width)) + self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)) + self.ln_pre = LayerNorm(width) + + self.transformer = Transformer( + width, layers, heads, + backbone_drop_path_rate=backbone_drop_path_rate, + use_checkpoint=use_checkpoint, checkpoint_num=checkpoint_num, t_size=t_size, + return_list=return_list, n_layers=n_layers, n_dim=n_dim, n_head=n_head, + mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, num_classes=num_classes, + ) + + def forward(self, x, mode='video', return_all_feats=False): + x = self.conv1(x) # shape = [*, width, grid, grid] + N, C, T, H, W = x.shape + x = x.permute(0, 2, 3, 4, 1).reshape(N * T, H * W, C) + + x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] + x = x + self.positional_embedding.to(x.dtype) + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) # NLD -> LND + out = self.transformer(x, mode=mode, 
return_all_feats=return_all_feats) + return out + + +def inflate_weight(weight_2d, time_dim, center=True): + if center: + weight_3d = torch.zeros(*weight_2d.shape) + weight_3d = weight_3d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1) + middle_idx = time_dim // 2 + weight_3d[:, :, middle_idx, :, :] = weight_2d + else: + weight_3d = weight_2d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1) + weight_3d = weight_3d / time_dim + return weight_3d + + +def load_state_dict(model, state_dict): + state_dict_3d = model.state_dict() + for k in state_dict.keys(): + if state_dict[k].shape != state_dict_3d[k].shape: + if len(state_dict_3d[k].shape) <= 2: + logger.info(f'Ignore: {k}') + continue + logger.info(f'Inflate: {k}, {state_dict[k].shape} => {state_dict_3d[k].shape}') + time_dim = state_dict_3d[k].shape[2] + state_dict[k] = inflate_weight(state_dict[k], time_dim) + model.load_state_dict(state_dict, strict=False) + + +def vit_only_global_b32( + pretrained=True, use_checkpoint=False, checkpoint_num=[0], + t_size=16, backbone_drop_path_rate=0., + return_list=[8, 9, 10, 11], + n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, + ): + model = VisionTransformer( + input_resolution=224, + patch_size=32, + width=768, + layers=12, + heads=12, + output_dim=512, + use_checkpoint=use_checkpoint, + checkpoint_num=checkpoint_num, + t_size=t_size, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + if pretrained: + logger.info('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu') + load_state_dict(model, state_dict) + return model.eval() + + +def vit_only_global_b16( + pretrained=True, use_checkpoint=False, checkpoint_num=[0], + t_size=16, backbone_drop_path_rate=0., + return_list=[8, 9, 10, 11], + n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, +): + model = VisionTransformer( + input_resolution=224, + patch_size=16, + width=768, + layers=12, + heads=12, + output_dim=512, + use_checkpoint=use_checkpoint, + checkpoint_num=checkpoint_num, + t_size=t_size, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + if pretrained: + logger.info('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu') + load_state_dict(model, state_dict) + return model.eval() + + +def vit_only_global_l14( + pretrained=True, use_checkpoint=False, checkpoint_num=[0], + t_size=16, backbone_drop_path_rate=0., + return_list=[20, 21, 22, 23], + n_layers=4, n_dim=1024, n_head=16, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, +): + model = VisionTransformer( + input_resolution=224, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + use_checkpoint=use_checkpoint, + checkpoint_num=checkpoint_num, + t_size=t_size, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + 
drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + if pretrained: + logger.info('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu') + load_state_dict(model, state_dict) + return model.eval() + + +def vit_only_global_l14_336( + pretrained=True, use_checkpoint=False, checkpoint_num=[0], + t_size=16, backbone_drop_path_rate=0., + return_list=[20, 21, 22, 23], + n_layers=4, n_dim=1024, n_head=16, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, +): + model = VisionTransformer( + input_resolution=336, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + use_checkpoint=use_checkpoint, + checkpoint_num=checkpoint_num, + t_size=t_size, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + if pretrained: + logger.info('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu') + load_state_dict(model, state_dict) + return model.eval() + + +if __name__ == '__main__': + import time + from fvcore.nn import FlopCountAnalysis + from fvcore.nn import flop_count_table + import numpy as np + + seed = 4217 + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + num_frames = 8 + + model = vit_only_global_l14( + pretrained=False, + t_size=num_frames, backbone_drop_path_rate=0.2, drop_path_rate=0.4, + use_checkpoint=True, checkpoint_num=[0], + ) + + flops = FlopCountAnalysis(model, torch.rand(1, 3, num_frames, 224, 224)) + s = time.time() + logger.info(flop_count_table(flops, max_depth=1)) + logger.info(time.time()-s) \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/internvideo.py b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/internvideo.py new file mode 100644 index 0000000..b6aa80d --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/internvideo.py @@ -0,0 +1,101 @@ +import torch +import numpy as np +import decord + +from typing import Any, OrderedDict, Union, List +from pkg_resources import packaging +from torchvision import transforms + +from . 
import video_transform +from .simple_tokenizer import SimpleTokenizer as _Tokenizer +from .clip_utils.model import build_model +from .clip_utils import load as load + + +__all__ = ["load_model", "load_video", "tokenize", "load"] +_tokenizer = _Tokenizer() + + +def load_model(path): + state = torch.load(path, map_location="cpu")["state_dict"] + state = {k[len("clip.") :]: v for k, v in state.items() if k.startswith("clip.")} + model = build_model(state_dict=state) + return model + + +def load_video(path): + video_reader = decord.VideoReader(path, num_threads=1, ctx=decord.cpu(0)) + decord.bridge.set_bridge('torch') + video_len = len(video_reader) + video = video_reader.get_batch(np.linspace(0, video_len - 1, 8).astype(np.int)).byte() + video = video.permute(3, 0, 1, 2) + + input_mean = [0.48145466, 0.4578275, 0.40821073] + input_std = [0.26862954, 0.26130258, 0.27577711] + crop_size, scale_size = 224, 256 + trans = transforms.Compose([ + video_transform.TensorToNumpy(), + video_transform.Resize(scale_size), + video_transform.CenterCrop(crop_size), + video_transform.ClipToTensor(channel_nb=3), + video_transform.Normalize(mean=input_mean, std=input_std) + ]) + + video = trans(video) + return video + + +def tokenize( + texts: Union[str, List[str]], + context_length: int = 77, + truncate: bool = False, + return_special_tokens_mask: bool = False, +) -> Union[torch.IntTensor, torch.LongTensor, torch.BoolTensor]: + """ + Returns the tokenized representation of given input string(s) + + Parameters + ---------- + texts : Union[str, List[str]] + An input string or a list of input strings to tokenize + + context_length : int + The context length to use; all CLIP models use 77 as the context length + + truncate: bool + Whether to truncate the text in case its encoding is longer than the context length + + Returns + ------- + A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]. + We return LongTensor when torch version is <1.8.0, since older index_select requires indices to be long. 
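One portability note on `load_video` above: `np.int` was removed in NumPy 1.24, so the uniform frame sampling may need plain `int` on recent NumPy. A standalone sketch of the same sampling (the helper name is mine):

```python
# Sketch of the uniform 8-frame sampling in load_video; np.int was removed in
# NumPy >= 1.24, so plain int (or np.int64) is the safer dtype argument.
import numpy as np

def sample_frame_indices(video_len: int, num_frames: int = 8) -> np.ndarray:
    return np.linspace(0, video_len - 1, num_frames).astype(int)

print(sample_frame_indices(250))   # [  0  35  71 106 142 177 213 249]
```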
+ """ + if isinstance(texts, str): + texts = [texts] + + sot_token = _tokenizer.encoder["<|startoftext|>"] + eot_token = _tokenizer.encoder["<|endoftext|>"] + all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts] + if packaging.version.parse(torch.__version__) < packaging.version.parse("1.8.0"): + result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) + else: + result = torch.zeros(len(all_tokens), context_length, dtype=torch.int) + + special_tokens_mask = torch.zeros(len(all_tokens), context_length, dtype=torch.bool) + + for i, tokens in enumerate(all_tokens): + if len(tokens) > context_length: + if truncate: + tokens = tokens[:context_length] + tokens[-1] = eot_token + else: + raise RuntimeError( + f"Input {texts[i]} is too long for context length {context_length}" + ) + result[i, : len(tokens)] = torch.tensor(tokens) + special_tokens_mask[i, len(tokens) :] = 1 + + if return_special_tokens_mask: + return result, special_tokens_mask + + return result diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/simple_tokenizer.py b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/simple_tokenizer.py new file mode 100644 index 0000000..0a66286 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/simple_tokenizer.py @@ -0,0 +1,132 @@ +import gzip +import html +import os +from functools import lru_cache + +import ftfy +import regex as re + + +@lru_cache() +def default_bpe(): + return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz") + + +@lru_cache() +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a corresponding list of unicode strings. + The reversible bpe codes work on unicode strings. + This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. + When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. + This is a signficant percentage of your normal, say, 32K bpe vocab. + To avoid that, we want lookup tables between utf-8 bytes and unicode strings. + And avoids mapping to whitespace/control characters the bpe code barfs on. + """ + bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8+n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def get_pairs(word): + """Return set of symbol pairs in a word. + Word is represented as tuple of symbols (symbols being variable-length strings). 
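A usage sketch for `tokenize()`: every caption is wrapped in `<|startoftext|>`/`<|endoftext|>` and zero-padded to `context_length`. The import path below is an assumption based on the package layout added in this diff.

```python
# Usage sketch: captions are wrapped in <|startoftext|>/<|endoftext|> and
# zero-padded to context_length (77 for CLIP models). The import path assumes
# the package layout added in this diff.
from CoTrain.modules.InternVideo.internvideo import tokenize

tokens, pad_mask = tokenize(
    ["a person slicing vegetables", "a dog catching a frisbee"],
    context_length=77,
    truncate=True,
    return_special_tokens_mask=True,
)
print(tokens.shape, tokens.dtype)  # torch.Size([2, 77]); int32 on torch >= 1.8, else int64
print(pad_mask[0].sum())           # number of padded positions in the first caption
```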
+ """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +def basic_clean(text): + text = ftfy.fix_text(text) + text = html.unescape(html.unescape(text)) + return text.strip() + + +def whitespace_clean(text): + text = re.sub(r'\s+', ' ', text) + text = text.strip() + return text + + +class SimpleTokenizer(object): + def __init__(self, bpe_path: str = default_bpe()): + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + merges = gzip.open(bpe_path).read().decode("utf-8").split('\n') + merges = merges[1:49152-256-2+1] + merges = [tuple(merge.split()) for merge in merges] + vocab = list(bytes_to_unicode().values()) + vocab = vocab + [v+'' for v in vocab] + for merge in merges: + vocab.append(''.join(merge)) + vocab.extend(['<|startoftext|>', '<|endoftext|>']) + self.encoder = dict(zip(vocab, range(len(vocab)))) + self.decoder = {v: k for k, v in self.encoder.items()} + self.bpe_ranks = dict(zip(merges, range(len(merges)))) + self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'} + self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE) + + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token[:-1]) + ( token[-1] + '',) + pairs = get_pairs(word) + + if not pairs: + return token+'' + + while True: + bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf'))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + new_word.extend(word[i:j]) + i = j + except: + new_word.extend(word[i:]) + break + + if word[i] == first and i < len(word)-1 and word[i+1] == second: + new_word.append(first+second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = ' '.join(word) + self.cache[token] = word + return word + + def encode(self, text): + bpe_tokens = [] + text = whitespace_clean(basic_clean(text)).lower() + for token in re.findall(self.pat, text): + token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8')) + bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')) + return bpe_tokens + + def decode(self, tokens): + text = ''.join([self.decoder[token] for token in tokens]) + text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('', ' ') + return text diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/video_transform.py b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/video_transform.py new file mode 100644 index 0000000..78a501d --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/video_transform.py @@ -0,0 +1,752 @@ +import numbers +import random +import numpy as np +import PIL +import skimage +import skimage.transform +import torchvision +import torch +from torchvision import transforms +from PIL import Image +import torch +import cv2 + + +def _is_tensor_clip(clip): + return torch.is_tensor(clip) and clip.ndimension() == 4 + + +def crop_clip(clip, min_h, min_w, h, w): + if isinstance(clip[0], np.ndarray): + cropped = [img[min_h:min_h + h, min_w:min_w + w, :] for img in clip] + + elif isinstance(clip[0], PIL.Image.Image): + 
cropped = [ + img.crop((min_w, min_h, min_w + w, min_h + h)) for img in clip + ] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + return cropped + + +def resize_clip(clip, size, interpolation='bilinear'): + if isinstance(clip[0], np.ndarray): + if isinstance(size, numbers.Number): + im_h, im_w, im_c = clip[0].shape + # Min spatial dim already matches minimal size + if (im_w <= im_h and im_w == size) or (im_h <= im_w + and im_h == size): + return clip + new_h, new_w = get_resize_sizes(im_h, im_w, size) + size = (new_w, new_h) + else: + size = size[1], size[0] + if interpolation == 'bilinear': + np_inter = cv2.INTER_LINEAR + else: + np_inter = cv2.INTER_NEAREST + scaled = [ + cv2.resize(img, size, interpolation=np_inter) for img in clip + ] + elif isinstance(clip[0], PIL.Image.Image): + if isinstance(size, numbers.Number): + im_w, im_h = clip[0].size + # Min spatial dim already matches minimal size + if (im_w <= im_h and im_w == size) or (im_h <= im_w + and im_h == size): + return clip + new_h, new_w = get_resize_sizes(im_h, im_w, size) + size = (new_w, new_h) + else: + size = size[1], size[0] + if interpolation == 'bilinear': + pil_inter = PIL.Image.NEAREST + else: + pil_inter = PIL.Image.BILINEAR + scaled = [img.resize(size, pil_inter) for img in clip] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + return scaled + + +def get_resize_sizes(im_h, im_w, size): + if im_w < im_h: + ow = size + oh = int(size * im_h / im_w) + else: + oh = size + ow = int(size * im_w / im_h) + return oh, ow + + +def normalize(clip, mean, std, inplace=False): + if not _is_tensor_clip(clip): + raise TypeError('tensor is not a torch clip_test.') + + if not inplace: + clip = clip.clone() + + dtype = clip.dtype + dim = len(mean) + mean = torch.as_tensor(mean, dtype=dtype, device=clip.device) + std = torch.as_tensor(std, dtype=dtype, device=clip.device) + # print(clip_test.size()) + # if dim == 3: + clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None]) + # else: + # clip_test.sub_(mean[:, None, None]).div_(std[:, None, None]) + return clip + + +def convert_img(img): + """Converts (H, W, C) numpy.ndarray to (C, W, H) format + """ + if len(img.shape) == 3: + img = img.transpose(2, 0, 1) + if len(img.shape) == 2: + img = np.expand_dims(img, 0) + return img + + +class ClipToTensor(object): + """Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255] + to a torch.FloatTensor of shape (C x m x H x W) in the range [0, 1.0] + """ + + def __init__(self, channel_nb=3, div_255=True, numpy=False): + self.channel_nb = channel_nb + self.div_255 = div_255 + self.numpy = numpy + + def __call__(self, clip): + """ + Args: clip_test (list of numpy.ndarray): clip_test (list of images) + to be converted to tensor. 
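`get_resize_sizes` above scales the shorter side to `size` while preserving the aspect ratio; note also that the PIL branch of `resize_clip` maps `'bilinear'` to `PIL.Image.NEAREST`, which appears inverted relative to the cv2 branch and may be unintentional. A small sketch of the size computation and a mapping consistent with the cv2 branch (names are my own):

```python
# Short-side resize used by resize_clip/get_resize_sizes, plus an interpolation
# mapping consistent with the cv2 branch ('bilinear' -> bilinear). The PIL
# branch above maps 'bilinear' to NEAREST, which may be unintentional.
import PIL.Image

def short_side_sizes(im_h: int, im_w: int, size: int):
    """Return (new_h, new_w) with the shorter side scaled to `size`."""
    if im_w < im_h:
        return int(size * im_h / im_w), size
    return size, int(size * im_w / im_h)

PIL_INTERP = {'bilinear': PIL.Image.BILINEAR, 'nearest': PIL.Image.NEAREST}

print(short_side_sizes(480, 640, 256))   # (256, 341): 480 -> 256, aspect ratio kept
```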
+ """ + # Retrieve shape + if isinstance(clip[0], np.ndarray): + h, w, ch = clip[0].shape + assert ch == self.channel_nb, 'Got {0} instead of 3 channels'.format( + ch) + elif isinstance(clip[0], Image.Image): + w, h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image\ + but got list of {0}'.format(type(clip[0]))) + + np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)]) + + # Convert + for img_idx, img in enumerate(clip): + if isinstance(img, np.ndarray): + pass + elif isinstance(img, Image.Image): + img = np.array(img, copy=False) + else: + raise TypeError('Expected numpy.ndarray or PIL.Image\ + but got list of {0}'.format(type(clip[0]))) + img = convert_img(img) + np_clip[:, img_idx, :, :] = img + if self.numpy: + if self.div_255: + np_clip = np_clip / 255 + return np_clip + + else: + tensor_clip = torch.from_numpy(np_clip) + + if not isinstance(tensor_clip, torch.FloatTensor): + tensor_clip = tensor_clip.float() + if self.div_255: + tensor_clip = tensor_clip.div(255) + return tensor_clip + + +class ToTensor(object): + """Converts numpy array to tensor + """ + + def __call__(self, array): + tensor = torch.from_numpy(array) + return tensor + +class ColorDistortion(object): + def __init__(self, s=1.0): + self.s = s + self.color_jitter = transforms.ColorJitter(0.8*s, 0.8*s, 0.8*s, 0.2*s) + self.rnd_color_jitter = transforms.RandomApply([self.color_jitter], p=0.8) + self.rnd_gray = transforms.RandomGrayscale(p=0.2) + + def __call__(self, video): + color_distort = transforms.Compose([self.rnd_color_jitter, self.rnd_gray]) + return color_distort(video) + + +class Compose(object): + """Composes several transforms + Args: + transforms (list of ``Transform`` objects): list of transforms + to compose + """ + + def __init__(self, transforms): + self.transforms = transforms + + def __call__(self, clip): + for t in self.transforms: + clip = t(clip) + return clip + + +class RandomHorizontalFlip(object): + """Horizontally flip the list of given images randomly + with a probability 0.5 + """ + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Randomly flipped clip_test + """ + if random.random() < 0.5: + if isinstance(clip[0], np.ndarray): + return [np.fliplr(img) for img in clip] + elif isinstance(clip[0], PIL.Image.Image): + return [ + img.transpose(PIL.Image.FLIP_LEFT_RIGHT) for img in clip + ] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + ' but got list of {0}'.format(type(clip[0]))) + return clip + + +class RandomResize(object): + """Resizes a list of (H x W x C) numpy.ndarray to the final size + The larger the original image is, the more times it takes to + interpolate + Args: + interpolation (str): Can be one of 'nearest', 'bilinear' + defaults to nearest + size (tuple): (widht, height) + """ + + def __init__(self, ratio=(3. / 4., 4. 
/ 3.), interpolation='nearest'): + self.ratio = ratio + self.interpolation = interpolation + + def __call__(self, clip): + scaling_factor = random.uniform(self.ratio[0], self.ratio[1]) + + if isinstance(clip[0], np.ndarray): + im_h, im_w, im_c = clip[0].shape + elif isinstance(clip[0], PIL.Image.Image): + im_w, im_h = clip[0].size + + new_w = int(im_w * scaling_factor) + new_h = int(im_h * scaling_factor) + new_size = (new_w, new_h) + resized = resize_clip( + clip, new_size, interpolation=self.interpolation) + return resized + + +class Resize(object): + """Resizes a list of (H x W x C) numpy.ndarray to the final size + The larger the original image is, the more times it takes to + interpolate + Args: + interpolation (str): Can be one of 'nearest', 'bilinear' + defaults to nearest + size (tuple): (widht, height) + """ + + def __init__(self, size, interpolation='nearest'): + self.size = size + self.interpolation = interpolation + + def __call__(self, clip): + resized = resize_clip( + clip, self.size, interpolation=self.interpolation) + return resized + + +class RandomCrop(object): + """Extract random crop at the same location for a list of images + Args: + size (sequence or int): Desired output size for the + crop in format (h, w) + """ + + def __init__(self, size): + if isinstance(size, numbers.Number): + size = (size, size) + + self.size = size + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + h, w = self.size + if isinstance(clip[0], np.ndarray): + im_h, im_w, im_c = clip[0].shape + elif isinstance(clip[0], PIL.Image.Image): + im_w, im_h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + if w > im_w or h > im_h: + error_msg = ( + 'Initial image size should be larger then ' + 'cropped size but got cropped sizes : ({w}, {h}) while ' + 'initial image is ({im_w}, {im_h})'.format( + im_w=im_w, im_h=im_h, w=w, h=h)) + raise ValueError(error_msg) + + x1 = random.randint(0, im_w - w) + y1 = random.randint(0, im_h - h) + cropped = crop_clip(clip, y1, x1, h, w) + + return cropped + + +class CornerCrop(object): + + def __init__(self, size, crop_position=None): + self.size = size + if crop_position is None: + self.randomize = True + else: + self.randomize = False + self.crop_position = crop_position + self.crop_positions = ['c', 'tl', 'tr', 'bl', 'br'] + + def __call__(self, imgs): + t, h, w, c = imgs.shape + corner_imgs = list() + for n in self.crop_positions: + #print(n) + if n == 'c': + th, tw = (self.size, self.size) + x1 = int(round((w- tw) / 2.)) + y1 = int(round((h - th) / 2.)) + x2 = x1 + tw + y2 = y1 + th + elif n == 'tl': + x1 = 0 + y1 = 0 + x2 = self.size + y2 = self.size + elif n == 'tr': + x1 = w - self.size + y1 = 0 + x2 = w + y2 = self.size + elif n == 'bl': + x1 = 0 + y1 = h - self.size + x2 = self.size + y2 = h + elif n == 'br': + x1 = w - self.size + y1 = h - self.size + x2 = w + y2 = h + corner_imgs.append(imgs[:, y1:y2, x1:x2, :]) + return corner_imgs + + def randomize_parameters(self): + if self.randomize: + self.crop_position = self.crop_positions[random.randint( + 0, + len(self.crop_positions) - 1)] + + +class RandomRotation(object): + """Rotate entire clip_test randomly by a random angle within + given bounds + Args: + degrees (sequence or int): Range of degrees to select from + If degrees is a number instead of sequence like (min, 
max), + the range of degrees, will be (-degrees, +degrees). + """ + + def __init__(self, degrees): + if isinstance(degrees, numbers.Number): + if degrees < 0: + raise ValueError('If degrees is a single number,' + 'must be positive') + degrees = (-degrees, degrees) + else: + if len(degrees) != 2: + raise ValueError('If degrees is a sequence,' + 'it must be of len 2.') + + self.degrees = degrees + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + angle = random.uniform(self.degrees[0], self.degrees[1]) + if isinstance(clip[0], np.ndarray): + rotated = [skimage.transform.rotate(img, angle) for img in clip] + elif isinstance(clip[0], PIL.Image.Image): + rotated = [img.rotate(angle) for img in clip] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + + return rotated + + +class STA_RandomRotation(object): + """Rotate entire clip_test randomly by a random angle within + given bounds + Args: + degrees (sequence or int): Range of degrees to select from + If degrees is a number instead of sequence like (min, max), + the range of degrees, will be (-degrees, +degrees). + """ + + def __init__(self, degrees): + if isinstance(degrees, numbers.Number): + if degrees < 0: + raise ValueError('If degrees is a single number,' + 'must be positive') + degrees = (-degrees, degrees) + else: + if len(degrees) != 2: + raise ValueError('If degrees is a sequence,' + 'it must be of len 2.') + + self.degrees = degrees + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + bsz = len(clip) + angle = random.uniform(self.degrees[0], self.degrees[1]) + angles = [(i+1)/(bsz+1) * angle for i in range(bsz)] + if isinstance(clip[0], np.ndarray): + rotated = [skimage.transform.rotate(img, angles[i]) for i, img in enumerate(clip)] + elif isinstance(clip[0], PIL.Image.Image): + rotated = [img.rotate(angles[i]) for i, img in enumerate(clip)] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + + return rotated + + +class Each_RandomRotation(object): + """Rotate entire clip_test randomly by a random angle within + given bounds + Args: + degrees (sequence or int): Range of degrees to select from + If degrees is a number instead of sequence like (min, max), + the range of degrees, will be (-degrees, +degrees). 
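`STA_RandomRotation` above draws a single angle and gives frame `i` the fraction `(i+1)/(T+1)` of it, so the rotation ramps up smoothly across the clip rather than being applied uniformly. A tiny sketch of that schedule:

```python
# Per-frame angle schedule used by STA_RandomRotation: one random angle is
# drawn and frame i receives the fraction (i + 1) / (T + 1) of it.
import random

def sta_angles(num_frames: int, degrees: float = 30.0):
    angle = random.uniform(-degrees, degrees)
    return [(i + 1) / (num_frames + 1) * angle for i in range(num_frames)]

print([round(a, 1) for a in sta_angles(8)])   # 8 angles ramping up to ~8/9 of the drawn angle
```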
+ """ + + def __init__(self, degrees): + if isinstance(degrees, numbers.Number): + if degrees < 0: + raise ValueError('If degrees is a single number,' + 'must be positive') + degrees = (-degrees, degrees) + else: + if len(degrees) != 2: + raise ValueError('If degrees is a sequence,' + 'it must be of len 2.') + + self.degrees = degrees + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + bsz = len(clip) + angles = [random.uniform(self.degrees[0], self.degrees[1]) for i in range(bsz)] + # print(angles) + if isinstance(clip[0], np.ndarray): + rotated = [skimage.transform.rotate(img, angles[i]) for i, img in enumerate(clip)] + elif isinstance(clip[0], PIL.Image.Image): + rotated = [img.rotate(angles[i]) for i, img in enumerate(clip)] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + + return rotated + + +class CenterCrop(object): + """Extract center crop at the same location for a list of images + Args: + size (sequence or int): Desired output size for the + crop in format (h, w) + """ + + def __init__(self, size): + if isinstance(size, numbers.Number): + size = (size, size) + + self.size = size + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + h, w = self.size + if isinstance(clip[0], np.ndarray): + im_h, im_w, im_c = clip[0].shape + elif isinstance(clip[0], PIL.Image.Image): + im_w, im_h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + if w > im_w or h > im_h: + error_msg = ( + 'Initial image size should be larger then ' + 'cropped size but got cropped sizes : ({w}, {h}) while ' + 'initial image is ({im_w}, {im_h})'.format( + im_w=im_w, im_h=im_h, w=w, h=h)) + raise ValueError(error_msg) + + x1 = int(round((im_w - w) / 2.)) + y1 = int(round((im_h - h) / 2.)) + cropped = crop_clip(clip, y1, x1, h, w) + + return cropped + + +class ColorJitter(object): + """Randomly change the brightness, contrast and saturation and hue of the clip_test + Args: + brightness (float): How much to jitter brightness. brightness_factor + is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]. + contrast (float): How much to jitter contrast. contrast_factor + is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]. + saturation (float): How much to jitter saturation. saturation_factor + is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]. + hue(float): How much to jitter hue. hue_factor is chosen uniformly from + [-hue, hue]. Should be >=0 and <= 0.5. 
+ """ + + def __init__(self, brightness=0, contrast=0, saturation=0, hue=0): + self.brightness = brightness + self.contrast = contrast + self.saturation = saturation + self.hue = hue + + def get_params(self, brightness, contrast, saturation, hue): + if brightness > 0: + brightness_factor = random.uniform( + max(0, 1 - brightness), 1 + brightness) + else: + brightness_factor = None + + if contrast > 0: + contrast_factor = random.uniform( + max(0, 1 - contrast), 1 + contrast) + else: + contrast_factor = None + + if saturation > 0: + saturation_factor = random.uniform( + max(0, 1 - saturation), 1 + saturation) + else: + saturation_factor = None + + if hue > 0: + hue_factor = random.uniform(-hue, hue) + else: + hue_factor = None + return brightness_factor, contrast_factor, saturation_factor, hue_factor + + def __call__(self, clip): + """ + Args: + clip_test (list): list of PIL.Image + Returns: + list PIL.Image : list of transformed PIL.Image + """ + if isinstance(clip[0], np.ndarray): + raise TypeError( + 'Color jitter not yet implemented for numpy arrays') + elif isinstance(clip[0], PIL.Image.Image): + brightness, contrast, saturation, hue = self.get_params( + self.brightness, self.contrast, self.saturation, self.hue) + + # Create img sync_dir function sequence + img_transforms = [] + if brightness is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness)) + if saturation is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation)) + if hue is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue)) + if contrast is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast)) + random.shuffle(img_transforms) + + # Apply to all images + jittered_clip = [] + for img in clip: + for func in img_transforms: + jittered_img = func(img) + jittered_clip.append(jittered_img) + + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + return jittered_clip + + +class EachColorJitter(object): + """Randomly change the brightness, contrast and saturation and hue of the clip_test + Args: + brightness (float): How much to jitter brightness. brightness_factor + is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]. + contrast (float): How much to jitter contrast. contrast_factor + is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]. + saturation (float): How much to jitter saturation. saturation_factor + is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]. + hue(float): How much to jitter hue. hue_factor is chosen uniformly from + [-hue, hue]. Should be >=0 and <= 0.5. 
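As written, `ColorJitter.__call__` above applies each enabled jitter to the original frame independently and appends every intermediate result, so the output clip has `len(clip) * len(img_transforms)` entries and the jitters are not chained. If composed per-frame jitter is the intended behaviour, a sketch (the helper name is mine) would be:

```python
# Composed per-frame jitter (one output frame per input frame), in case that is
# the intended behaviour; as written above, every intermediate result is kept
# and the individual jitter functions are not chained.
def compose_jitter(clip, img_transforms):
    jittered_clip = []
    for img in clip:
        for func in img_transforms:
            img = func(img)           # chain brightness/contrast/saturation/hue
        jittered_clip.append(img)     # exactly one jittered frame per input frame
    return jittered_clip
```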
+ """ + + def __init__(self, brightness=0, contrast=0, saturation=0, hue=0): + self.brightness = brightness + self.contrast = contrast + self.saturation = saturation + self.hue = hue + + def get_params(self, brightness, contrast, saturation, hue): + if brightness > 0: + brightness_factor = random.uniform( + max(0, 1 - brightness), 1 + brightness) + else: + brightness_factor = None + + if contrast > 0: + contrast_factor = random.uniform( + max(0, 1 - contrast), 1 + contrast) + else: + contrast_factor = None + + if saturation > 0: + saturation_factor = random.uniform( + max(0, 1 - saturation), 1 + saturation) + else: + saturation_factor = None + + if hue > 0: + hue_factor = random.uniform(-hue, hue) + else: + hue_factor = None + return brightness_factor, contrast_factor, saturation_factor, hue_factor + + def __call__(self, clip): + """ + Args: + clip_test (list): list of PIL.Image + Returns: + list PIL.Image : list of transformed PIL.Image + """ + if isinstance(clip[0], np.ndarray): + raise TypeError( + 'Color jitter not yet implemented for numpy arrays') + elif isinstance(clip[0], PIL.Image.Image): + brightness, contrast, saturation, hue = self.get_params( + self.brightness, self.contrast, self.saturation, self.hue) + + # Create img sync_dir function sequence + img_transforms = [] + if brightness is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness)) + if saturation is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation)) + if hue is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue)) + if contrast is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast)) + random.shuffle(img_transforms) + + # Apply to all images + jittered_clip = [] + for img in clip: + for func in img_transforms: + jittered_img = func(img) + jittered_clip.append(jittered_img) + + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + return jittered_clip + + +class Normalize(object): + """Normalize a clip_test with mean and standard deviation. + Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this sync_dir + will normalize each channel of the input ``torch.*Tensor`` i.e. + ``input[channel] = (input[channel] - mean[channel]) / std[channel]`` + .. note:: + This sync_dir acts out of place, i.e., it does not mutates the input tensor. + Args: + mean (sequence): Sequence of means for each channel. + std (sequence): Sequence of standard deviations for each channel. + """ + + def __init__(self, mean, std): + self.mean = mean + self.std = std + + def __call__(self, clip): + """ + Args: + clip (Tensor): Tensor clip_test of size (T, C, H, W) to be normalized. + Returns: + Tensor: Normalized Tensor clip_test. 
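`Normalize` wraps the `normalize()` helper defined earlier in this file, which broadcasts the per-channel statistics over dimension 0; since `ClipToTensor` emits `(C, T, H, W)`, the channel dimension comes first and the `(T, C, H, W)` wording in the docstring looks like a typo. A shape sketch:

```python
# Shape sketch: ClipToTensor emits (C, T, H, W), and normalize() broadcasts the
# per-channel statistics as (C, 1, 1, 1) over that layout.
import torch

clip = torch.rand(3, 8, 224, 224)                      # (C, T, H, W)
mean = torch.tensor([0.48145466, 0.4578275, 0.40821073])
std = torch.tensor([0.26862954, 0.26130258, 0.27577711])
normed = (clip - mean[:, None, None, None]) / std[:, None, None, None]
print(normed.shape)                                    # torch.Size([3, 8, 224, 224])
```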
+ """ + return normalize(clip, self.mean, self.std) + + def __repr__(self): + return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std) + + +class TensorToNumpy(object): + + def __init__(self): + pass + + def __call__(self, clip): + np_clip = clip.permute(1, 2, 3, 0).cpu().detach().numpy() + pil_clip = [Image.fromarray(np.uint8(numpy_image)).convert('RGB') for numpy_image in np_clip] + return pil_clip diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/__init__.py b/Downstream/multi-modalities-downstream/CoTrain/modules/__init__.py new file mode 100644 index 0000000..fa587b1 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/__init__.py @@ -0,0 +1,4 @@ +# from CoTrain.modules.cotrain_dino_module_v2 import CoTrainTransformerSS +from CoTrain.modules.cotrain_module import CoTrainTransformerSS +# from CoTrain.modules.cotrain_dino_module_v3 import CoTrainTransformerSS +from CoTrain.modules.clip_module import CLIP \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/base_vision_transformer.py b/Downstream/multi-modalities-downstream/CoTrain/modules/base_vision_transformer.py new file mode 100644 index 0000000..e3a0ed0 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/base_vision_transformer.py @@ -0,0 +1,1532 @@ +""" Vision Transformer (ViT) in PyTorch + +A PyTorch implement of Vision Transformers as described in +'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' - https://arxiv.org/abs/2010.11929 + +The official jax code is released and available at https://github.com/google-research/vision_transformer + +Acknowledgments: +* The paper authors for releasing code and weights, thanks! +* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... 
check it out +for some einops/einsum fun +* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT +* Bert reference code checks against Huggingface Transformers and Tensorflow Bert + +DeiT model defs and weights from https://github.com/facebookresearch/deit, +paper `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877 + +Hacked together by / Copyright 2020 Ross Wightman +""" +import math +import logging +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F +import hashlib +import os +import urllib +import warnings + +from functools import partial +from tqdm import tqdm + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.models.helpers import load_pretrained +from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ +from timm.models.resnet import resnet26d, resnet50d +from timm.models.resnetv2 import ResNetV2 +from timm.models.registry import register_model +from torchvision import transforms +from einops import rearrange, repeat +from timm.models.layers import DropPath, to_2tuple, trunc_normal_ +from torch import einsum, nn +import sys +sys.path.append("pretrained") + +_logger = logging.getLogger(__name__) + + +def download_clip( + url: str = "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", + root: str = os.path.expanduser("../../pretrained/torch/hub/checkpoints"), +): + os.makedirs(root, exist_ok=True) + filename = os.path.basename(url) + + expected_sha256 = url.split("/")[-2] + download_target = os.path.join(root, filename) + + if os.path.exists(download_target) and not os.path.isfile(download_target): + raise RuntimeError(f"{download_target} exists and is not a regular file") + + if os.path.isfile(download_target): + if ( + hashlib.sha256(open(download_target, "rb").read()).hexdigest() + == expected_sha256 + ): + return download_target + else: + warnings.warn( + f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" + ) + + with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: + with tqdm(total=int(source.info().get("Content-Length")), ncols=80) as loop: + while True: + buffer = source.read(8192) + if not buffer: + break + + output.write(buffer) + loop.update(len(buffer)) + + if ( + hashlib.sha256(open(download_target, "rb").read()).hexdigest() + != expected_sha256 + ): + raise RuntimeError( + f"Model has been downloaded but the SHA256 checksum does not not match" + ) + + return download_target + + +class UnNormalize(object): + def __init__(self, mean, std): + self.mean = mean + self.std = std + + def __call__(self, tensor): + for t, m, s in zip(tensor, self.mean, self.std): + t.mul_(s).add_(m) + return tensor + + +inception_unnormalize = transforms.Compose( + [UnNormalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])] +) + + +def _cfg(url="", **kwargs): + return { + "url": url, + "num_classes": 1000, + "input_size": (3, 224, 224), + "pool_size": None, + "crop_pct": 0.9, + "interpolation": "bicubic", + "mean": IMAGENET_DEFAULT_MEAN, + "std": IMAGENET_DEFAULT_STD, + "first_conv": "patch_embed.proj", + "classifier": "head", + **kwargs, + } + + +default_cfgs = { + # patch models (my experiments) + "vit_small_patch16_224": _cfg( + url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/vit_small_p16_224-15ec54c9.pth", + ), + # patch models (weights ported from 
official Google JAX impl) + "vit_base_patch16_224": _cfg( + url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth", + mean=(0.5, 0.5, 0.5), + std=(0.5, 0.5, 0.5), + ), + "vit_base_patch32_224": _cfg( + url="", # no official model weights for this combo, only for in21k + mean=(0.5, 0.5, 0.5), + std=(0.5, 0.5, 0.5), + ), + "vit_base_patch16_384": _cfg( + url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_384-83fb41ba.pth", + input_size=(3, 384, 384), + mean=(0.5, 0.5, 0.5), + std=(0.5, 0.5, 0.5), + crop_pct=1.0, + ), + "vit_base_patch32_384": _cfg( + url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p32_384-830016f5.pth", + input_size=(3, 384, 384), + mean=(0.5, 0.5, 0.5), + std=(0.5, 0.5, 0.5), + crop_pct=1.0, + ), + "vit_large_patch16_224": _cfg( + url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_224-4ee7a4dc.pth", + mean=(0.5, 0.5, 0.5), + std=(0.5, 0.5, 0.5), + ), + "vit_large_patch32_224": _cfg( + url="", # no official model weights for this combo, only for in21k + mean=(0.5, 0.5, 0.5), + std=(0.5, 0.5, 0.5), + ), + "vit_large_patch16_384": _cfg( + url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_384-b3be5167.pth", + input_size=(3, 384, 384), + mean=(0.5, 0.5, 0.5), + std=(0.5, 0.5, 0.5), + crop_pct=1.0, + ), + "vit_large_patch32_384": _cfg( + url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p32_384-9b920ba8.pth", + input_size=(3, 384, 384), + mean=(0.5, 0.5, 0.5), + std=(0.5, 0.5, 0.5), + crop_pct=1.0, + ), + # patch models, imagenet21k (weights ported from official Google JAX impl) + "vit_base_patch16_224_in21k": _cfg( + url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_patch16_224_in21k-e5005f0a.pth", + num_classes=21843, + mean=(0.5, 0.5, 0.5), + std=(0.5, 0.5, 0.5), + ), + "vit_base_patch32_224_in21k": _cfg( + url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_patch32_224_in21k-8db57226.pth", + num_classes=21843, + mean=(0.5, 0.5, 0.5), + std=(0.5, 0.5, 0.5), + ), + "vit_large_patch16_224_in21k": _cfg( + url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch16_224_in21k-606da67d.pth", + num_classes=21843, + mean=(0.5, 0.5, 0.5), + std=(0.5, 0.5, 0.5), + ), + "vit_large_patch32_224_in21k": _cfg( + url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch32_224_in21k-9046d2e7.pth", + num_classes=21843, + mean=(0.5, 0.5, 0.5), + std=(0.5, 0.5, 0.5), + ), + "vit_huge_patch14_224_in21k": _cfg( + url="", # FIXME I have weights for this but > 2GB limit for github release binaries + num_classes=21843, + mean=(0.5, 0.5, 0.5), + std=(0.5, 0.5, 0.5), + ), + # hybrid models (weights ported from official Google JAX impl) + "vit_base_resnet50_224_in21k": _cfg( + url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_224_in21k-6f7c7740.pth", + num_classes=21843, + mean=(0.5, 0.5, 0.5), + std=(0.5, 0.5, 0.5), + crop_pct=0.9, + first_conv="patch_embed.backbone.stem.conv", + ), + "vit_base_resnet50_384": _cfg( + url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_384-9fd3c705.pth", + input_size=(3, 384, 384), + 
mean=(0.5, 0.5, 0.5), + std=(0.5, 0.5, 0.5), + crop_pct=1.0, + first_conv="patch_embed.backbone.stem.conv", + ), + # hybrid models (my experiments) + "vit_small_resnet26d_224": _cfg(), + "vit_small_resnet50d_s3_224": _cfg(), + "vit_base_resnet26d_224": _cfg(), + "vit_base_resnet50d_224": _cfg(), + # deit models (FB weights) + "vit_deit_tiny_patch16_224": _cfg( + url="https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth" + ), + "vit_deit_small_patch16_224": _cfg( + url="https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth" + ), + "vit_deit_base_patch16_224": _cfg( + url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth", + ), + "vit_deit_base_patch16_384": _cfg( + url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth", + input_size=(3, 384, 384), + crop_pct=1.0, + ), + "vit_deit_tiny_distilled_patch16_224": _cfg( + url="https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth" + ), + "vit_deit_small_distilled_patch16_224": _cfg( + url="https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth" + ), + "vit_deit_base_distilled_patch16_224": _cfg( + url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth", + ), + "vit_deit_base_distilled_patch16_384": _cfg( + url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth", + input_size=(3, 384, 384), + crop_pct=1.0, + ), +} + + +class Mlp(nn.Module): + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.0, + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + +def attn(q, k, v): + sim = einsum('b i d, b j d -> b i j', q, k) + attn = sim.softmax(dim=-1) + out = einsum('b i j, b j d -> b i d', attn, v) + return out + + +class VarAttention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., + initialize='random'): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights + self.scale = qk_scale or head_dim ** -0.5 + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.proj = nn.Linear(dim, dim) + if initialize == 'zeros': + self.qkv.weight.data.fill_(0) + self.qkv.bias.data.fill_(0) + # fill proj weight with 1 here to improve training dynamics. Otherwise temporal attention inputs + # are multiplied by 0*0, which is hard for the model to move out of. 
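+ # With qkv zero-initialised, q = k = v = 0, so the attention output is all zeros regardless of the
+ # input and this branch initially contributes nothing; filling proj with ones (rather than zeros)
+ # keeps a non-vanishing gradient path back to qkv, so training can move the branch away from that
+ # all-zero state.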
+ self.proj.weight.data.fill_(1) + self.proj.bias.data.fill_(0) + self.attn_drop = nn.Dropout(attn_drop) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x, einops_from, einops_to, **einops_dims): + h = self.num_heads + # project x to q, k, v vaalues + q, k, v = self.qkv(x).chunk(3, dim=-1) + q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + + q *= self.scale + + # splice out CLS token at index 1 + (cls_q, q_), (cls_k, k_), (cls_v, v_) = map(lambda t: (t[:, 0:1], t[:, 1:]), (q, k, v)) + + # let CLS token attend to key / values of all patches across time and space + cls_out = attn(cls_q, k, v) + # rearrange across time or space + q_, k_, v_ = map(lambda t: rearrange(t, f'{einops_from} -> {einops_to}', **einops_dims), (q_, k_, v_)) + + # expand cls token keys and values across time or space and concat + r = q_.shape[0] // cls_k.shape[0] + cls_k, cls_v = map(lambda t: repeat(t, 'b () d -> (b r) () d', r=r), (cls_k, cls_v)) + + k_ = torch.cat((cls_k, k_), dim=1) + v_ = torch.cat((cls_v, v_), dim=1) + + # attention + out = attn(q_, k_, v_) + + # merge back time or space + out = rearrange(out, f'{einops_to} -> {einops_from}', **einops_dims) + + # concat back the cls token + out = torch.cat((cls_out, out), dim=1) + + # merge back the heads + out = rearrange(out, '(b h) n d -> b n (h d)', h=h) + ## to out + x = self.proj(out) + x = self.proj_drop(x) + return x + + +class Attention(nn.Module): + def __init__( + self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0.0, + proj_drop=0.0, + ): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights + self.scale = qk_scale or head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x, mask=None): + B, N, C = x.shape + qkv = ( + self.qkv(x) + .reshape(B, N, 3, self.num_heads, C // self.num_heads) + .permute(2, 0, 3, 1, 4) + ) + q, k, v = ( + qkv[0], + qkv[1], + qkv[2], + ) # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale + if mask is not None: + mask = mask.bool() + attn = attn.masked_fill(~mask[:, None, None, :], float("-inf")) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x, attn + + +class Block(nn.Module): + def __init__( + self, + dim, + num_heads, + mlp_ratio=4.0, + qkv_bias=False, + qk_scale=None, + drop=0.0, + attn_drop=0.0, + drop_path=0.0, + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + ): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + ) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop, + ) + + def forward(self, x, mask=None): + _x, attn = self.attn(self.norm1(x), mask=mask) + x = x + self.drop_path(_x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x, attn + + +class 
PatchEmbed(nn.Module): + """ Image to Patch Embedding""" + + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + embed_dim=768, + no_patch_embed_bias=False, + ): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + + self.proj = nn.Conv2d( + in_chans, + embed_dim, + kernel_size=patch_size, + stride=patch_size, + bias=False if no_patch_embed_bias else True, + ) + + def forward(self, x): + # print(x.size()) + B, C, H, W = x.shape + # FIXME look at relaxing size constraints + x = self.proj(x) + return x + + +# from https://github.com/SwinTransformer/Video-Swin-Transformer/blob/master/mmaction/models/backbones/swin_transformer.py + +class PatchEmbed3D(nn.Module): + """ Video to Patch Embedding. + Args: + patch_size (int): Patch token size. Default: (3,16,16). + in_chans (int): Number of input video channels. Default: 3. + embed_dim (int): Number of linear projection output channels. Default: 768. + norm_layer (nn.Module, optional): Normalization layer. Default: None + """ + def __init__(self, patch_size=(3, 16, 16), in_chans=3, embed_dim=768, norm_layer=None): + super().__init__() + self.patch_size = patch_size + + self.in_chans = in_chans + self.embed_dim = embed_dim + + self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + if norm_layer is not None: + self.norm = norm_layer(embed_dim) + else: + self.norm = None + + def forward(self, x): + """Forward function.""" + # padding + _, _, D, H, W = x.size() + if W % self.patch_size[2] != 0: + x = F.pad(x, (0, self.patch_size[2] - W % self.patch_size[2])) + if H % self.patch_size[1] != 0: + x = F.pad(x, (0, 0, 0, self.patch_size[1] - H % self.patch_size[1])) + if D % self.patch_size[0] != 0: + x = F.pad(x, (0, 0, 0, 0, 0, self.patch_size[0] - D % self.patch_size[0])) + + x = self.proj(x) # B C D Wh Ww + if self.norm is not None: + D, Wh, Ww = x.size(2), x.size(3), x.size(4) + x = x.flatten(2).transpose(1, 2) + x = self.norm(x) + x = x.transpose(1, 2).view(-1, self.embed_dim, D, Wh, Ww) + + return x + + +class VisionTransformer(nn.Module): + """ Vision Transformer + + A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - + https://arxiv.org/abs/2010.11929 + """ + + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + num_classes=1000, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4.0, + qkv_bias=True, + qk_scale=None, + representation_size=None, + drop_rate=0.0, + attn_drop_rate=0.0, + drop_path_rate=0.1, + norm_layer=None, + add_norm_before_transformer=False, + no_patch_embed_bias=False, + config=None, + ): + """ + Args: + img_size (int, tuple): input image size + patch_size (int, tuple): patch size + in_chans (int): number of input channels + num_classes (int): number of classes for classification head + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + qk_scale (float): override default qk scale of head_dim ** -0.5 if set + representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set + drop_rate (float): dropout rate + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): 
stochastic depth rate + hybrid_backbone (nn.Module): CNN backbone to use in-place of PatchEmbed module + norm_layer: (nn.Module): normalization layer + """ + super().__init__() + drop_rate = drop_rate if config is None else config["drop_rate"] + + self.num_classes = num_classes + self.num_features = ( + self.embed_dim + ) = embed_dim # num_features for consistency with other models + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + self.add_norm_before_transformer = add_norm_before_transformer + + self.patch_embed = PatchEmbed( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + ) + num_patches = self.patch_embed.num_patches + + self.patch_size = patch_size + self.patch_dim = img_size // patch_size + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + self.pos_drop = nn.Dropout(p=drop_rate) + # print(img_size, patch_size, self.patch_dim) + # # =================== temporal embedding ====================== + self.num_frames = config['num_frames'] + print("# frames for module is: {}".format(self.num_frames)) + self.temporal_embed = nn.Parameter(torch.zeros(1, config['num_frames'], embed_dim)) + # self.patches_per_frame = 49 # 7x7 for patch with 32 resolution + self.patches_per_frame = self.patch_dim ** 2 # 7x7 for patch with 32 resolution + # # === + if add_norm_before_transformer: + self.pre_norm = norm_layer(embed_dim) + + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, depth) + ] # stochastic depth decay rule + self.blocks = nn.ModuleList( + [ + Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + ) + for i in range(depth) + ] + ) + self.norm = norm_layer(embed_dim) + + trunc_normal_(self.pos_embed, std=0.02) + trunc_normal_(self.cls_token, std=0.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=0.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {"pos_embed", "cls_token"} + + def mask_tokens(self, orig_image, feats): + """ + Prepare masked tokens inputs/labels for masked patch prediction: 80% MASK, 10% random, 10% original. 
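+ # Regression targets are the per-patch mean RGB values of the un-normalised image, scaled to
+ # 0-255 integers (e.g. a 224x224 input with 16x16 patches gives 196 three-channel targets);
+ # positions not selected for masking are set to -100 so the loss ignores them. Of the selected
+ # positions, 80% have their patch features replaced by self.mask_token and the rest keep their
+ # original embeddings.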
+ """ + img_unnorm = orig_image * 0.5 + 0.5 + _, _, ph, pw = self.patch_embed.proj.weight.shape + with torch.no_grad(): + img_unnorm_patch = F.conv2d( + img_unnorm, + weight=torch.ones(3, 1, ph, pw).to(img_unnorm) / (ph * pw), + bias=None, + stride=(ph, pw), + padding=0, + groups=3, + ) + labels = ( + ((img_unnorm_patch * 255).long().flatten(start_dim=2, end_dim=3)) + .permute(0, 2, 1) + .contiguous() + ) + + # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`) + probability_matrix = torch.full(labels.shape[:-1], 0.15) + masked_indices = torch.bernoulli(probability_matrix).bool() + labels[~masked_indices] = -100 # We only compute loss on masked tokens + + # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) + indices_replaced = ( + torch.bernoulli(torch.full(labels.shape[:-1], 0.8)).bool() & masked_indices + ) + feats[indices_replaced] = self.mask_token.to(feats) + + return feats, labels + + def visual_embed(self, _x, max_image_len=200, mask_it=False, mode='video'): + # for special case of irtr_recall + if len(_x.size()) == 5: + _x = _x.contiguous().view(-1, _x.size(2), _x.size(3), _x.size(4)) + _, _, ph, pw = self.patch_embed.proj.weight.shape + # print(_x.size()) + # if len(_x.size()) == 4: + # _x = _x.unsqueeze(1) + # # print(_x.size()) # 64x4x3x224x224 + # # _b = _x.size(0) + # # print(_x) + # _x = _x.contiguous().view(-1, _x.size(2), _x.size(3), _x.size(4)) + # print(_x.size()) + x = self.patch_embed(_x) + # # average along time dimension + # x = x.view(_b, -1, x.size(1), x.size(2), x.size(3)) + # x = torch.mean(x, dim=1) + # x = x.view(_b, x.size(1), x.size(2), -1) + # print(x.size()) + x_mask = (_x.sum(dim=1) != 0).float()[:, None, :, :] # only use first image + # print(x_mask.size()) + x_mask = F.interpolate(x_mask, size=(x.shape[2], x.shape[3])).long() + x_h = x_mask[:, 0].sum(dim=1)[:, 0] + x_w = x_mask[:, 0].sum(dim=2)[:, 0] + + B, C, H, W = x.shape + B = B + spatial_pos = ( + self.pos_embed[:, 1:, :] + .transpose(1, 2) + .view(1, C, self.patch_dim, self.patch_dim) + ) + pos_embed = torch.cat( + [ + F.pad( + F.interpolate( + spatial_pos, size=(h, w), mode="bilinear", align_corners=True, + ), + (0, W - w, 0, H - h), + ) + for h, w in zip(x_h, x_w) + ], + dim=0, + ) + + pos_embed = pos_embed.flatten(2).transpose(1, 2) + x = x.flatten(2).transpose(1, 2) + patch_index = ( + torch.stack( + torch.meshgrid( + torch.arange(x_mask.shape[-2]), torch.arange(x_mask.shape[-1]) + ), + dim=-1, + )[None, None, :, :, :] + .expand(x_mask.shape[0], x_mask.shape[1], -1, -1, -1) + .flatten(1, 3) + ) + x_mask = x_mask.flatten(1) + + if mask_it: + x, label = self.mask_tokens(_x, x) + + if ( + max_image_len < 0 + or max_image_len is None + or not isinstance(max_image_len, int) + ): + # suppose aug is 800 x 1333, then, maximum effective res is 800 x 1333 (if one side gets bigger, the other will be constrained and be shrinked) + # (800 // self.patch_size) * (1333 // self.patch_size) is the maximum number of patches that single image can get. 
+ # if self.patch_size = 32, 25 * 41 = 1025 + # if res is 384 x 640, 12 * 20 = 240 + eff = x_h * x_w + max_image_len = eff.max() + else: + eff = x_h * x_w + max_image_len = min(eff.max(), max_image_len) + + valid_idx = x_mask.nonzero(as_tuple=False) + non_valid_idx = (1 - x_mask).nonzero(as_tuple=False) + unique_rows = valid_idx[:, 0].unique() + valid_row_idx = [valid_idx[valid_idx[:, 0] == u] for u in unique_rows] + non_valid_row_idx = [ + non_valid_idx[non_valid_idx[:, 0] == u] for u in unique_rows + ] + + valid_nums = [v.size(0) for v in valid_row_idx] + non_valid_nums = [v.size(0) for v in non_valid_row_idx] + pad_nums = [max_image_len - v for v in valid_nums] + + select = list() + for i, (v, nv, p) in enumerate(zip(valid_nums, non_valid_nums, pad_nums)): + if p <= 0: + valid_choice = torch.multinomial(torch.ones(v).float(), max_image_len) + select.append(valid_row_idx[i][valid_choice]) + else: + pad_choice = torch.multinomial( + torch.ones(nv).float(), p, replacement=True + ) + select.append( + torch.cat( + [valid_row_idx[i], non_valid_row_idx[i][pad_choice]], dim=0, + ) + ) + + select = torch.cat(select, dim=0) + x = x[select[:, 0], select[:, 1]].view(B, -1, C) + x_mask = x_mask[select[:, 0], select[:, 1]].view(B, -1) + patch_index = patch_index[select[:, 0], select[:, 1]].view(B, -1, 2) + pos_embed = pos_embed[select[:, 0], select[:, 1]].view(B, -1, C) # 48, 196, 768 + # # === alex: add temporal pos embed here + # temporal embed needs to be repeated within each frame (this does [1,2,3] --> [1,1,1,2,2,2,3,3,3]...) + if mode == 'video': + self.patches_per_frame = x.size(1) + tile_temporal_embed = self.temporal_embed.repeat_interleave(self.patches_per_frame, 1) + # tile_temporal_embed = tile_temporal_embed.view(-1, self.num_frames, tile_temporal_embed.size(-1)) + # print(pos_embed.size(), tile_temporal_embed.size()) + pos_embed = pos_embed.view(pos_embed.size(0)//self.num_frames, -1, pos_embed.size(-1)) + pos_embed = pos_embed + tile_temporal_embed + pos_embed = pos_embed.view(-1, self.patches_per_frame, pos_embed.size(-1)) + # ======= + if mask_it: + label = label[select[:, 0], select[:, 1]].view(B, -1, 3) + + label[x_mask == 0] = -100 + label = torch.cat( + [torch.full((label.shape[0], 1, 3), -100).to(label), label,], dim=1, + ) + + cls_tokens = self.cls_token.expand(B, -1, -1) + # print(cls_tokens.size(), x.size()) + x = torch.cat((cls_tokens, x), dim=1) + pos_embed = torch.cat( + (self.pos_embed[:, 0, :][:, None, :].expand(B, -1, -1), pos_embed), dim=1 + ) + # print(x.size(), pos_embed.size()) + x = x + pos_embed + x = self.pos_drop(x) + + if self.add_norm_before_transformer: + x = self.pre_norm(x) + + x_mask = torch.cat([torch.ones(x_mask.shape[0], 1).to(x_mask), x_mask], dim=1) + # print(x_mask) + # print(x_mask.size()) + # print(x.size()) + + if mask_it: + return x, x_mask, (patch_index, (H, W)), label + else: + return x, x_mask, (patch_index, (H, W)), None + + def forward_features(self, _x, max_image_len=144, mask_it=False): + x, x_mask, patch_index, label = self.visual_embed( + _x, max_image_len=max_image_len, mask_it=mask_it + ) + + for blk in self.blocks: + x, _ = blk(x, mask=x_mask) + + x = self.norm(x) + return x, x_mask, label + + def forward(self, x, max_image_len=-1): + x, _, _ = self.forward_features(x, max_image_len=max_image_len) + x = x[:, 0] + x = self.head(x) + return x + + +class DistilledVisionTransformer(VisionTransformer): + """ Vision Transformer with distillation token. 
+ + Paper: `Training data-efficient image transformers & distillation through attention` - + https://arxiv.org/abs/2012.12877 + + This impl of distilled ViT is taken from https://github.com/facebookresearch/deit + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) + num_patches = self.patch_embed.num_patches + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 2, self.embed_dim)) + + trunc_normal_(self.dist_token, std=0.02) + trunc_normal_(self.pos_embed, std=0.02) + + def visual_embed(self, _x, max_image_len=200, mask_it=False): + _, _, ph, pw = self.patch_embed.proj.weight.shape + + x = self.patch_embed(_x) + x_mask = (_x.sum(dim=1) != 0).float()[:, None, :, :] + x_mask = F.interpolate(x_mask, size=(x.shape[2], x.shape[3])).long() + x_h = x_mask[:, 0].sum(dim=1)[:, 0] + x_w = x_mask[:, 0].sum(dim=2)[:, 0] + + B, C, H, W = x.shape + spatial_pos = ( + self.pos_embed[:, 2:, :] + .transpose(1, 2) + .view(1, C, self.patch_dim, self.patch_dim) + ) + pos_embed = torch.cat( + [ + F.pad( + F.interpolate( + spatial_pos, size=(h, w), mode="bilinear", align_corners=True, + ), + (0, W - w, 0, H - h), + ) + for h, w in zip(x_h, x_w) + ], + dim=0, + ) + + pos_embed = pos_embed.flatten(2).transpose(1, 2) + x = x.flatten(2).transpose(1, 2) + patch_index = ( + torch.stack( + torch.meshgrid( + torch.arange(x_mask.shape[-2]), torch.arange(x_mask.shape[-1]) + ), + dim=-1, + )[None, None, :, :, :] + .expand(x_mask.shape[0], x_mask.shape[1], -1, -1, -1) + .flatten(1, 3) + ) + x_mask = x_mask.flatten(1) + + if mask_it: + x, label = self.mask_tokens(_x, x) + + if ( + max_image_len < 0 + or max_image_len is None + or not isinstance(max_image_len, int) + ): + # suppose aug is 800 x 1333, then, maximum effective res is 800 x 1333 (if one side gets bigger, the other will be constrained and be shrinked) + # (800 // self.patch_size) * (1333 // self.patch_size) is the maximum number of patches that single image can get. 
+ # if self.patch_size = 32, 25 * 41 = 1025 + # if res is 384 x 640, 12 * 20 = 240 + eff = x_h * x_w + max_image_len = eff.max() + else: + eff = x_h * x_w + max_image_len = min(eff.max(), max_image_len) + + valid_idx = x_mask.nonzero(as_tuple=False) + non_valid_idx = (1 - x_mask).nonzero(as_tuple=False) + unique_rows = valid_idx[:, 0].unique() + valid_row_idx = [valid_idx[valid_idx[:, 0] == u] for u in unique_rows] + non_valid_row_idx = [ + non_valid_idx[non_valid_idx[:, 0] == u] for u in unique_rows + ] + + valid_nums = [v.size(0) for v in valid_row_idx] + non_valid_nums = [v.size(0) for v in non_valid_row_idx] + pad_nums = [max_image_len - v for v in valid_nums] + + select = list() + for i, (v, nv, p) in enumerate(zip(valid_nums, non_valid_nums, pad_nums)): + if p <= 0: + valid_choice = torch.multinomial(torch.ones(v).float(), max_image_len) + select.append(valid_row_idx[i][valid_choice]) + else: + pad_choice = torch.multinomial( + torch.ones(nv).float(), p, replacement=True + ) + select.append( + torch.cat( + [valid_row_idx[i], non_valid_row_idx[i][pad_choice]], dim=0, + ) + ) + + select = torch.cat(select, dim=0) + x = x[select[:, 0], select[:, 1]].view(B, -1, C) + x_mask = x_mask[select[:, 0], select[:, 1]].view(B, -1) + patch_index = patch_index[select[:, 0], select[:, 1]].view(B, -1, 2) + pos_embed = pos_embed[select[:, 0], select[:, 1]].view(B, -1, C) + if mask_it: + label = label[select[:, 0], select[:, 1]].view(B, -1, 3) + + label[x_mask == 0] = -100 + label = torch.cat( + [torch.full((label.shape[0], 1, 3), -100).to(label), label,], dim=1, + ) + + cls_tokens = self.cls_token.expand(B, -1, -1) + dist_token = self.dist_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, dist_token, x), dim=1) + pos_embed = torch.cat( + (self.pos_embed[:, :2, :].expand(B, -1, -1), pos_embed), dim=1 + ) + x = x + pos_embed + x = self.pos_drop(x) + + if self.add_norm_before_transformer: + x = self.pre_norm(x) + + x_mask = torch.cat([torch.ones(x_mask.shape[0], 2).to(x_mask), x_mask], dim=1) + if mask_it: + return x, x_mask, (patch_index, (H, W)), label + else: + return x, x_mask, (patch_index, (H, W)), None + + def forward_features(self, _x, max_image_len=144, mask_it=False): + x, x_mask, patch_index, label = self.visual_embed( + _x, max_image_len=max_image_len, mask_it=mask_it + ) + + for blk in self.blocks: + x, _ = blk(x, mask=x_mask) + + x = self.norm(x) + return x, x_mask, label + + def forward(self, x, max_image_len=-1): + x, _, _ = self.forward_features(x, max_image_len=max_image_len) + x = x[:, 0] + x = self.head(x) + return x + + +def resize_pos_embed(posemb, posemb_new): + # Rescale the grid of position embeddings when loading from state_dict. 
Adapted from + # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224 + _logger.info("Resized position embedding: %s to %s", posemb.shape, posemb_new.shape) + ntok_new = posemb_new.shape[1] + if True: + posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:] + ntok_new -= 1 + else: + posemb_tok, posemb_grid = posemb[:, :0], posemb[0] + gs_old = int(math.sqrt(len(posemb_grid))) + gs_new = int(math.sqrt(ntok_new)) + _logger.info("Position embedding grid-size from %s to %s", gs_old, gs_new) + posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2) + posemb_grid = F.interpolate(posemb_grid, size=(gs_new, gs_new), mode="bilinear") + posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new * gs_new, -1) + posemb = torch.cat([posemb_tok, posemb_grid], dim=1) + return posemb + + +def checkpoint_filter_fn(state_dict, model): + """ convert patch embedding weight from manual patchify + linear proj to conv""" + out_dict = {} + if "model" in state_dict: + # For deit models + state_dict = state_dict["model"] + for k, v in state_dict.items(): + if "patch_embed.proj.weight" in k and len(v.shape) < 4: + # For old models that I trained prior to conv based patchification + O, I, H, W = model.patch_embed.proj.weight.shape + v = v.reshape(O, -1, H, W) + elif k == "pos_embed" and v.shape != model.pos_embed.shape: + # To resize pos embedding when using model at different size from pretrained weights + v = resize_pos_embed(v, model.pos_embed) + out_dict[k] = v + return out_dict + + +def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): + default_cfg = default_cfgs[variant] + default_num_classes = default_cfg["num_classes"] + default_img_size = default_cfg["input_size"][-1] + + num_classes = kwargs.pop("num_classes", default_num_classes) + img_size = kwargs.pop("img_size", default_img_size) + repr_size = kwargs.pop("representation_size", None) + if repr_size is not None and num_classes != default_num_classes: + # Remove representation layer if fine-tuning. This may not always be the desired action, + # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? + _logger.warning("Removing representation layer for fine-tuning.") + repr_size = None + + model_cls = DistilledVisionTransformer if distilled else VisionTransformer + model = model_cls( + img_size=img_size, + num_classes=num_classes, + representation_size=repr_size, + **kwargs, + ) + model.default_cfg = default_cfg + + if pretrained: + load_pretrained( + model, + num_classes=num_classes, + in_chans=kwargs.get("in_chans", 3), + filter_fn=partial(checkpoint_filter_fn, model=model), + strict=False, + ) + return model + + +@register_model +def vit_small_patch16_224(pretrained=False, **kwargs): + """ My custom 'small' ViT model. Depth=8, heads=8= mlp_ratio=3.""" + model_kwargs = dict( + patch_size=16, + embed_dim=768, + depth=8, + num_heads=8, + mlp_ratio=3.0, + qkv_bias=False, + norm_layer=nn.LayerNorm, + **kwargs, + ) + if pretrained: + # NOTE my scale was wrong for original weights, leaving this here until I have better ones for this model + model_kwargs.setdefault("qk_scale", 768 ** -0.5) + model = _create_vision_transformer( + "vit_small_patch16_224", pretrained=pretrained, **model_kwargs + ) + return model + + +@register_model +def vit_base_patch16_224(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). 
+ ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer( + "vit_base_patch16_224", pretrained=pretrained, **model_kwargs + ) + return model + + +@register_model +def vit_base_patch32_224(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights. + """ + model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer( + "vit_base_patch32_224", pretrained=pretrained, **model_kwargs + ) + return model + + +@register_model +def vit_base_patch16_384(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer( + "vit_base_patch16_384", pretrained=pretrained, **model_kwargs + ) + return model + + +@register_model +def vit_base_patch32_384(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer( + "vit_base_patch32_384", pretrained=pretrained, **model_kwargs + ) + return model + + +@register_model +def vit_large_patch16_224(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer( + "vit_large_patch16_224", pretrained=pretrained, **model_kwargs + ) + return model + + +@register_model +def vit_large_patch32_224(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights. + """ + model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer( + "vit_large_patch32_224", pretrained=pretrained, **model_kwargs + ) + return model + + +@register_model +def vit_large_patch16_384(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer( + "vit_large_patch16_384", pretrained=pretrained, **model_kwargs + ) + return model + + +@register_model +def vit_large_patch32_384(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. 
+ """ + model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer( + "vit_large_patch32_384", pretrained=pretrained, **model_kwargs + ) + return model + + +@register_model +def vit_base_patch16_224_in21k(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict( + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + representation_size=768, + **kwargs, + ) + model = _create_vision_transformer( + "vit_base_patch16_224_in21k", pretrained=pretrained, **model_kwargs + ) + return model + + +@register_model +def vit_base_patch32_224_in21k(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict( + patch_size=32, + embed_dim=768, + depth=12, + num_heads=12, + representation_size=768, + **kwargs, + ) + model = _create_vision_transformer( + "vit_base_patch32_224_in21k", pretrained=pretrained, **model_kwargs + ) + return model + + +@register_model +def vit_large_patch16_224_in21k(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict( + patch_size=16, + embed_dim=1024, + depth=24, + num_heads=16, + representation_size=1024, + **kwargs, + ) + model = _create_vision_transformer( + "vit_large_patch16_224_in21k", pretrained=pretrained, **model_kwargs + ) + return model + + +@register_model +def vit_large_patch32_224_in21k(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict( + patch_size=32, + embed_dim=1024, + depth=24, + num_heads=16, + representation_size=1024, + **kwargs, + ) + model = _create_vision_transformer( + "vit_large_patch32_224_in21k", pretrained=pretrained, **model_kwargs + ) + return model + + +@register_model +def vit_huge_patch14_224_in21k(pretrained=False, **kwargs): + """ ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: converted weights not currently available, too large for github release hosting. + """ + model_kwargs = dict( + patch_size=14, + embed_dim=1280, + depth=32, + num_heads=16, + representation_size=1280, + **kwargs, + ) + model = _create_vision_transformer( + "vit_huge_patch14_224_in21k", pretrained=pretrained, **model_kwargs + ) + return model + + +@register_model +def vit_base_resnet50_224_in21k(pretrained=False, **kwargs): + """ R50+ViT-B/16 hybrid model from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. 
+ """ + # create a ResNetV2 w/o pre-activation, that uses StdConv and GroupNorm and has 3 stages, no head + backbone = ResNetV2( + layers=(3, 4, 9), + num_classes=0, + global_pool="", + in_chans=kwargs.get("in_chans", 3), + preact=False, + stem_type="same", + conv_layer=StdConv2dSame, + ) + model_kwargs = dict( + embed_dim=768, + depth=12, + num_heads=12, + hybrid_backbone=backbone, + representation_size=768, + **kwargs, + ) + model = _create_vision_transformer( + "vit_base_resnet50_224_in21k", pretrained=pretrained, **model_kwargs + ) + return model + + +@register_model +def vit_base_resnet50_384(pretrained=False, **kwargs): + """ R50+ViT-B/16 hybrid from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + # create a ResNetV2 w/o pre-activation, that uses StdConv and GroupNorm and has 3 stages, no head + backbone = ResNetV2( + layers=(3, 4, 9), + num_classes=0, + global_pool="", + in_chans=kwargs.get("in_chans", 3), + preact=False, + stem_type="same", + conv_layer=StdConv2dSame, + ) + model_kwargs = dict( + embed_dim=768, depth=12, num_heads=12, hybrid_backbone=backbone, **kwargs + ) + model = _create_vision_transformer( + "vit_base_resnet50_384", pretrained=pretrained, **model_kwargs + ) + return model + + +@register_model +def vit_small_resnet26d_224(pretrained=False, **kwargs): + """ Custom ViT small hybrid w/ ResNet26D stride 32. No pretrained weights. + """ + backbone = resnet26d( + pretrained=pretrained, + in_chans=kwargs.get("in_chans", 3), + features_only=True, + out_indices=[4], + ) + model_kwargs = dict( + embed_dim=768, + depth=8, + num_heads=8, + mlp_ratio=3, + hybrid_backbone=backbone, + **kwargs, + ) + model = _create_vision_transformer( + "vit_small_resnet26d_224", pretrained=pretrained, **model_kwargs + ) + return model + + +@register_model +def vit_small_resnet50d_s3_224(pretrained=False, **kwargs): + """ Custom ViT small hybrid w/ ResNet50D 3-stages, stride 16. No pretrained weights. + """ + backbone = resnet50d( + pretrained=pretrained, + in_chans=kwargs.get("in_chans", 3), + features_only=True, + out_indices=[3], + ) + model_kwargs = dict( + embed_dim=768, + depth=8, + num_heads=8, + mlp_ratio=3, + hybrid_backbone=backbone, + **kwargs, + ) + model = _create_vision_transformer( + "vit_small_resnet50d_s3_224", pretrained=pretrained, **model_kwargs + ) + return model + + +@register_model +def vit_base_resnet26d_224(pretrained=False, **kwargs): + """ Custom ViT base hybrid w/ ResNet26D stride 32. No pretrained weights. + """ + backbone = resnet26d( + pretrained=pretrained, + in_chans=kwargs.get("in_chans", 3), + features_only=True, + out_indices=[4], + ) + model_kwargs = dict( + embed_dim=768, depth=12, num_heads=12, hybrid_backbone=backbone, **kwargs + ) + model = _create_vision_transformer( + "vit_base_resnet26d_224", pretrained=pretrained, **model_kwargs + ) + return model + + +@register_model +def vit_base_resnet50d_224(pretrained=False, **kwargs): + """ Custom ViT base hybrid w/ ResNet50D stride 32. No pretrained weights. 
+ """ + backbone = resnet50d( + pretrained=pretrained, + in_chans=kwargs.get("in_chans", 3), + features_only=True, + out_indices=[4], + ) + model_kwargs = dict( + embed_dim=768, depth=12, num_heads=12, hybrid_backbone=backbone, **kwargs + ) + model = _create_vision_transformer( + "vit_base_resnet50d_224", pretrained=pretrained, **model_kwargs + ) + return model + + +@register_model +def vit_deit_tiny_patch16_224(pretrained=False, **kwargs): + """ DeiT-tiny model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer( + "vit_deit_tiny_patch16_224", pretrained=pretrained, **model_kwargs + ) + return model + + +@register_model +def vit_deit_small_patch16_224(pretrained=False, **kwargs): + """ DeiT-small model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer( + "vit_deit_small_patch16_224", pretrained=pretrained, **model_kwargs + ) + return model + + +@register_model +def vit_deit_base_patch16_224(pretrained=False, **kwargs): + """ DeiT base model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer( + "vit_deit_base_patch16_224", pretrained=pretrained, **model_kwargs + ) + return model + + +@register_model +def vit_deit_base_patch16_384(pretrained=False, **kwargs): + """ DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer( + "vit_deit_base_patch16_384", pretrained=pretrained, **model_kwargs + ) + return model + + +@register_model +def vit_deit_tiny_distilled_patch16_224(pretrained=False, **kwargs): + """ DeiT-tiny distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer( + "vit_deit_tiny_distilled_patch16_224", + pretrained=pretrained, + distilled=True, + **model_kwargs, + ) + return model + + +@register_model +def vit_deit_small_distilled_patch16_224(pretrained=False, **kwargs): + """ DeiT-small distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer( + "vit_deit_small_distilled_patch16_224", + pretrained=pretrained, + distilled=True, + **model_kwargs, + ) + return model + + +@register_model +def vit_deit_base_distilled_patch16_224(pretrained=False, **kwargs): + """ DeiT-base distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. 
+ """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer( + "vit_deit_base_distilled_patch16_224", + pretrained=pretrained, + distilled=True, + **model_kwargs, + ) + return model + + +@register_model +def vit_deit_base_distilled_patch16_384(pretrained=False, **kwargs): + """ DeiT-base distilled model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer( + "vit_deit_base_distilled_patch16_384", + pretrained=pretrained, + distilled=True, + **model_kwargs, + ) + return model + diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/clip_decoders.py b/Downstream/multi-modalities-downstream/CoTrain/modules/clip_decoders.py new file mode 100644 index 0000000..3176a16 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/clip_decoders.py @@ -0,0 +1,112 @@ +from requests import patch +import torch +import torch.nn as nn + +from .coca import Residual, ParallelTransformerBlock, CrossAttention +from einops import repeat +import torch.utils.checkpoint as checkpoint +import numpy as np + +from timm.models.layers import trunc_normal_ as __call_trunc_normal_ + + +def trunc_normal_(tensor, mean=0.0, std=1.0): + __call_trunc_normal_(tensor, mean=mean, std=std, a=-std, b=std) + + +class CaptionDecoder(nn.Module): + def __init__( + self, + n_layers, + transformer_width, + vision_width, + transformer_heads, + vocab_size, + num_visual_queries=256, + use_checkpoint=False, + checkpoint_num=0, + ): + super().__init__() + scale = transformer_width**-0.5 + self.visual_queries = nn.Parameter( + scale * torch.randn(num_visual_queries, transformer_width) + ) + dim_head = transformer_width // transformer_heads + ff_mult = 4 + + self.visual_attn_pooler = CrossAttention( + dim=transformer_width, + context_dim=vision_width, + dim_head=dim_head, + heads=transformer_heads, + norm_context=True, + ) + self.visual_pooler_norm = nn.LayerNorm(transformer_width) + + self.text_norm = nn.LayerNorm(transformer_width) + + self.multimodal_layers = nn.ModuleList([]) + for ind in range(n_layers): + self.multimodal_layers.append( + nn.ModuleList( + [ + Residual( + ParallelTransformerBlock( + dim=transformer_width, + dim_head=dim_head, + heads=transformer_heads, + ff_mult=ff_mult, + ) + ), + Residual( + CrossAttention( + dim=transformer_width, + dim_head=dim_head, + heads=transformer_heads, + parallel_ff=True, + ff_mult=ff_mult, + ) + ), + ] + ) + ) + + self.predictor = nn.Sequential( + nn.LayerNorm(transformer_width), + nn.Linear(transformer_width, transformer_width), + nn.GELU(), + nn.LayerNorm(transformer_width), + nn.Linear(transformer_width, vocab_size), + ) + + self.use_checkpoint = use_checkpoint + self.checkpoint_num = checkpoint_num + + def forward(self, image_feats, text_embeds): + # image_feats: # L, N, T, C + # text_feats: embeded text feats # N, L, C + # [L, N, T, C] -> [N, T * L, C] + image_feats = image_feats.permute(1, 0, 2, 3).flatten(1, 2) + visual_queries = repeat( + self.visual_queries, 'n d -> b n d', b=image_feats.shape[0] + ) + image_feats = self.visual_pooler_norm( + self.visual_attn_pooler(visual_queries, image_feats) + ) + + text_embeds = self.text_norm(text_embeds) + # go through multimodal layers + for i, (attn_ff, cross_attn) in enumerate(self.multimodal_layers): + if self.use_checkpoint and i < 
self.checkpoint_num: + text_embeds = checkpoint.checkpoint(attn_ff, text_embeds) + text_embeds = checkpoint.checkpoint( + cross_attn, text_embeds, image_feats + ) + else: + text_embeds = attn_ff(text_embeds) + text_embeds = cross_attn(text_embeds, image_feats) + + logits = self.predictor(text_embeds) + + return logits + diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/clip_module.py b/Downstream/multi-modalities-downstream/CoTrain/modules/clip_module.py new file mode 100644 index 0000000..0db1b71 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/clip_module.py @@ -0,0 +1,753 @@ +from copy import deepcopy +import torch +import torch.nn as nn +import pytorch_lightning as pl +from CoTrain.modules import heads, cotrain_utils +from CoTrain.modules import objectives as objectives +from CoTrain.modules import base_vision_transformer as vit +from CoTrain.modules.text_prompt import text_prompt +import os +import matplotlib.pyplot as plt +import math + +import CoTrain.modules.InternVideo as clip_kc_new +from PIL import Image +import numpy as np +from .clip_param_keys import clip_param_keys, gradually_freeze_by_layer +from .clip_decoders import CaptionDecoder + + +def vis_save(imgs, texts): + # img: [B, T, C, H, W] + # texts: [str] + os.makedirs("vis_test", exist_ok=True) + imgs = imgs.permute(0, 1, 3, 4, 2).cpu().numpy() + imgs = (imgs - imgs.min()) / (imgs.max() - imgs.min()) + for img, text in zip(imgs, texts): + caption = "_".join(text.split()) + os.makedirs(os.path.join("vis_test", caption), exist_ok=True) + for i, im in enumerate(img): + img_path = os.path.join("vis_test", caption, f"{i}.png") + plt.imsave(img_path, im) + + +# utils +@torch.no_grad() +def concat_all_gather(tensor): + """ + Performs all_gather operation on the provided tensors. + *** Warning ***: torch.distributed.all_gather has no gradient. + """ + tensors_gather = [ + torch.ones_like(tensor) for _ in range(torch.distributed.get_world_size()) + ] + torch.distributed.all_gather(tensors_gather, tensor, async_op=False) + + output = torch.cat(tensors_gather, dim=0) + return output + + +@torch.no_grad() +def batch_shuffle_ddp(x): + """ + Batch shuffle, for making use of BatchNorm. + *** Only support DistributedDataParallel (DDP) model. *** + """ + # gather from all gpus + batch_size_this = x.shape[0] + x_gather = concat_all_gather(x) + batch_size_all = x_gather.shape[0] + + num_gpus = batch_size_all // batch_size_this + + # random shuffle index + idx_shuffle = torch.randperm(batch_size_all).cuda() + + # broadcast to all gpus + torch.distributed.broadcast(idx_shuffle, src=0) + + # index for restoring + idx_unshuffle = torch.argsort(idx_shuffle) + + # shuffled index for this gpu + gpu_idx = torch.distributed.get_rank() + idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx] + + return x_gather[idx_this], idx_unshuffle + + +@torch.no_grad() +def batch_unshuffle_ddp(x, idx_unshuffle): + """ + Undo batch shuffle. + *** Only support DistributedDataParallel (DDP) model. 
*** + """ + # gather from all gpus + batch_size_this = x.shape[0] + x_gather = concat_all_gather(x) + batch_size_all = x_gather.shape[0] + + num_gpus = batch_size_all // batch_size_this + + # restored index for this gpu + gpu_idx = torch.distributed.get_rank() + idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx] + + return x_gather[idx_this] + + +class CLIP(pl.LightningModule): + def __init__(self, config): + super().__init__() + self.save_hyperparameters() + self.clip_type = config["clip_type"] + self.prompt_type = config["prompt_type"] + self.mlm_prob = config["mlm_prob"] + self.mim_prob = config["mim_prob"] + self.qa_type = config["clip_qa_type"] + self.mc_type = config["clip_mc_type"] + self.mmt = config["clip_mmt"] + self.alt_data = config["clip_alt_data"] + + if config["clip_type"] == "kc_new": + self.clip, self.clip_preprocess = clip_kc_new.load( + config["clip"], + t_size=config["num_frames"], + n_layers=4, + mlp_dropout=[config["clip_evl_dropout"]] * 4, + cls_dropout=config["clip_evl_dropout"], + no_pretrain=config["clip_no_pretrain"], + init_zero=config["clip_init_zero"], + drop_path_rate=config["clip_dpr"], + device=self.device, + use_checkpoint=config["clip_use_checkpoint"], + checkpoint_num=config["clip_checkpoint_num"], + # mask_text=( + # self.hparams.config["loss_names"]["mlm"] > 0 + # or ( + # self.hparams.config["loss_names"]["openend_vqa"] > 0 + # and self.qa_type in ["zs", "mlm", "vtc_mlm"] + # ) + # ), + ) + else: + raise NotImplementedError( + "Clip type: {} not implemented".format(config["clip_type"]) + ) + + cotrain_utils.set_metrics(self) + self.current_tasks = list() + + vision_width = self.clip.visual.conv1.weight.shape[0] + transformer_width = self.clip.transformer.width + + if self.hparams.config["loss_names"]["openend_vqa"] > 0: + if self.qa_type == "vtc": + hs = vision_width + transformer_width + elif self.qa_type in ["cap"]: + hs = transformer_width + elif self.qa_type in ["vtc_cap", "vtc_mlm"]: + # We cat the vision feature, text feature + # and cross feature together + hs = vision_width + transformer_width * 2 + elif self.qa_type in ["zs", "mlm"]: + pass + else: + raise NotImplementedError("QA Type {} Not Implemented") + if self.qa_type in ["vtc", "cap", "vtc_cap", "vtc_mlm"]: + self.clip.text_projection = None + self.clip.visual_proj = None + vs = self.hparams.config["msrvttqa_label_size"] + self.vqa_classifier = nn.Sequential( + nn.Dropout(config["clip_cls_dropout"]), + nn.Linear(hs, hs * 2), + nn.LayerNorm(hs * 2), + nn.GELU(), + nn.Dropout(config["clip_cls_dropout"]), + # nn.Linear(hs * 2, hs * 2), + # nn.GELU(), + # nn.Dropout(config["clip_cls_dropout"]), + # nn.LayerNorm(hs * 2), + nn.Linear(hs * 2, vs), + ) + self.vqa_classifier.apply(objectives.init_weights) + + if self.hparams.config["loss_names"]["multiple_choice"] > 0: + if self.mc_type == "vtc": + pass + elif self.mc_type == "cap": + hs = transformer_width + elif self.mc_type == "vtc_cap": + # We cat the vision feature, text feature + # and cross feature together + hs = vision_width + transformer_width * 2 + else: + raise NotImplementedError("MC Type {} Not Implemented") + + if self.mc_type in ["cap", "vtc_cap"]: + self.clip.text_projection = None + self.clip.visual_proj = None + self.rank_output = nn.Sequential( + nn.Dropout(config["clip_cls_dropout"]), + nn.Linear(hs, hs * 2), + nn.LayerNorm(hs * 2), + nn.GELU(), + nn.Dropout(config["clip_cls_dropout"]), + nn.Linear(hs * 2, 1), + ) + self.rank_output.apply(objectives.init_weights) + + if ( + self.hparams.config["loss_names"]["cap"] > 0 + or 
( + self.hparams.config["loss_names"]["openend_vqa"] > 0 + and self.qa_type in ["cap", "vtc_cap"] + ) + or ( + self.hparams.config["loss_names"]["multiple_choice"] > 0 + and self.mc_type in ["cap", "vtc_cap"] + ) + ): + vs = self.clip.vocab_size + if self.hparams.config["loss_names"][ + "openend_vqa" + ] > 0 and self.qa_type in ["cap", "vtc_cap"]: + vs = self.hparams.config["msrvttqa_label_size"] + self.caption_decoder = CaptionDecoder( + n_layers=config["clip_cap_decoder_n_layers"], + transformer_width=transformer_width, + vision_width=vision_width, + transformer_heads=transformer_width // 64, + vocab_size=vs, + use_checkpoint=config["clip_use_checkpoint"], + checkpoint_num=config["clip_checkpoint_num"], + ) + if ( + self.hparams.config["loss_names"]["openend_vqa"] > 0 + and self.qa_type in ["cap", "vtc_cap"] + ) or ( + self.hparams.config["loss_names"]["multiple_choice"] > 0 + and self.mc_type in ["cap", "vtc_cap"] + ): + self.caption_decoder.predictor = nn.Identity() + self.caption_decoder.apply(objectives.init_weights) + + # For zs_classify + self.text_ret = None + + if self.hparams.config["load_path"] != "": + # Support multiple load_path + if isinstance(self.hparams.config["load_path"], str): + self.hparams.config["load_path"] = [self.hparams.config["load_path"]] + + for i, load_path in enumerate(self.hparams.config["load_path"]): + ckpt = torch.load( + cotrain_utils.read_load_path(load_path), + map_location="cpu", + ) + if i == 0: + state_dict = ckpt["state_dict"] + continue + for k in state_dict.keys(): + state_dict[k] += ckpt["state_dict"][k] + + for k in state_dict.keys(): + state_dict[k] /= len(self.hparams.config["load_path"]) + + modified_keys = [] + if config["clip_wiseft_coef"] > 0: + c = config["clip_wiseft_coef"] + assert 0 < c < 1.0 + # We assume using clip weight by default + clip_sd = {k: v.cpu() for k, v in self.state_dict().items()} + new_sd = deepcopy(state_dict) + # Directly modify state_dict to load + for k in new_sd: + if k not in clip_sd: + continue + if any(x in k for x in clip_param_keys): + new_sd[k] = clip_sd[k] * c + state_dict[k] * (1.0 - c) + modified_keys.append(k) + state_dict = new_sd + + # Remove mismatch parameters for 336 + sd = {k: v.cpu() for k, v in self.state_dict().items()} + for k in list(state_dict.keys()): + if k not in sd: + continue + if state_dict[k].shape != sd[k].shape: + print( + "!!!!!!!!!!!Size mismatch {} {} {}".format( + k, state_dict[k].shape, sd[k].shape + ) + ) + del state_dict[k] + + self.load_state_dict(state_dict, strict=False) + + if config["clip_freeze"] and config["clip_type"] == "evl": + self.freeze_clip_evl() + + if config["clip_freeze"] and config["clip_type"] == "kc": + self.freeze_clip() + + if config["clip_freeze_text"]: + self.freeze_text() + + self.grad_unfreeze_int = config["clip_grad_unfreeze_int"] + if self.grad_unfreeze_int > 0: + self.freeze_clip() + + if self.mmt: + # MoCo Setting + K = 65536 + m = 0.999 + dim = self.clip.embed_dim + self.K = K + self.m = m + + self.clip_k = deepcopy(self.clip) + for p in self.clip_k.parameters(): + p.requires_grad = False # not update by gradient + + # create the queue + self.register_buffer("queue_visual", torch.randn(dim, K)) + self.register_buffer("queue_text", torch.randn(dim, K)) + self.queue_visual = nn.functional.normalize(self.queue_visual, dim=0) + self.queue_text = nn.functional.normalize(self.queue_text, dim=0) + + self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long)) + + def freeze_clip_evl(self): + for n, p in self.named_parameters(): + if ( + 
"clip.visual" in n + and "clip.visual_ln_post" not in n + and "clip.visual_proj" not in n + ): + p.requires_grad = False + elif "clip.transformer" in n: + p.requires_grad = False + elif "clip.token_embedding" in n: + p.requires_grad = False + elif "clip.positional_embedding" in n: + p.requires_grad = False + + def freeze_clip(self): + for n, p in self.named_parameters(): + # Unfreeze the projection layer + if any(x in n for x in ["text_projection", "visual_proj", "visual.proj"]): + continue + if any(x in n for x in clip_param_keys): + p.requires_grad = False + + def freeze_text(self): + for n, p in self.named_parameters(): + if "clip.transformer" in n: + p.requires_grad = False + elif "clip.token_embedding" in n: + p.requires_grad = False + elif "clip.positional_embedding" in n: + p.requires_grad = False + elif "clip.ln_final" in n: + p.requires_grad = False + elif "clip.text_projection" in n: + p.requires_grad = False + elif "clip.eot_token_embedding" in n: + p.requires_grad = False + + @torch.no_grad() + def mask_text_ids(self, text_ids, special_tokens_mask): + if "openend_vqa" in self.current_tasks: + return self.mask_text_ids_qa_mlm(text_ids) + # See https://github.com/huggingface/transformers/blob/a22db885b41b3a1b302fc206312ee4d99cdf4b7c/src/transformers/data/data_collator.py#L748 + # text_ids, special_tokens_mask: torch.Tensor of shape (N, L) + labels = text_ids.clone().long() + probability_matrix = torch.full(labels.shape, self.mlm_prob, device=self.device) + + # do not mask special_token, including sot_token, eot_token and empty + probability_matrix.masked_fill_(special_tokens_mask, value=0.0) + # probability_matrix[:, 0] = 0.0 + # probability_matrix[torch.arange(labels.shape[0]), text_ids.argmax(dim=-1)] = 0.0 + + masked_indices = torch.bernoulli(probability_matrix).bool() + # We only compute loss on masked tokens, note that padding id is 0 + labels[~masked_indices] = -100 + + # ? Should we use other augmentation in Bert? 
+ return text_ids, labels, masked_indices + + @torch.no_grad() + def mask_text_ids_qa_mlm(self, text_ids): + # The text should be in the format "Question: {} Anwser:" + # We add a mask at the end of the sentence + eot_id = text_ids[torch.arange(text_ids.shape[0]), text_ids.argmax(dim=-1)] + assert torch.numel(torch.unique(eot_id)) == 1 + masked_indices = torch.zeros( + *text_ids.shape, dtype=torch.bool, device=text_ids.device + ) + masked_indices[torch.arange(text_ids.shape[0]), text_ids.argmax(dim=-1)] = 1 + + labels = text_ids.clone() + text_ids[torch.arange(labels.shape[0]), labels.argmax(dim=-1)] = eot_id - 1 + text_ids[torch.arange(labels.shape[0]), labels.argmax(dim=-1) + 1] = eot_id + + return text_ids, None, masked_indices + + @torch.no_grad() + def mask_visual(self, video, mode="video"): + assert mode in ["video", "image"] + N, C, T, H, W = video.shape + patch_size = self.clip.visual.conv1.weight.shape[-1] + N = N * T + L = H * W // (patch_size * patch_size) + + # This is different from text as we are masking a fix number of tokens + Lm = int(self.mim_prob * L) + masked_indices = torch.zeros(N, L) + indices = torch.argsort(torch.rand_like(masked_indices), dim=-1)[:, :Lm] + batch_indices = ( + torch.arange(masked_indices.shape[0]).unsqueeze(-1).expand_as(indices) + ) + masked_indices[batch_indices, indices] = 1 + + masked_indices = masked_indices.bool() + return masked_indices + + @torch.no_grad() + def _momentum_update_key_encoder(self): + """ + Momentum update of the key encoder + """ + for param_q, param_k in zip(self.clip.parameters(), self.clip_k.parameters()): + param_k.data = param_k.data * self.m + param_q.data * (1.0 - self.m) + + @torch.no_grad() + def _dequeue_and_enqueue(self, text_keys, visual_keys): + # gather keys before updating queue + text_keys = concat_all_gather(text_keys) + visual_keys = concat_all_gather(visual_keys) + + batch_size = text_keys.shape[0] + + ptr = int(self.queue_ptr) + assert self.K % batch_size == 0 # for simplicity + + # replace the keys at ptr (dequeue and enqueue) + self.queue_text[:, ptr : ptr + batch_size] = text_keys.T + self.queue_visual[:, ptr : ptr + batch_size] = visual_keys.T + ptr = (ptr + batch_size) % self.K # move pointer + + self.queue_ptr[0] = ptr + + def infer( + self, + batch, + mask_text=False, + mask_video=False, + input_video_only=False, + input_text_only=False, + caption=False, + mode="video", + ): + imgkey = "video" + + # Check configs + assert not input_video_only + assert not input_text_only + if mask_text: + assert self.clip_type in ["ori", "evl", "kc", "kc_new"] + if mask_video: + assert self.clip_type in ["ori", "kc", "kc_new"] + + # Encode Text ######################################################### + if "clip_text_ids" in batch: + # If the clip tokenization is prepared + text_ids, special_tokens_mask = ( + batch["clip_text_ids"], + batch["clip_special_tokens_mask"], + ) + else: # TODO: Remove this else + text_ids, special_tokens_mask = clip_kc_new.tokenize( + batch[f"text"], truncate=True, return_special_tokens_mask=True + ) + text_ids = text_ids.to(self.device) + special_tokens_mask = special_tokens_mask.to(self.device) + + if mask_text: # ! 
This messes with text_feats and text_all_feats + masked_text_ids, text_labels, text_masked_indices = self.mask_text_ids( + text_ids, special_tokens_mask + ) + # [N, C], [N, L, C] + text_feats, text_all_feats = self.clip.encode_text( + masked_text_ids, + masked_indices=text_masked_indices, + return_all_feats=True, + ) + else: + text_feats, text_all_feats = self.clip.encode_text( + text_ids, return_all_feats=True + ) + + # Encode Video ######################################################## + video = batch[imgkey][0] + if self.clip_type in ["ori", "evl", "kc", "kc_new"]: + # [N, T, C, H, W] -> [N, C, T, H, W] + video = video.contiguous().transpose(1, 2) + + # TODO: Remove this if + # [N, C], [L, N, T, C] + # video_feats for contrastive, video_all_feats for mlm, caption + if mask_video: + visual_masked_indices = self.mask_visual(video, mode=mode) + video_feats, video_all_feats = self.clip.encode_video( + video, + return_all_feats=True, + masked_indices=visual_masked_indices, + mode=mode, + ) + else: + video_feats, video_all_feats = self.clip.encode_video( + video, return_all_feats=True, mode=mode + ) + + ret = { + "video": video, # N, C, T, H, W + "text_feats": text_feats, # N, C + "video_feats": video_feats, # N, C + "text_ids": text_ids, # N, L + "special_tokens_mask": special_tokens_mask, # N, L + } + + if self.mmt: + # compute key features + with torch.no_grad(): # no gradient to keys + self._momentum_update_key_encoder() # update the key encoder + assert not any([mask_text, mask_video, mode != "video"]) + # TODO: We have BN, batch shuffle ? + text_feats_k = self.clip_k.encode_text(text_ids, return_all_feats=False) + # img, idx_unshuffle = batch_shuffle_ddp(img) + video_feats_k = self.clip_k.encode_video( + video, return_all_feats=False, mode=mode + ) + # video_feats_k = batch_unshuffle_ddp(video_feats_k, idx_unshuffle) + ret.update( + { + "text_feats_k": text_feats_k, + "video_feats_k": video_feats_k, + } + ) + + # Mask Text Decoder ################################################## + if mask_text: + text_con_feats = text_feats + text_feats = self.text_decoder(text_all_feats, video_all_feats) + ret.update( + { + # ! 
Modified the original, no other loss should do the same + "text_feats": text_feats, # N, C + "text_labels": text_labels, # N, L + "text_contrastive_feats": text_con_feats, # N, L + } + ) + + # Mask Visual Decoder################################################# + if mask_video and hasattr(self, "visual_decoder"): + mim_video_feats = self.visual_decoder( + video_all_feats, visual_masked_indices + ) + ret.update( + { + "mim_video_feats": mim_video_feats, # N, L, C + "visual_masked_indices": visual_masked_indices, # N, L + } + ) + + # Caption decoder ################################################## + if caption: + cap_logits = self.caption_decoder(video_all_feats, text_all_feats[:, :-1]) + ret.update( + { + "cap_logits": cap_logits, + } + ) + + return ret + + def sanity_check(self): + image = ( + self.clip_preprocess( + Image.open( + "/mnt/petrelfs/liyizhuo/projects/all-in-one-cotrain/CoTraining/dog.png" + ) + ) + .unsqueeze(0) + .to(self.device) + ) + T = 16 + B, C, H, W = image.shape + video = image.repeat(T, 1, 1, 1).reshape(T, B, C, H, W).permute(1, 0, 2, 3, 4) + self.eval() + infer = self.infer( + { + "video": [video], + "text": ["a diagram", "a dog", "a cat"], + }, + mode="image", + ) + score = ( + self.clip.logit_scale.exp() * infer["video_feats"] @ infer["text_feats"].t() + ) + assert False, (score.softmax(dim=-1), self.clip.logit_scale.item()) + + def forward(self, batch, batch_idx=None, mode="video"): + # self.sanity_check() + with torch.no_grad(): + self.clip.logit_scale.clamp_(0, math.log(100)) + ret = dict() + if len(self.current_tasks) == 0: + ret.update(self.infer(batch, mode=mode)) + return ret + + if "contrastive" in self.current_tasks: + mask_text = "mlm" in self.current_tasks + mask_video = "mim" in self.current_tasks + caption = "cap" in self.current_tasks + if any([mask_text, mask_video, caption]): + contrastive_ret, contrastive_infer = objectives.compute_contrastive( + self, + batch, + return_infer=True, + mask_text=mask_text, + mask_video=mask_video, + caption=caption, + mode=mode, + ) + ret.update(contrastive_ret) + else: + ret.update(objectives.compute_contrastive(self, batch, mode=mode)) + + if "multiple_choice" in self.current_tasks: + ret.update(objectives.compute_multiple_choice(self, batch)) + + if "openend_vqa" in self.current_tasks: + ret.update(objectives.compute_openend_vqa(self, batch)) + + if "mlm" in self.current_tasks: + if "contrastive" in self.current_tasks: # Skip infer + ret.update(objectives.compute_mlm(self, batch, infer=contrastive_infer)) + else: + ret.update(objectives.compute_mlm(self, batch)) + + if "mim" in self.current_tasks and hasattr(self, "visual_decoder"): + if "contrastive" in self.current_tasks: # Skip infer + ret.update( + objectives.compute_mim( + self, batch, infer=contrastive_infer, mode=mode + ) + ) + else: + ret.update(objectives.compute_mim(self, batch)) + + if "cap" in self.current_tasks: + if "contrastive" in self.current_tasks: # Skip infer + ret.update( + objectives.compute_cap( + self, batch, infer=contrastive_infer, mode=mode + ) + ) + else: + ret.update(objectives.compute_cap(self, batch, mode=mode)) + + if "zs_classify" in self.current_tasks: + if self.text_ret is None: + # print(f"Generate text features for in batch-{batch_idx}") + self.text_ret = self.forward_text() + ret.update(objectives.compute_zs_classify(self, batch, self.text_ret)) + + return ret + + def forward_text(self): + classes, num_text_aug, _ = text_prompt(prompt_type=self.prompt_type) + text_inputs = classes.to(self.device) + text_feats = 
self.clip.encode_text(text_inputs) + # text_feats /= text_feats.norm(dim=-1, keepdim=True) + + ret = { + "text_feats": text_feats, # num_text_aug * num_classes, C + "num_text_aug": num_text_aug, + } + return ret + + def forward_video(self, batch): + img = batch["video"][0] + if self.clip_type in ["ori", "evl", "kc", "kc_new"]: + # [B, T, C, H, W] -> [B, C, T, H, W] + img = img.contiguous().transpose(1, 2) + video_feats = self.clip.encode_video(img) + + ret = { + "video_feats": video_feats, # N, C + } + return ret + + def training_step(self, batch, batch_idx): + # gradually_freeze_by_layer(self, self.global_step, self.grad_unfreeze_int) + cotrain_utils.set_task(self) + # self.momentum_checkpoint() + # co-training + if "v" in batch and "i" in batch: + video_output, image_output = {}, {} + if not self.alt_data or batch_idx % 2 == 0: + video_output = self(batch["v"], mode="video") + if not self.alt_data or batch_idx % 2 == 1: + image_output = self(batch["i"], mode="image") + total_loss = sum([v for k, v in video_output.items() if "loss" in k]) + sum( + [v for k, v in image_output.items() if "loss" in k] + ) + else: + output = self(batch, mode="video") + total_loss = sum([v for k, v in output.items() if "loss" in k]) + return total_loss + + def training_epoch_end(self, outs): + cotrain_utils.epoch_wrapup(self) + + def validation_step(self, batch, batch_idx): + cotrain_utils.set_task(self) + if "v" in batch and "i" in batch: + video_output = self(batch["v"], mode="video") + image_output = self(batch["i"], mode="image") + else: + output = self(batch, mode="video") + + def validation_epoch_end(self, outs): + cotrain_utils.epoch_wrapup(self) + self.text_ret = None + + def test_step(self, batch, batch_idx): + cotrain_utils.set_task(self) + if "v" in batch and "i" in batch: + video_output = self(batch["v"], mode="video") + image_output = self(batch["i"], mode="image") + else: + output = self(batch, mode="video") + ret = dict() + + if self.hparams.config["loss_names"]["vqa"] > 0: + ret.update(objectives.vqa_test_step(self, batch, image_output)) + + return ret + + def test_epoch_end(self, outs): + if isinstance(self.hparams.config["load_path"], str): + model_name = self.hparams.config["load_path"].split("/")[-1][:-5] + else: + model_name = "multiple" + + if self.hparams.config["loss_names"]["vqa"] > 0: + objectives.vqa_test_wrapup(outs, model_name) + cotrain_utils.epoch_wrapup(self) + + def configure_optimizers(self): + return cotrain_utils.set_schedule(self) diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/clip_param_keys.py b/Downstream/multi-modalities-downstream/CoTrain/modules/clip_param_keys.py new file mode 100644 index 0000000..a70e510 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/clip_param_keys.py @@ -0,0 +1,368 @@ +clip_param_keys = { + 'positional_embedding', + 'text_projection', + 'visual.class_embedding', + 'visual.positional_embedding', + 'visual.conv1.weight', + 'visual.ln_pre.weight', + 'visual.ln_pre.bias', + 'visual.transformer.resblocks.0.attn.in_proj_weight', + 'visual.transformer.resblocks.0.attn.in_proj_bias', + 'visual.transformer.resblocks.0.attn.out_proj.weight', + 'visual.transformer.resblocks.0.attn.out_proj.bias', + 'visual.transformer.resblocks.0.ln_1.weight', + 'visual.transformer.resblocks.0.ln_1.bias', + 'visual.transformer.resblocks.0.mlp.c_fc.weight', + 'visual.transformer.resblocks.0.mlp.c_fc.bias', + 'visual.transformer.resblocks.0.mlp.c_proj.weight', + 'visual.transformer.resblocks.0.mlp.c_proj.bias', + 
'visual.transformer.resblocks.0.ln_2.weight', + 'visual.transformer.resblocks.0.ln_2.bias', + 'visual.transformer.resblocks.1.attn.in_proj_weight', + 'visual.transformer.resblocks.1.attn.in_proj_bias', + 'visual.transformer.resblocks.1.attn.out_proj.weight', + 'visual.transformer.resblocks.1.attn.out_proj.bias', + 'visual.transformer.resblocks.1.ln_1.weight', + 'visual.transformer.resblocks.1.ln_1.bias', + 'visual.transformer.resblocks.1.mlp.c_fc.weight', + 'visual.transformer.resblocks.1.mlp.c_fc.bias', + 'visual.transformer.resblocks.1.mlp.c_proj.weight', + 'visual.transformer.resblocks.1.mlp.c_proj.bias', + 'visual.transformer.resblocks.1.ln_2.weight', + 'visual.transformer.resblocks.1.ln_2.bias', + 'visual.transformer.resblocks.2.attn.in_proj_weight', + 'visual.transformer.resblocks.2.attn.in_proj_bias', + 'visual.transformer.resblocks.2.attn.out_proj.weight', + 'visual.transformer.resblocks.2.attn.out_proj.bias', + 'visual.transformer.resblocks.2.ln_1.weight', + 'visual.transformer.resblocks.2.ln_1.bias', + 'visual.transformer.resblocks.2.mlp.c_fc.weight', + 'visual.transformer.resblocks.2.mlp.c_fc.bias', + 'visual.transformer.resblocks.2.mlp.c_proj.weight', + 'visual.transformer.resblocks.2.mlp.c_proj.bias', + 'visual.transformer.resblocks.2.ln_2.weight', + 'visual.transformer.resblocks.2.ln_2.bias', + 'visual.transformer.resblocks.3.attn.in_proj_weight', + 'visual.transformer.resblocks.3.attn.in_proj_bias', + 'visual.transformer.resblocks.3.attn.out_proj.weight', + 'visual.transformer.resblocks.3.attn.out_proj.bias', + 'visual.transformer.resblocks.3.ln_1.weight', + 'visual.transformer.resblocks.3.ln_1.bias', + 'visual.transformer.resblocks.3.mlp.c_fc.weight', + 'visual.transformer.resblocks.3.mlp.c_fc.bias', + 'visual.transformer.resblocks.3.mlp.c_proj.weight', + 'visual.transformer.resblocks.3.mlp.c_proj.bias', + 'visual.transformer.resblocks.3.ln_2.weight', + 'visual.transformer.resblocks.3.ln_2.bias', + 'visual.transformer.resblocks.4.attn.in_proj_weight', + 'visual.transformer.resblocks.4.attn.in_proj_bias', + 'visual.transformer.resblocks.4.attn.out_proj.weight', + 'visual.transformer.resblocks.4.attn.out_proj.bias', + 'visual.transformer.resblocks.4.ln_1.weight', + 'visual.transformer.resblocks.4.ln_1.bias', + 'visual.transformer.resblocks.4.mlp.c_fc.weight', + 'visual.transformer.resblocks.4.mlp.c_fc.bias', + 'visual.transformer.resblocks.4.mlp.c_proj.weight', + 'visual.transformer.resblocks.4.mlp.c_proj.bias', + 'visual.transformer.resblocks.4.ln_2.weight', + 'visual.transformer.resblocks.4.ln_2.bias', + 'visual.transformer.resblocks.5.attn.in_proj_weight', + 'visual.transformer.resblocks.5.attn.in_proj_bias', + 'visual.transformer.resblocks.5.attn.out_proj.weight', + 'visual.transformer.resblocks.5.attn.out_proj.bias', + 'visual.transformer.resblocks.5.ln_1.weight', + 'visual.transformer.resblocks.5.ln_1.bias', + 'visual.transformer.resblocks.5.mlp.c_fc.weight', + 'visual.transformer.resblocks.5.mlp.c_fc.bias', + 'visual.transformer.resblocks.5.mlp.c_proj.weight', + 'visual.transformer.resblocks.5.mlp.c_proj.bias', + 'visual.transformer.resblocks.5.ln_2.weight', + 'visual.transformer.resblocks.5.ln_2.bias', + 'visual.transformer.resblocks.6.attn.in_proj_weight', + 'visual.transformer.resblocks.6.attn.in_proj_bias', + 'visual.transformer.resblocks.6.attn.out_proj.weight', + 'visual.transformer.resblocks.6.attn.out_proj.bias', + 'visual.transformer.resblocks.6.ln_1.weight', + 'visual.transformer.resblocks.6.ln_1.bias', + 'visual.transformer.resblocks.6.mlp.c_fc.weight', + 
'visual.transformer.resblocks.6.mlp.c_fc.bias', + 'visual.transformer.resblocks.6.mlp.c_proj.weight', + 'visual.transformer.resblocks.6.mlp.c_proj.bias', + 'visual.transformer.resblocks.6.ln_2.weight', + 'visual.transformer.resblocks.6.ln_2.bias', + 'visual.transformer.resblocks.7.attn.in_proj_weight', + 'visual.transformer.resblocks.7.attn.in_proj_bias', + 'visual.transformer.resblocks.7.attn.out_proj.weight', + 'visual.transformer.resblocks.7.attn.out_proj.bias', + 'visual.transformer.resblocks.7.ln_1.weight', + 'visual.transformer.resblocks.7.ln_1.bias', + 'visual.transformer.resblocks.7.mlp.c_fc.weight', + 'visual.transformer.resblocks.7.mlp.c_fc.bias', + 'visual.transformer.resblocks.7.mlp.c_proj.weight', + 'visual.transformer.resblocks.7.mlp.c_proj.bias', + 'visual.transformer.resblocks.7.ln_2.weight', + 'visual.transformer.resblocks.7.ln_2.bias', + 'visual.transformer.resblocks.8.attn.in_proj_weight', + 'visual.transformer.resblocks.8.attn.in_proj_bias', + 'visual.transformer.resblocks.8.attn.out_proj.weight', + 'visual.transformer.resblocks.8.attn.out_proj.bias', + 'visual.transformer.resblocks.8.ln_1.weight', + 'visual.transformer.resblocks.8.ln_1.bias', + 'visual.transformer.resblocks.8.mlp.c_fc.weight', + 'visual.transformer.resblocks.8.mlp.c_fc.bias', + 'visual.transformer.resblocks.8.mlp.c_proj.weight', + 'visual.transformer.resblocks.8.mlp.c_proj.bias', + 'visual.transformer.resblocks.8.ln_2.weight', + 'visual.transformer.resblocks.8.ln_2.bias', + 'visual.transformer.resblocks.9.attn.in_proj_weight', + 'visual.transformer.resblocks.9.attn.in_proj_bias', + 'visual.transformer.resblocks.9.attn.out_proj.weight', + 'visual.transformer.resblocks.9.attn.out_proj.bias', + 'visual.transformer.resblocks.9.ln_1.weight', + 'visual.transformer.resblocks.9.ln_1.bias', + 'visual.transformer.resblocks.9.mlp.c_fc.weight', + 'visual.transformer.resblocks.9.mlp.c_fc.bias', + 'visual.transformer.resblocks.9.mlp.c_proj.weight', + 'visual.transformer.resblocks.9.mlp.c_proj.bias', + 'visual.transformer.resblocks.9.ln_2.weight', + 'visual.transformer.resblocks.9.ln_2.bias', + 'visual.transformer.resblocks.10.attn.in_proj_weight', + 'visual.transformer.resblocks.10.attn.in_proj_bias', + 'visual.transformer.resblocks.10.attn.out_proj.weight', + 'visual.transformer.resblocks.10.attn.out_proj.bias', + 'visual.transformer.resblocks.10.ln_1.weight', + 'visual.transformer.resblocks.10.ln_1.bias', + 'visual.transformer.resblocks.10.mlp.c_fc.weight', + 'visual.transformer.resblocks.10.mlp.c_fc.bias', + 'visual.transformer.resblocks.10.mlp.c_proj.weight', + 'visual.transformer.resblocks.10.mlp.c_proj.bias', + 'visual.transformer.resblocks.10.ln_2.weight', + 'visual.transformer.resblocks.10.ln_2.bias', + 'visual.transformer.resblocks.11.attn.in_proj_weight', + 'visual.transformer.resblocks.11.attn.in_proj_bias', + 'visual.transformer.resblocks.11.attn.out_proj.weight', + 'visual.transformer.resblocks.11.attn.out_proj.bias', + 'visual.transformer.resblocks.11.ln_1.weight', + 'visual.transformer.resblocks.11.ln_1.bias', + 'visual.transformer.resblocks.11.mlp.c_fc.weight', + 'visual.transformer.resblocks.11.mlp.c_fc.bias', + 'visual.transformer.resblocks.11.mlp.c_proj.weight', + 'visual.transformer.resblocks.11.mlp.c_proj.bias', + 'visual.transformer.resblocks.11.ln_2.weight', + 'visual.transformer.resblocks.11.ln_2.bias', + 'visual.ln_post.weight', + 'visual.ln_post.bias', + 'visual_ln_post.weight', + 'visual_ln_post.bias', + 'transformer.resblocks.0.attn.in_proj_weight', + 
'transformer.resblocks.0.attn.in_proj_bias', + 'transformer.resblocks.0.attn.out_proj.weight', + 'transformer.resblocks.0.attn.out_proj.bias', + 'transformer.resblocks.0.ln_1.weight', + 'transformer.resblocks.0.ln_1.bias', + 'transformer.resblocks.0.mlp.c_fc.weight', + 'transformer.resblocks.0.mlp.c_fc.bias', + 'transformer.resblocks.0.mlp.c_proj.weight', + 'transformer.resblocks.0.mlp.c_proj.bias', + 'transformer.resblocks.0.ln_2.weight', + 'transformer.resblocks.0.ln_2.bias', + 'transformer.resblocks.1.attn.in_proj_weight', + 'transformer.resblocks.1.attn.in_proj_bias', + 'transformer.resblocks.1.attn.out_proj.weight', + 'transformer.resblocks.1.attn.out_proj.bias', + 'transformer.resblocks.1.ln_1.weight', + 'transformer.resblocks.1.ln_1.bias', + 'transformer.resblocks.1.mlp.c_fc.weight', + 'transformer.resblocks.1.mlp.c_fc.bias', + 'transformer.resblocks.1.mlp.c_proj.weight', + 'transformer.resblocks.1.mlp.c_proj.bias', + 'transformer.resblocks.1.ln_2.weight', + 'transformer.resblocks.1.ln_2.bias', + 'transformer.resblocks.2.attn.in_proj_weight', + 'transformer.resblocks.2.attn.in_proj_bias', + 'transformer.resblocks.2.attn.out_proj.weight', + 'transformer.resblocks.2.attn.out_proj.bias', + 'transformer.resblocks.2.ln_1.weight', + 'transformer.resblocks.2.ln_1.bias', + 'transformer.resblocks.2.mlp.c_fc.weight', + 'transformer.resblocks.2.mlp.c_fc.bias', + 'transformer.resblocks.2.mlp.c_proj.weight', + 'transformer.resblocks.2.mlp.c_proj.bias', + 'transformer.resblocks.2.ln_2.weight', + 'transformer.resblocks.2.ln_2.bias', + 'transformer.resblocks.3.attn.in_proj_weight', + 'transformer.resblocks.3.attn.in_proj_bias', + 'transformer.resblocks.3.attn.out_proj.weight', + 'transformer.resblocks.3.attn.out_proj.bias', + 'transformer.resblocks.3.ln_1.weight', + 'transformer.resblocks.3.ln_1.bias', + 'transformer.resblocks.3.mlp.c_fc.weight', + 'transformer.resblocks.3.mlp.c_fc.bias', + 'transformer.resblocks.3.mlp.c_proj.weight', + 'transformer.resblocks.3.mlp.c_proj.bias', + 'transformer.resblocks.3.ln_2.weight', + 'transformer.resblocks.3.ln_2.bias', + 'transformer.resblocks.4.attn.in_proj_weight', + 'transformer.resblocks.4.attn.in_proj_bias', + 'transformer.resblocks.4.attn.out_proj.weight', + 'transformer.resblocks.4.attn.out_proj.bias', + 'transformer.resblocks.4.ln_1.weight', + 'transformer.resblocks.4.ln_1.bias', + 'transformer.resblocks.4.mlp.c_fc.weight', + 'transformer.resblocks.4.mlp.c_fc.bias', + 'transformer.resblocks.4.mlp.c_proj.weight', + 'transformer.resblocks.4.mlp.c_proj.bias', + 'transformer.resblocks.4.ln_2.weight', + 'transformer.resblocks.4.ln_2.bias', + 'transformer.resblocks.5.attn.in_proj_weight', + 'transformer.resblocks.5.attn.in_proj_bias', + 'transformer.resblocks.5.attn.out_proj.weight', + 'transformer.resblocks.5.attn.out_proj.bias', + 'transformer.resblocks.5.ln_1.weight', + 'transformer.resblocks.5.ln_1.bias', + 'transformer.resblocks.5.mlp.c_fc.weight', + 'transformer.resblocks.5.mlp.c_fc.bias', + 'transformer.resblocks.5.mlp.c_proj.weight', + 'transformer.resblocks.5.mlp.c_proj.bias', + 'transformer.resblocks.5.ln_2.weight', + 'transformer.resblocks.5.ln_2.bias', + 'transformer.resblocks.6.attn.in_proj_weight', + 'transformer.resblocks.6.attn.in_proj_bias', + 'transformer.resblocks.6.attn.out_proj.weight', + 'transformer.resblocks.6.attn.out_proj.bias', + 'transformer.resblocks.6.ln_1.weight', + 'transformer.resblocks.6.ln_1.bias', + 'transformer.resblocks.6.mlp.c_fc.weight', + 'transformer.resblocks.6.mlp.c_fc.bias', + 
'transformer.resblocks.6.mlp.c_proj.weight', + 'transformer.resblocks.6.mlp.c_proj.bias', + 'transformer.resblocks.6.ln_2.weight', + 'transformer.resblocks.6.ln_2.bias', + 'transformer.resblocks.7.attn.in_proj_weight', + 'transformer.resblocks.7.attn.in_proj_bias', + 'transformer.resblocks.7.attn.out_proj.weight', + 'transformer.resblocks.7.attn.out_proj.bias', + 'transformer.resblocks.7.ln_1.weight', + 'transformer.resblocks.7.ln_1.bias', + 'transformer.resblocks.7.mlp.c_fc.weight', + 'transformer.resblocks.7.mlp.c_fc.bias', + 'transformer.resblocks.7.mlp.c_proj.weight', + 'transformer.resblocks.7.mlp.c_proj.bias', + 'transformer.resblocks.7.ln_2.weight', + 'transformer.resblocks.7.ln_2.bias', + 'transformer.resblocks.8.attn.in_proj_weight', + 'transformer.resblocks.8.attn.in_proj_bias', + 'transformer.resblocks.8.attn.out_proj.weight', + 'transformer.resblocks.8.attn.out_proj.bias', + 'transformer.resblocks.8.ln_1.weight', + 'transformer.resblocks.8.ln_1.bias', + 'transformer.resblocks.8.mlp.c_fc.weight', + 'transformer.resblocks.8.mlp.c_fc.bias', + 'transformer.resblocks.8.mlp.c_proj.weight', + 'transformer.resblocks.8.mlp.c_proj.bias', + 'transformer.resblocks.8.ln_2.weight', + 'transformer.resblocks.8.ln_2.bias', + 'transformer.resblocks.9.attn.in_proj_weight', + 'transformer.resblocks.9.attn.in_proj_bias', + 'transformer.resblocks.9.attn.out_proj.weight', + 'transformer.resblocks.9.attn.out_proj.bias', + 'transformer.resblocks.9.ln_1.weight', + 'transformer.resblocks.9.ln_1.bias', + 'transformer.resblocks.9.mlp.c_fc.weight', + 'transformer.resblocks.9.mlp.c_fc.bias', + 'transformer.resblocks.9.mlp.c_proj.weight', + 'transformer.resblocks.9.mlp.c_proj.bias', + 'transformer.resblocks.9.ln_2.weight', + 'transformer.resblocks.9.ln_2.bias', + 'transformer.resblocks.10.attn.in_proj_weight', + 'transformer.resblocks.10.attn.in_proj_bias', + 'transformer.resblocks.10.attn.out_proj.weight', + 'transformer.resblocks.10.attn.out_proj.bias', + 'transformer.resblocks.10.ln_1.weight', + 'transformer.resblocks.10.ln_1.bias', + 'transformer.resblocks.10.mlp.c_fc.weight', + 'transformer.resblocks.10.mlp.c_fc.bias', + 'transformer.resblocks.10.mlp.c_proj.weight', + 'transformer.resblocks.10.mlp.c_proj.bias', + 'transformer.resblocks.10.ln_2.weight', + 'transformer.resblocks.10.ln_2.bias', + 'transformer.resblocks.11.attn.in_proj_weight', + 'transformer.resblocks.11.attn.in_proj_bias', + 'transformer.resblocks.11.attn.out_proj.weight', + 'transformer.resblocks.11.attn.out_proj.bias', + 'transformer.resblocks.11.ln_1.weight', + 'transformer.resblocks.11.ln_1.bias', + 'transformer.resblocks.11.mlp.c_fc.weight', + 'transformer.resblocks.11.mlp.c_fc.bias', + 'transformer.resblocks.11.mlp.c_proj.weight', + 'transformer.resblocks.11.mlp.c_proj.bias', + 'transformer.resblocks.11.ln_2.weight', + 'transformer.resblocks.11.ln_2.bias', + 'token_embedding.weight', + 'visual.proj', + 'visual_proj', + 'ln_final.weight', + 'ln_final.bias', +} + + +def gradually_freeze_by_layer(model, global_step, interval, unfreeze_type="both"): + try: + visual_n_layers = model.clip.visual.transformer.layers + except: + visual_n_layers = model.visual.transformer.layers + + try: + text_n_layers = model.clip.transformer.layers + except: + text_n_layers = model.transformer.layers + + if interval <= 0: + return + + layer_to_unfreeze = global_step // interval + if ( + visual_n_layers + 1 < layer_to_unfreeze + and text_n_layers + 1 < layer_to_unfreeze + ): # Do nothing + return + embeddings = [ + "token_embedding", + 
"visual.positional_embedding" "visual.class_embedding", + "positional_embedding", + ] + visual_first_keys = [ + "visual.conv1.weight", + "visual.ln_pre", + ] + text_first_keys = [] + visual_last_keys = ["visual.ln_post", "visual_ln_post" "visual_proj", "visual.proj"] + text_last_keys = ["ln_final"] + + def set_unfreeze(n, p, l): + if any(x in n for x in l): + p.requires_grad = True + + for n, p in model.named_parameters(): + if layer_to_unfreeze == 0: # At the beginning of training + set_unfreeze(n, p, embeddings) # Unfreeze embeddings + if unfreeze_type in ["both", "visual"]: + set_unfreeze(n, p, visual_last_keys) + if unfreeze_type in ["both", "text"]: # Unfreeze text last layers + set_unfreeze(n, p, text_last_keys) + continue + + if unfreeze_type in ["both", "visual"]: + if layer_to_unfreeze <= visual_n_layers: # Unfreeze n-th visual layer + param_prefix = f"visual.transformer.resblocks.{visual_n_layers - layer_to_unfreeze}" + set_unfreeze(n, p, [param_prefix]) + else: # Unfreeze first visual layers + set_unfreeze(n, p, visual_first_keys) + if unfreeze_type in ["both", "text"]: + if layer_to_unfreeze <= text_n_layers: # Unfreeze n-th text layer + param_prefix = ( + f"transformer.resblocks.{visual_n_layers - layer_to_unfreeze}" + ) + set_unfreeze(n, p, [param_prefix]) + else: # Unfreeze first visual layers + set_unfreeze(n, p, text_first_keys) diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/coca.py b/Downstream/multi-modalities-downstream/CoTrain/modules/coca.py new file mode 100644 index 0000000..98bca7b --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/coca.py @@ -0,0 +1,478 @@ +# Modified from https://github.com/lucidrains/CoCa-pytorch/blob/main/coca_pytorch/coca_pytorch.py + +from turtle import forward +import torch +from torch import einsum, nn +import torch.nn.functional as F +from einops import rearrange, repeat + +# helper functions + +def exists(val): + return val is not None + +def default(val, d): + return val if exists(val) else d + +# for controlling freezing of parameters + +def set_module_requires_grad_(module, requires_grad): + for param in module.parameters(): + param.requires_grad = requires_grad + +def freeze_all_layers_(module): + set_module_requires_grad_(module, False) + +def unfreeze_all_layers_(module): + set_module_requires_grad_(module, True) + +def freeze_model_and_make_eval_(model): + model.eval() + freeze_all_layers_(model) + +# normalization +# they use layernorm without bias, something that pytorch does not offer + + +class LayerNorm(nn.Module): + def __init__(self, dim): + super().__init__() + self.gamma = nn.Parameter(torch.ones(dim)) + self.register_buffer("beta", torch.zeros(dim)) + + def forward(self, x): + return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta) + +# residual + + +class Residual(nn.Module): + def __init__(self, fn): + super().__init__() + self.fn = fn + + def forward(self, x, *args, **kwargs): + return self.fn(x, *args, **kwargs) + x + + +# rotary positional embedding +# https://arxiv.org/abs/2104.09864 + + +class RotaryEmbedding(nn.Module): + def __init__(self, dim): + super().__init__() + inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim)) + self.register_buffer("inv_freq", inv_freq) + + def forward(self, max_seq_len, *, device): + seq = torch.arange(max_seq_len, device=device, dtype=self.inv_freq.dtype) + freqs = einsum("i , j -> i j", seq, self.inv_freq) + return torch.cat((freqs, freqs), dim=-1) + + +def rotate_half(x): + x = rearrange(x, "... (j d) -> ... 
j d", j=2) + x1, x2 = x.unbind(dim=-2) + return torch.cat((-x2, x1), dim=-1) + + +def apply_rotary_pos_emb(pos, t): + return (t * pos.cos()) + (rotate_half(t) * pos.sin()) + + +# classic Noam Shazeer paper, except here they use SwiGLU instead of the more popular GEGLU for gating the feedforward +# https://arxiv.org/abs/2002.05202 + + +class SwiGLU(nn.Module): + def forward(self, x): + x, gate = x.chunk(2, dim=-1) + return F.silu(gate) * x + + +# parallel attention and feedforward with residual +# discovered by Wang et al + EleutherAI from GPT-J fame + + +class ParallelTransformerBlock(nn.Module): + def __init__(self, dim, dim_head=64, heads=8, ff_mult=4): + super().__init__() + self.norm = LayerNorm(dim) + + attn_inner_dim = dim_head * heads + ff_inner_dim = dim * ff_mult + self.fused_dims = (attn_inner_dim, dim_head, dim_head, (ff_inner_dim * 2)) + + self.heads = heads + self.scale = dim_head**-0.5 + self.rotary_emb = RotaryEmbedding(dim_head) + + self.fused_attn_ff_proj = nn.Linear(dim, sum(self.fused_dims), bias=False) + self.attn_out = nn.Linear(attn_inner_dim, dim, bias=False) + + self.ff_out = nn.Sequential( + SwiGLU(), + nn.Linear(ff_inner_dim, dim, bias=False) + ) + + # for caching causal mask and rotary embeddings + + self.register_buffer("mask", None, persistent=False) + self.register_buffer("pos_emb", None, persistent=False) + + def get_mask(self, n, device): + if self.mask is not None and self.mask.shape[-1] >= n: + return self.mask[:n, :n] + + mask = torch.ones((n, n), device=device, dtype=torch.bool).triu(1) + self.register_buffer("mask", mask, persistent=False) + return mask + + def get_rotary_embedding(self, n, device): + if self.pos_emb is not None and self.pos_emb.shape[-2] >= n: + return self.pos_emb[:n] + + pos_emb = self.rotary_emb(n, device=device) + self.register_buffer("pos_emb", pos_emb, persistent=False) + return pos_emb + + def forward(self, x, attn_mask=None): + """ + einstein notation + b - batch + h - heads + n, i, j - sequence length (base sequence length, source, target) + d - feature dimension + """ + + n, device, h = x.shape[1], x.device, self.heads + + # pre layernorm + + x = self.norm(x) + + # attention queries, keys, values, and feedforward inner + + q, k, v, ff = self.fused_attn_ff_proj(x).split(self.fused_dims, dim=-1) + + # split heads + # they use multi-query single-key-value attention, yet another Noam Shazeer paper + # they found no performance loss past a certain scale, and more efficient decoding obviously + # https://arxiv.org/abs/1911.02150 + + q = rearrange(q, "b n (h d) -> b h n d", h=h) + + # rotary embeddings + + positions = self.get_rotary_embedding(n, device) + q, k = map(lambda t: apply_rotary_pos_emb(positions, t), (q, k)) + + # scale + + q = q * self.scale + + # similarity + + sim = einsum("b h i d, b j d -> b h i j", q, k) + + # causal mask + + causal_mask = self.get_mask(n, device) + sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max) + + # extra attention mask - for masking out attention from text CLS token to padding + + if exists(attn_mask): + attn_mask = rearrange(attn_mask, 'b i j -> b 1 i j') + sim = sim.masked_fill(~attn_mask, -torch.finfo(sim.dtype).max) + + # attention + + sim = sim - sim.amax(dim=-1, keepdim=True).detach() + attn = sim.softmax(dim=-1) + + # aggregate values + + out = einsum("b h i j, b j d -> b h i d", attn, v) + + # merge heads + + out = rearrange(out, "b h n d -> b n (h d)") + return self.attn_out(out) + self.ff_out(ff) + +# cross attention - using multi-query + one-headed key / values as in 
PaLM w/ optional parallel feedforward + +class CrossAttention(nn.Module): + def __init__( + self, + dim, + *, + context_dim=None, + dim_head=64, + heads=8, + parallel_ff=False, + ff_mult=4, + norm_context=False + ): + super().__init__() + self.heads = heads + self.scale = dim_head ** -0.5 + inner_dim = heads * dim_head + context_dim = default(context_dim, dim) + + self.norm = LayerNorm(dim) + self.context_norm = LayerNorm(context_dim) if norm_context else nn.Identity() + + self.to_q = nn.Linear(dim, inner_dim, bias=False) + self.to_kv = nn.Linear(context_dim, dim_head * 2, bias=False) + self.to_out = nn.Linear(inner_dim, dim, bias=False) + + # whether to have parallel feedforward + + ff_inner_dim = ff_mult * dim + + self.ff = nn.Sequential( + nn.Linear(dim, ff_inner_dim * 2, bias=False), + SwiGLU(), + nn.Linear(ff_inner_dim, dim, bias=False) + ) if parallel_ff else None + + def forward(self, x, context): + """ + einstein notation + b - batch + h - heads + n, i, j - sequence length (base sequence length, source, target) + d - feature dimension + """ + + # pre-layernorm, for queries and context + + x = self.norm(x) + context = self.context_norm(context) + + # get queries + + q = self.to_q(x) + q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads) + + # scale + + q = q * self.scale + + # get key / values + + k, v = self.to_kv(context).chunk(2, dim=-1) + + # query / key similarity + + sim = einsum('b h i d, b j d -> b h i j', q, k) + + # attention + + sim = sim - sim.amax(dim=-1, keepdim=True) + attn = sim.softmax(dim=-1) + + # aggregate + + out = einsum('b h i j, b j d -> b h i d', attn, v) + + # merge and combine heads + + out = rearrange(out, 'b h n d -> b n (h d)') + out = self.to_out(out) + + # add parallel feedforward (for multimodal layers) + + if exists(self.ff): + out = out + self.ff(x) + + return out + +# transformer + + +class CoCa(nn.Module): + def __init__( + self, + *, + dim, + num_tokens, + unimodal_depth, + multimodal_depth, + image_dim = None, + num_img_queries=256, + dim_head=64, + heads=8, + ff_mult=4, + img_encoder=None, + caption_loss_weight=1., + contrastive_loss_weight=1., + pad_id=0 + ): + super().__init__() + self.dim = dim + + self.pad_id = pad_id + self.caption_loss_weight = caption_loss_weight + self.contrastive_loss_weight = contrastive_loss_weight + + # token embeddings + + self.token_emb = nn.Embedding(num_tokens, dim) + self.text_cls_token = nn.Parameter(torch.randn(dim)) + + # image encoder + + self.img_encoder = img_encoder + + if exists(self.img_encoder): + freeze_model_and_make_eval_(self.img_encoder) + + # attention pooling for image tokens + + self.img_queries = nn.Parameter(torch.randn(num_img_queries + 1, dim)) # num image queries for multimodal, but 1 extra CLS for contrastive learning + self.img_attn_pool = CrossAttention(dim=dim, context_dim=image_dim, dim_head=dim_head, heads=heads, norm_context=True) + + self.img_attn_pool_norm = LayerNorm(dim) + self.text_cls_norm = LayerNorm(dim) + + # contrastive learning temperature + + self.temperature = nn.Parameter(torch.Tensor([1.])) + + # unimodal layers + + self.unimodal_layers = nn.ModuleList([]) + for ind in range(unimodal_depth): + self.unimodal_layers.append( + Residual(ParallelTransformerBlock(dim=dim, dim_head=dim_head, heads=heads, ff_mult=ff_mult)), + ) + + # multimodal layers + + self.multimodal_layers = nn.ModuleList([]) + for ind in range(multimodal_depth): + self.multimodal_layers.append(nn.ModuleList([ + Residual(ParallelTransformerBlock(dim=dim, dim_head=dim_head, heads=heads, 
ff_mult=ff_mult)), + Residual(CrossAttention(dim=dim, dim_head=dim_head, heads=heads, parallel_ff=True, ff_mult=ff_mult)) + ])) + + # to logits + + self.to_logits = nn.Sequential( + LayerNorm(dim), + nn.Linear(dim, num_tokens, bias=False) + ) + + # they used embedding weight tied projection out to logits, not common, but works + self.to_logits[-1].weight = self.token_emb.weight + nn.init.normal_(self.token_emb.weight, std=0.02) + + def embed_text(self, text): + batch, device = text.shape[0], text.device + + seq = text.shape[1] + + text_tokens = self.token_emb(text) + + # append text cls tokens + + text_cls_tokens = repeat(self.text_cls_token, 'd -> b 1 d', b=batch) + text_tokens = torch.cat((text_tokens, text_cls_tokens), dim=-2) + + # create specific mask for text cls token at the end + # to prevent it from attending to padding + + cls_mask = rearrange(text!=self.pad_id, 'b j -> b 1 j') + attn_mask = F.pad(cls_mask, (0, 1, seq, 0), value=True) + + # go through unimodal layers + + for attn_ff in self.unimodal_layers: + text_tokens = attn_ff(text_tokens, attn_mask=attn_mask) + + # get text cls token + + text_tokens, text_cls_tokens = text_tokens[:, :-1], text_tokens[:, -1] + text_embeds = self.text_cls_norm(text_cls_tokens) + return text_embeds, text_tokens + + def embed_image(self, images=None, image_tokens=None): + # encode images into embeddings + # with the img_encoder passed in at init + # it can also accept precomputed image tokens + + assert not (exists(images) and exists(image_tokens)) + + if exists(images): + assert exists(self.img_encoder), 'img_encoder must be passed in for automatic image encoding' + + self.img_encoder.eval() + with torch.no_grad(): + image_tokens = self.img_encoder(images).detach() + + # attention pool image tokens + + img_queries = repeat(self.img_queries, 'n d -> b n d', b=image_tokens.shape[0]) + img_queries = self.img_attn_pool(img_queries, image_tokens) + img_queries = self.img_attn_pool_norm(img_queries) + + return img_queries[:, 0], img_queries[:, 1:] + + def forward( + self, + text, + images=None, + image_tokens=None, + labels=None, + return_loss=False, + return_embeddings=False + ): + batch, device = text.shape[0], text.device + + if return_loss and not exists(labels): + text, labels = text[:, :-1], text[:, 1:] + + text_embeds, text_tokens = self.embed_text(text) + + image_embeds, image_tokens = self.embed_image(images=images, image_tokens=image_tokens) + + # return embeddings if that is what the researcher wants + + if return_embeddings: + return text_embeds, image_embeds + + # go through multimodal layers + + for attn_ff, cross_attn in self.multimodal_layers: + text_tokens = attn_ff(text_tokens) + text_tokens = cross_attn(text_tokens, image_tokens) + + logits = self.to_logits(text_tokens) + + if not return_loss: + return logits + + # shorthand + + ce = F.cross_entropy + + # calculate caption loss (cross entropy loss) + + logits = rearrange(logits, 'b n c -> b c n') + caption_loss = ce(logits, labels, ignore_index=self.pad_id) + caption_loss = caption_loss * self.caption_loss_weight + + # calculate contrastive loss + + sim = einsum('i d, j d -> i j', text_embeds, image_embeds) + sim = sim * self.temperature.exp() + contrastive_labels = torch.arange(batch, device=device) + + contrastive_loss = (ce(sim, contrastive_labels) + ce(sim.t(), contrastive_labels)) * 0.5 + contrastive_loss = contrastive_loss * self.contrastive_loss_weight + + return caption_loss + contrastive_loss + diff --git 
a/Downstream/multi-modalities-downstream/CoTrain/modules/cotrain_module.py b/Downstream/multi-modalities-downstream/CoTrain/modules/cotrain_module.py new file mode 100644 index 0000000..d4d272d --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/cotrain_module.py @@ -0,0 +1,558 @@ +import torch +import torch.nn as nn +import pytorch_lightning as pl +from transformers.models.bert.modeling_bert import BertConfig, BertEmbeddings +from CoTrain.modules import heads, cotrain_utils +from CoTrain.modules import objectives as objectives +from CoTrain.modules import base_vision_transformer as vit +from CoTrain.modules.temporal_roll import TemporalRoll +import torch.nn.functional as F +import math +from CoTrain.modules.cotrain_utils import state_dict_dino_fix + + +class VCOPHeader(torch.nn.Module): + def __init__(self, tuple_len=3, feature_size=768): + """ + In the constructor we instantiate two nn.Linear modules and assign them as + member variables. + """ + super(VCOPHeader, self).__init__() + self.feature_size = feature_size + self.fc7 = nn.Linear(self.feature_size * 2, 512) + self.tuple_len = tuple_len + pair_num = int(tuple_len * (tuple_len - 1) / 2) + self.class_num = math.factorial(tuple_len) + self.fc8 = nn.Linear(512 * pair_num, self.class_num) + self.dropout = nn.Dropout(p=0.5) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + """ + """ + pf = [] # pairwise concat + for i in range(self.tuple_len): + for j in range(i + 1, self.tuple_len): + pf.append(torch.cat([x[:, i], x[:, j]], dim=1)) + pf = [self.fc7(i) for i in pf] + pf = [self.relu(i) for i in pf] + h = torch.cat(pf, dim=1) + h = self.dropout(h) + h = self.fc8(h) # logits + return h + + +class CoTrainTransformerSS(pl.LightningModule): + def __init__(self, config): + super().__init__() + self.save_hyperparameters() + + bert_config = BertConfig( + vocab_size=config["vocab_size"], + hidden_size=config["hidden_size"], + num_hidden_layers=config["num_layers"], + num_attention_heads=config["num_heads"], + intermediate_size=config["hidden_size"] * config["mlp_ratio"], + max_position_embeddings=config["max_text_len"], + hidden_dropout_prob=config["drop_rate"], + attention_probs_dropout_prob=config["drop_rate"], + ) + + self.text_embeddings = BertEmbeddings(bert_config) + self.text_embeddings.apply(objectives.init_weights) + + self.token_type_embeddings = nn.Embedding(2, config["hidden_size"]) + self.token_type_embeddings.apply(objectives.init_weights) + + flag = 0 + if self.hparams.config["load_path"] == "": + while not flag == 1: + try: + self.transformer = getattr(vit, self.hparams.config["vit"])( + pretrained=True, config=self.hparams.config + ) + flag = 1 + except: + print("load pretrained failed, try again") + flag = 0 + else: + self.transformer = getattr(vit, self.hparams.config["vit"])( + pretrained=False, config=self.hparams.config + ) + + self.pooler = heads.Pooler(config["hidden_size"]) + self.pooler.apply(objectives.init_weights) + + # num frames + self.num_frames = config["num_frames"] # a global variable to identify if image/video + + if config["loss_names"]["mlm"] > 0: + self.mlm_score = heads.MLMHead(bert_config) + self.mlm_score.apply(objectives.init_weights) + + if config["loss_names"]["vtm"] > 0: + self.vtm_score = heads.vtmHead(config["hidden_size"]) + self.vtm_score.apply(objectives.init_weights) + + if config["loss_names"]["mpp"] > 0: + self.mpp_score = heads.MPPHead(bert_config) + self.mpp_score.apply(objectives.init_weights) + + # vtc may also used for pretrain + # == for video text 
contrastive learning + if config["loss_names"]["vtc"] > 0: + print("initalize video project and txt projection") + # v1 + self.txt_proj = nn.Linear(config["hidden_size"], config["shared_embedding_dim"]) + self.vid_proj = nn.Linear(config["hidden_size"], config["shared_embedding_dim"]) + # v2 + # self.vid_proj = nn.Sequential( + # nn.Dropout(0.5), + # nn.Linear(config["hidden_size"], config["hidden_size"] // 2), + # nn.LayerNorm(config["hidden_size"] // 2), + # nn.GELU(), + # nn.Linear(config["hidden_size"] // 2, config["shared_embedding_dim"]), + # ) + # self.txt_proj = nn.Sequential( + # nn.Dropout(0.5), + # nn.Linear(config["hidden_size"], config["hidden_size"] // 2), + # nn.LayerNorm(config["hidden_size"] // 2), + # nn.GELU(), + # nn.Linear(config["hidden_size"] // 2, config["shared_embedding_dim"]), + # ) + self.txt_proj.apply(objectives.init_weights) + self.vid_proj.apply(objectives.init_weights) + # == end + if ( + self.hparams.config["load_path"] != "" + and not self.hparams.config["test_only"] + ): + print("0" * 200) + ckpt = torch.load(self.hparams.config["load_path"], map_location="cpu") + state_dict = ckpt["state_dict"] + # if downstream max text token length not consistent with pretrain + if self.text_embeddings.position_embeddings.weight.size() != state_dict['text_embeddings.position_embeddings.weight'].size(): + state_dict.pop('text_embeddings.position_embeddings.weight', None) + state_dict.pop('text_embeddings.position_ids', None) + new_state_dict = state_dict_dino_fix(state_dict, self.state_dict()) + # new_state_dict = self._inflate_positional_embeds(state_dict) + self.load_state_dict(new_state_dict, strict=False) + # self.load_state_dict(state_dict, strict=False) + if self.hparams.config["linear_evaluation"]: + for name, param in self.named_parameters(): + # only train project layer + if 'mlm_score' in name or 'vtm_score' in name or 'mpp_score' in name: + param.requires_grad = True + elif 'txt_proj' in name or 'vid_proj' in name: + param.requires_grad = True + elif 'pooler' in name: + param.requires_grad = True + else: + param.requires_grad = False + # flag = False + # for name, param in self.named_parameters(): + # if '20' in name: + # flag = True + # param.requires_grad = flag + # trainable_params = filter(lambda p: p.requires_grad, self.parameters()) + # ===================== Downstream ===================== # + + hs = self.hparams.config["hidden_size"] + # print(config["loss_names"]) + if config["loss_names"]["multiple_choice"] > 0: + self.vtm_score = heads.vtmHead(config["hidden_size"]) + self.vtm_score.apply(objectives.init_weights) + + # alex: vcr q2a task + if config["loss_names"]["vcr_q2a"] > 0: + self.vtm_score = heads.vtmHead(config["hidden_size"]) + self.vtm_score.apply(objectives.init_weights) + + # alex: tvqa + if config["loss_names"]["mc_vqa"] > 0: + self.vtm_score = heads.vtmHead(config["hidden_size"]) + self.vtm_score.apply(objectives.init_weights) + + if self.hparams.config["loss_names"]["vqa"] > 0: + vs = self.hparams.config["vqav2_label_size"] + self.vqa_classifier = nn.Sequential( + nn.Linear(hs, hs * 2), + nn.LayerNorm(hs * 2), + nn.GELU(), + nn.Linear(hs * 2, vs), + ) + self.vqa_classifier.apply(objectives.init_weights) + + # alex: add for vcr: q2a + if self.hparams.config["loss_names"]["vcr_q2a"] > 0: + # for q2a + self.rank_output = nn.Linear(hs, 1) + self.rank_output.weight.data = self.vtm_score.fc.weight.data[1:, :] + self.rank_output.bias.data = self.vtm_score.fc.bias.data[1:] + # for qa2r + self.rank_output_2 = nn.Linear(hs, 1) + 
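+            # NOTE (editorial, hedged): as with `rank_output` above, the qa2r head is
+            # initialised from row 1 of the video-text matching classifier (presumably its
+            # "match" class), so answer ranking starts from the pretrained matching logit
+            # rather than from scratch.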
self.rank_output_2.weight.data = self.vtm_score.fc.weight.data[1:, :] + self.rank_output_2.bias.data = self.vtm_score.fc.bias.data[1:] + + self.margin = 0.2 + + # add for vcop prediction + if self.hparams.config["loss_names"]["vcop"] > 0: + self.vcop_classifier = VCOPHeader(tuple_len=self.num_frames, feature_size=hs) + + # add for tvqa + if self.hparams.config["loss_names"]["mc_vqa"] > 0: + # # v1: for q2a with vtm_score + # self.rank_output = nn.Linear(hs, 1) + # self.rank_output.weight.data = self.vtm_score.fc.weight.data[1:, :] + # self.rank_output.bias.data = self.vtm_score.fc.bias.data[1:] + # self.dropout = nn.Dropout(0.1) + self.mc_vqa_classifier = nn.Sequential( + nn.Dropout(0.1), + nn.Linear(hs, 256), + nn.LayerNorm(256), + nn.GELU(), + nn.Linear(256, 1), + ) + self.mc_vqa_classifier.apply(objectives.init_weights) + + # alex: add for openend_vqa + if self.hparams.config["loss_names"]["openend_vqa"] > 0: + vs = self.hparams.config["msrvttqa_label_size"] + # self.vqa_classifier = nn.Sequential( + # nn.Dropout(0.5), + # nn.Linear(hs, hs * 2), + # nn.LayerNorm(hs * 2), + # nn.GELU(), + # nn.Linear(hs * 2, vs), + # ) + # small dataset + self.vqa_classifier = nn.Sequential( + nn.Dropout(0.5), + nn.Linear(hs, hs//2), + nn.LayerNorm(hs//2), + nn.GELU(), + nn.Linear(hs//2, vs), + ) + self.vqa_classifier.apply(objectives.init_weights) + + if self.hparams.config["loss_names"]["nlvr2"] > 0: + self.nlvr2_classifier = nn.Sequential( + nn.Linear(hs * 2, hs * 2), + nn.LayerNorm(hs * 2), + nn.GELU(), + nn.Linear(hs * 2, 2), + ) + self.nlvr2_classifier.apply(objectives.init_weights) + emb_data = self.token_type_embeddings.weight.data + self.token_type_embeddings = nn.Embedding(3, hs) + self.token_type_embeddings.apply(objectives.init_weights) + self.token_type_embeddings.weight.data[0, :] = emb_data[0, :] + self.token_type_embeddings.weight.data[1, :] = emb_data[1, :] + self.token_type_embeddings.weight.data[2, :] = emb_data[1, :] + + if self.hparams.config["loss_names"]["irtr"] > 0: + self.rank_output = nn.Linear(hs, 1) + self.rank_output.weight.data = self.vtm_score.fc.weight.data[1:, :] + self.rank_output.bias.data = self.vtm_score.fc.bias.data[1:] + self.margin = 0.2 + # for p in self.vtm_score.parameters(): # alex: requires_grad = true? + # p.requires_grad = False + + # test msrvtt multiple choice without finetune + if self.hparams.config["loss_names"]["multiple_choice"] > 0: + self.rank_output = nn.Linear(hs, 1) + self.rank_output.weight.data = self.vtm_score.fc.weight.data[1:, :] + self.rank_output.bias.data = self.vtm_score.fc.bias.data[1:] + self.margin = 0.2 + + cotrain_utils.set_metrics(self) + self.current_tasks = list() + + self.temporal_roll_module = TemporalRoll(n_segment=self.num_frames, v=0) + + # ===================== load downstream (test_only) ====================== + + if self.hparams.config["load_path"] != "" and self.hparams.config["test_only"]: + print("====load checkpoint=====") + ckpt = torch.load(self.hparams.config["load_path"], map_location="cpu") + state_dict = ckpt["state_dict"] + print('*' * 30) + print("current state dict") + print('*' * 30) + for k, v in self.state_dict().items(): + print(k) + # temporal embed and fix model ? 
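+            # NOTE (editorial, assumption): `state_dict_dino_fix` is expected to remap
+            # checkpoint keys and drop or resize incompatible embeddings so the non-strict
+            # `load_state_dict` below can accept a pretraining checkpoint whose text length
+            # or frame count differs from the current model.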
+ # new_state_dict = state_dict_data_parallel_fix(state_dict, self.state_dict()) + new_state_dict = state_dict_dino_fix(state_dict, self.state_dict()) + # new_state_dict = self._inflate_positional_embeds(state_dict) + self.load_state_dict(new_state_dict, strict=False) + # self.load_state_dict(state_dict, strict=False) + # # # print learnable param + # for name, param in self.named_parameters(): + # if param.requires_grad: + # print("learned param: ", name) + + def infer( + self, + batch, + mask_text=False, + mask_video=False, + video_token_type_idx=1, + video_embeds=None, + video_masks=None, + input_video_only=False, + input_text_only=False, + mode="video" + ): + # if text: process in normal video + # if video: repeat the text tensor for K times + if f"video_{video_token_type_idx - 1}" in batch: + imgkey = f"video_{video_token_type_idx - 1}" + else: + imgkey = "video" + do_mlm = "_mlm" if mask_text else "" + text_ids = batch[f"text_ids{do_mlm}"] + text_labels = batch[f"text_labels{do_mlm}"] + text_masks = batch[f"text_masks"] + # print(batch[imgkey]) + self.num_frames = batch[imgkey][0].size(1) + if not input_video_only: + text_embeds = self.text_embeddings(text_ids) + video_labels = None + patch_index = None + if not input_text_only: + if video_embeds is None and video_masks is None: + img = batch[imgkey][0] + img = img.contiguous().view(-1, img.size()[2], img.size()[3], img.size()[4]) # btchw to [bt]chw + ( + video_embeds, + video_masks, + patch_index, + video_labels, + ) = self.transformer.visual_embed( + img, + max_image_len=self.hparams.config["max_image_len"], + mask_it=mask_video, + mode=mode + ) + else: + patch_index, video_labels = ( + None, + None, + ) + if not input_video_only: + text_embeds = text_embeds + self.token_type_embeddings(torch.zeros_like(text_masks)) + text_embeds = torch.repeat_interleave(text_embeds, self.num_frames, dim=0) + text_masks = torch.repeat_interleave(text_masks, self.num_frames, dim=0) + if not input_text_only: + video_embeds = video_embeds + self.token_type_embeddings(torch.full_like(video_masks, video_token_type_idx)) + + # print(text_embeds.size(), video_embeds.size()) + if not input_text_only and not input_video_only: + co_embeds = torch.cat([text_embeds, video_embeds], dim=1) + co_masks = torch.cat([text_masks, video_masks], dim=1) + x = co_embeds + if input_text_only: + x = text_embeds + co_masks = text_masks + if input_video_only: + x = video_embeds + co_masks = video_masks + + for i, blk in enumerate(self.transformer.blocks): + # perform temporal roll operation for temporal modeling [video only] + if self.num_frames > 1 and not input_video_only and not input_text_only: + text_feats, image_feats = ( + x[:, : text_embeds.shape[1]], + x[:, text_embeds.shape[1]:], + ) + image_feats = self.temporal_roll_module(image_feats, i) + x = torch.cat((text_feats, image_feats), dim=1) + x, _attn = blk(x, mask=co_masks) + x = self.transformer.norm(x) + # reshape to video tensor + x = x.view(x.size(0) // self.num_frames, -1, x.size(-2), + x.size(-1)) + # add vcop here + h = None + if self.hparams.config["loss_names"]["vcop"] > 0 and mode == "video": + h = x + x = torch.mean(x, dim=1) + if input_text_only: + text_feats = x + if "vtc" in self.current_tasks: + text_feats = self.txt_proj(text_feats) + video_feats = None + if input_video_only: + video_feats = x + if "vtc" in self.current_tasks: + video_feats = self.vid_proj(video_feats) + text_feats = None + if not input_text_only and not input_video_only: + text_feats, video_feats = ( + x[:, : 
text_embeds.shape[1]], + x[:, text_embeds.shape[1]:], + ) + cls_feats = self.pooler(x) + if not input_video_only: + text_masks = text_masks[::self.num_frames].contiguous() + if not input_text_only: + video_masks = video_masks[::self.num_frames].contiguous() + ret = { + "text_feats": text_feats, + "video_feats": video_feats, + "cls_feats": cls_feats, + "raw_cls_feats": x[:, 0], + "video_labels": video_labels, + "video_masks": video_masks, + "text_labels": text_labels, + "text_ids": text_ids, + "text_masks": text_masks, + "patch_index": patch_index, + "vcop_features": h + } + return ret + + def forward(self, batch, mode="video"): + ret = dict() + if len(self.current_tasks) == 0: + ret.update(self.infer(batch, mode=mode)) + return ret + + # Masked Language Modeling + if "mlm" in self.current_tasks: + ret.update(objectives.compute_mlm(self, batch, mode=mode)) + + # video Text Matching + if "vtm" in self.current_tasks: + ret.update(objectives.compute_vtm_wpa(self, batch, mode=mode)) + # ret.update(objectives.compute_vtm_wpa_dino(self, batch, mode=mode)) + + # video Text Contrastive + if "vtc" in self.current_tasks: + ret.update(objectives.compute_vtc(self, batch, mode=mode)) + + # Visual Question Answering + if "vqa" in self.current_tasks: + ret.update(objectives.compute_vqa(self, batch)) + + # alex: msrvtt Visual Question Answering + if "openend_vqa" in self.current_tasks: + ret.update(objectives.compute_openend_vqa(self, batch)) + + # alex: vcop only for video + if "vcop" in self.current_tasks and mode == "video": + ret.update(objectives.compute_vcop(self, batch)) + + # alex: vcr qa + if "vcr_q2a" in self.current_tasks: + ret.update(objectives.compute_vcr_q2a(self, batch)) + + # alex: mc_vqa + if "mc_vqa" in self.current_tasks: + ret.update(objectives.compute_mc_vqa_q2a(self, batch)) + + # alex: msrvtt multiple choice setting + if "multiple_choice" in self.current_tasks: + ret.update(objectives.compute_multiple_choice(self, batch)) + + # video Retrieval and Text Retrieval + if "irtr" in self.current_tasks: + ret.update(objectives.compute_irtr(self, batch)) + + return ret + + def training_step(self, batch, batch_idx): + cotrain_utils.set_task(self) + # co-training + if "v" in batch and "i" in batch: + video_output = self(batch["v"], mode="video") + image_output = self(batch["i"], mode="image") + total_loss = sum([v for k, v in video_output.items() if "loss" in k]) + sum([v for k, v in image_output.items() if "loss" in k]) + else: + output = self(batch, mode="video") + total_loss = sum([v for k, v in output.items() if "loss" in k]) + return total_loss + + def training_epoch_end(self, outs): + cotrain_utils.epoch_wrapup(self) + + def validation_step(self, batch, batch_idx): + cotrain_utils.set_task(self) + if "v" in batch and "i" in batch: + video_output = self(batch["v"], mode="video") + image_output = self(batch["i"], mode="image") + else: + output = self(batch, mode="video") + + def validation_epoch_end(self, outs): + cotrain_utils.epoch_wrapup(self) + + def test_step(self, batch, batch_idx): + cotrain_utils.set_task(self) + if "v" in batch and "i" in batch: + video_output = self(batch["v"], mode="video") + image_output = self(batch["i"], mode="image") + else: + output = self(batch, mode="video") + ret = dict() + + if self.hparams.config["loss_names"]["vqa"] > 0: + ret.update(objectives.vqa_test_step(self, batch, image_output)) + + return ret + + def test_epoch_end(self, outs): + model_name = self.hparams.config["load_path"].split("/")[-1][:-5] + + if self.hparams.config["loss_names"]["vqa"] 
> 0: + objectives.vqa_test_wrapup(outs, model_name) + cotrain_utils.epoch_wrapup(self) + + def configure_optimizers(self): + return cotrain_utils.set_schedule(self) + + def _inflate_positional_embeds(self, new_state_dict, load_temporal_fix='zeros'): + # allow loading of timesformer with fewer num_frames + curr_keys = list(self.state_dict().keys()) + if 'transformer.temporal_embed' in new_state_dict and 'transformer.temporal_embed' in curr_keys: + load_temporal_embed = new_state_dict['transformer.temporal_embed'] + load_num_frames = load_temporal_embed.shape[1] + curr_num_frames = self.hparams.config['num_frames'] + embed_dim = load_temporal_embed.shape[2] + + if load_num_frames != curr_num_frames: + if load_num_frames > curr_num_frames: + print(f'### loaded {self.hparams.config["vit"]} model has MORE frames than current...' + f'### loading weights, filling in the extras via {load_temporal_fix}') + new_temporal_embed = load_temporal_embed[:, :curr_num_frames, :] + else: + print(f'### loaded {self.hparams.config["vit"]} model has FEWER frames than current...' + f'### loading weights, filling in the extras via {load_temporal_fix}') + if load_temporal_fix == 'zeros': + new_temporal_embed = torch.zeros([load_temporal_embed.shape[0], curr_num_frames, embed_dim]) + new_temporal_embed[:, :load_num_frames] = load_temporal_embed + elif load_temporal_fix in ['interp', 'bilinear']: + # interpolate + # unsqueeze so pytorch thinks its an image + mode = 'nearest' + if load_temporal_fix == 'bilinear': + mode = 'bilinear' + load_temporal_embed = load_temporal_embed.unsqueeze(0) + new_temporal_embed = F.interpolate(load_temporal_embed, + (curr_num_frames, embed_dim), mode=mode).squeeze(0) + else: + raise NotImplementedError + new_state_dict['transformer.temporal_embed'] = new_temporal_embed + # allow loading with smaller spatial patches. 
assumes custom border crop, to append the + # border patches to the input sequence + if 'transformer.pos_embed' in new_state_dict and 'transformer.pos_embed' in curr_keys: + load_pos_embed = new_state_dict['transformer.pos_embed'] + load_num_patches = load_pos_embed.shape[1] + curr_pos_embed = self.state_dict()['transformer.pos_embed'] + if load_num_patches != curr_pos_embed.shape[1]: + raise NotImplementedError( + 'Loading models with different spatial resolution / patch number not yet implemented, sorry.') + + return new_state_dict \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/cotrain_utils.py b/Downstream/multi-modalities-downstream/CoTrain/modules/cotrain_utils.py new file mode 100644 index 0000000..81b4bb7 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/cotrain_utils.py @@ -0,0 +1,633 @@ +import torch +import io +import random + +from transformers import ( + get_polynomial_decay_schedule_with_warmup, + get_cosine_schedule_with_warmup, +) +from CoTrain.modules.dist_utils import all_gather +from CoTrain.modules.objectives import compute_irtr_recall, compute_decouple_irtr_recall, compute_ind_irtr_recall +# from CoTrain.gadgets.my_metrics import Accuracy, VQAScore, Scalar +from CoTrain.gadgets.my_metrics import VQAScore +from torchmetrics import Accuracy +from torchmetrics import MeanMetric as Scalar +from CoTrain.datasets import client +from .clip_param_keys import clip_param_keys + + +def set_split_metrics(pl_module, split, loss_names): + for k, v in loss_names.items(): + if v < 1: + continue + if k == "vqa": + setattr(pl_module, f"{split}_vqa_score", VQAScore()) + setattr(pl_module, f"{split}_{k}_loss", Scalar()) + # vcr + elif k == "vcr_q2a": + setattr(pl_module, f"{split}_{k}_loss", Scalar()) + setattr(pl_module, f"{split}_{k}_accuracy", Accuracy()) + setattr(pl_module, f"{split}_vcr_qar_loss", Scalar()) + setattr(pl_module, f"{split}_vcr_qar_accuracy", Accuracy()) + elif k == "mc_vqa": + setattr(pl_module, f"{split}_{k}_loss", Scalar()) + setattr(pl_module, f"{split}_{k}_accuracy", Accuracy()) + elif k == "openend_vqa": + setattr(pl_module, f"{split}_vqa_score", VQAScore()) + setattr(pl_module, f"{split}_vqa_loss", Scalar()) + setattr(pl_module, f"{split}_{k}_loss", Scalar()) + setattr(pl_module, f"{split}_{k}_accuracy", Accuracy(ignore_index=-100)) + elif k == "vcop": + setattr(pl_module, f"{split}_vcop_score", VQAScore()) + setattr(pl_module, f"{split}_vcop_loss", Scalar()) + setattr(pl_module, f"{split}_{k}_loss", Scalar()) + setattr(pl_module, f"{split}_{k}_accuracy", Accuracy()) + elif k == "multiple_choice": + setattr(pl_module, f"{split}_{k}_accuracy", Accuracy()) + setattr(pl_module, f"{split}_{k}_loss", Scalar()) + elif k == "nlvr2": + if split == "train": + setattr(pl_module, f"train_{k}_accuracy", Accuracy()) + setattr(pl_module, f"train_{k}_loss", Scalar()) + else: + setattr(pl_module, f"dev_{k}_accuracy", Accuracy()) + setattr(pl_module, f"dev_{k}_loss", Scalar()) + setattr(pl_module, f"test_{k}_accuracy", Accuracy()) + setattr(pl_module, f"test_{k}_loss", Scalar()) + elif k == "irtr": + setattr(pl_module, f"{split}_irtr_loss", Scalar()) + elif k == "mppd" or k == "mpfr": + setattr(pl_module, f"{split}_{k}_loss", Scalar()) + elif k == "vtm": + setattr(pl_module, f"{split}_{k}_accuracy", Accuracy()) + setattr(pl_module, f"{split}_{k}_loss", Scalar()) + setattr(pl_module, f"{split}_{k}_dino_loss", Scalar()) + setattr(pl_module, f"{split}_{k}_wpa_loss", Scalar()) + # add for image text contrastive 
learning + elif k == "itc": + setattr(pl_module, f"{split}_{k}_loss", Scalar()) + elif k == "dino": + setattr(pl_module, f"{split}_{k}_loss", Scalar()) + elif k == "contrastive": + setattr(pl_module, f"{split}_{k}_loss", Scalar()) + setattr(pl_module, f"{split}_{k}_image_accuracy", Accuracy()) + setattr(pl_module, f"{split}_{k}_text_accuracy", Accuracy()) + elif k == "zs_classify": + setattr(pl_module, f"{split}_{k}_accuracy", Accuracy()) + elif k == "mlm": + setattr(pl_module, f"{split}_{k}_accuracy", Accuracy(ignore_index=-100)) + setattr(pl_module, f"{split}_{k}_loss", Scalar()) + else: + setattr(pl_module, f"{split}_{k}_accuracy", Accuracy()) + setattr(pl_module, f"{split}_{k}_loss", Scalar()) + + +def set_metrics(pl_module): + set_split_metrics(pl_module, "train", pl_module.hparams.config["loss_names"]) + if len(pl_module.hparams.config["val_datasets"]) == 0: + set_split_metrics(pl_module, "val", pl_module.hparams.config["loss_names"]) + else: + set_split_metrics(pl_module, "val", pl_module.hparams.config["val_loss_names"]) + + +def epoch_wrapup(pl_module): + phase = "train" if pl_module.training else "val" + the_metric = 0 + the_metric_qar = 0 + if pl_module.hparams.config["get_recall_metric"] and not pl_module.training: + (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10) = compute_irtr_recall(pl_module) + if torch.distributed.get_rank() == 0: + print((ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10), pl_module.global_step) + pl_module.logger.experiment.add_scalar( + "recalls/ir_r1", ir_r1, pl_module.global_step + ) + pl_module.logger.experiment.add_scalar( + "recalls/ir_r5", ir_r5, pl_module.global_step + ) + pl_module.logger.experiment.add_scalar( + "recalls/ir_r10", ir_r10, pl_module.global_step + ) + pl_module.logger.experiment.add_scalar( + "recalls/tr_r1", tr_r1, pl_module.global_step + ) + pl_module.logger.experiment.add_scalar( + "recalls/tr_r5", tr_r5, pl_module.global_step + ) + pl_module.logger.experiment.add_scalar( + "recalls/tr_r10", tr_r10, pl_module.global_step + ) + the_metric += ir_r1.item() + tr_r1.item() + + # add for ind irtr + if pl_module.hparams.config["get_ind_recall_metric"] and not pl_module.training: + (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10) = compute_ind_irtr_recall(pl_module) + print((ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10), pl_module.global_step) + pl_module.logger.experiment.add_scalar( + "recalls/ir_r1", ir_r1, pl_module.global_step + ) + pl_module.logger.experiment.add_scalar( + "recalls/ir_r5", ir_r5, pl_module.global_step + ) + pl_module.logger.experiment.add_scalar( + "recalls/ir_r10", ir_r10, pl_module.global_step + ) + pl_module.logger.experiment.add_scalar( + "recalls/tr_r1", tr_r1, pl_module.global_step + ) + pl_module.logger.experiment.add_scalar( + "recalls/tr_r5", tr_r5, pl_module.global_step + ) + pl_module.logger.experiment.add_scalar( + "recalls/tr_r10", tr_r10, pl_module.global_step + ) + # the_metric += ir_r1.item() + tr_r1.item() + the_metric += ir_r1 + tr_r1 + # == end + + if phase == "val" and len(pl_module.hparams.config["val_datasets"]) > 0: + # We are using a special dataset for val + loss_names = pl_module.hparams.config["val_loss_names"] + else: + loss_names = pl_module.hparams.config["loss_names"] + + for loss_name, v in loss_names.items(): + if v < 1: + continue + if loss_name == "mim" and not hasattr(pl_module, "visual_decoder"): + # We may choose not to decode + continue + + value = 0 + qar_value = 0 + if loss_name == "vqa": + value = getattr(pl_module, f"{phase}_{loss_name}_score").compute() + 
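The `set_split_metrics` helper above registers one torchmetrics object per (split, task) pair via `setattr`, the `compute_*` objectives update those objects through `getattr`, and `epoch_wrapup` finally calls `compute()`, logs the epoch value, and `reset()`s the state. A minimal, self-contained sketch of that update → compute → reset lifecycle; the metric names are illustrative, and note that the bare `Accuracy(ignore_index=-100)` calls above imply an older torchmetrics API, whereas current releases require the `task=` argument used here:

```python
import torch
from torchmetrics import Accuracy, MeanMetric

# One metric object per split/task, mirroring set_split_metrics' setattr calls.
metrics = {
    "train_mlm_accuracy": Accuracy(task="multiclass", num_classes=10, ignore_index=-100),
    "train_mlm_loss": MeanMetric(),
}

# Per-step updates (what compute_mlm does via getattr(pl_module, f"{phase}_mlm_accuracy")).
for _ in range(4):
    logits = torch.randn(8, 10)
    labels = torch.randint(0, 10, (8,))
    metrics["train_mlm_accuracy"](logits, labels)   # update, returns the running value
    metrics["train_mlm_loss"](torch.rand(()))

# Epoch end, as in epoch_wrapup: aggregate, log, then reset for the next epoch.
acc = metrics["train_mlm_accuracy"].compute().item()
loss = metrics["train_mlm_loss"].compute().item()
print(f"mlm/train/accuracy_epoch={acc:.3f}  mlm/train/loss_epoch={loss:.3f}")
for m in metrics.values():
    m.reset()
```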
pl_module.log(f"{loss_name}/{phase}/score_epoch", value, sync_dist=True) + getattr(pl_module, f"{phase}_{loss_name}_score").reset() + pl_module.log( + f"{loss_name}/{phase}/loss_epoch", + getattr(pl_module, f"{phase}_{loss_name}_loss").compute(), sync_dist=True + ) + getattr(pl_module, f"{phase}_{loss_name}_loss").reset() + elif loss_name == "vcr_q2a": + # q2a + pl_module.log( + f"{loss_name}/{phase}/loss_epoch", + getattr(pl_module, f"{phase}_{loss_name}_loss").compute(), sync_dist=True + ) + getattr(pl_module, f"{phase}_{loss_name}_loss").reset() + value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute() + pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value, sync_dist=True) + getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset() + # qar + pl_module.log( + f"vcr_qar/{phase}/loss_epoch", + getattr(pl_module, f"{phase}_vcr_qar_loss").compute(), sync_dist=True + ) + getattr(pl_module, f"{phase}_vcr_qar_loss").reset() + qar_value = getattr(pl_module, f"{phase}_vcr_qar_accuracy").compute() + pl_module.log(f"vcr_qar/{phase}/accuracy_epoch", qar_value, sync_dist=True) + getattr(pl_module, f"{phase}_vcr_qar_accuracy").reset() + # mc_vqa + elif loss_name == "mc_vqa": + pl_module.log( + f"{loss_name}/{phase}/loss_epoch", + getattr(pl_module, f"{phase}_{loss_name}_loss").compute(), + sync_dist=True + ) + getattr(pl_module, f"{phase}_{loss_name}_loss").reset() + value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute() + pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value, sync_dist=True) + getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset() + elif loss_name == "openend_vqa": + pl_module.log( + f"{loss_name}/{phase}/loss_epoch", + getattr(pl_module, f"{phase}_vqa_loss").compute(), + sync_dist=True, + ) + getattr(pl_module, f"{phase}_vqa_loss").reset() + value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute() + pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value, sync_dist=True) + getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset() + # multiple_choice + elif loss_name == "multiple_choice": + pl_module.log( + f"{loss_name}/{phase}/loss_epoch", + getattr(pl_module, f"{phase}_{loss_name}_loss").compute(), + sync_dist=True, + ) + getattr(pl_module, f"{phase}_{loss_name}_loss").reset() + value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute() + pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value, sync_dist=True) + getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset() + # vcop + elif loss_name == "vcop": + pl_module.log( + f"{loss_name}/{phase}/loss_epoch", + getattr(pl_module, f"{phase}_{loss_name}_loss").compute(), + sync_dist=True, + ) + + getattr(pl_module, f"{phase}_{loss_name}_loss").reset() + value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute() + pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value, sync_dist=True) + getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset() + elif loss_name == "nlvr2": + if phase == "train": + value = getattr(pl_module, f"train_{loss_name}_accuracy").compute() + pl_module.log(f"{loss_name}/train/accuracy_epoch", value, sync_dist=True) + getattr(pl_module, f"train_{loss_name}_accuracy").reset() + pl_module.log( + f"{loss_name}/train/loss_epoch", + getattr(pl_module, f"train_{loss_name}_loss").compute(), + sync_dist=True, + ) + getattr(pl_module, f"train_{loss_name}_loss").reset() + else: + value = getattr(pl_module, f"dev_{loss_name}_accuracy").compute() + pl_module.log(f"{loss_name}/dev/accuracy_epoch", value, sync_dist=True) + 
getattr(pl_module, f"dev_{loss_name}_accuracy").reset() + pl_module.log( + f"{loss_name}/dev/loss_epoch", + getattr(pl_module, f"dev_{loss_name}_loss").compute(), + sync_dist=True, + ) + getattr(pl_module, f"dev_{loss_name}_loss").reset() + + value = getattr(pl_module, f"test_{loss_name}_accuracy").compute() + pl_module.log(f"{loss_name}/test/accuracy_epoch", value, sync_dist=True) + getattr(pl_module, f"test_{loss_name}_accuracy").reset() + pl_module.log( + f"{loss_name}/test/loss_epoch", + getattr(pl_module, f"test_{loss_name}_loss").compute(), + sync_dist=True, + ) + getattr(pl_module, f"test_{loss_name}_loss").reset() + elif loss_name == "irtr": + pl_module.log( + f"{loss_name}/{phase}/irtr_loss_epoch", + getattr(pl_module, f"{phase}_irtr_loss").compute(), sync_dist=True + ) + getattr(pl_module, f"{phase}_irtr_loss").reset() + elif loss_name == "mppd" or loss_name == "mpfr": + pl_module.log( + f"{loss_name}/{phase}/loss_epoch", + getattr(pl_module, f"{phase}_{loss_name}_loss").compute(), + sync_dist=True, + ) + getattr(pl_module, f"{phase}_{loss_name}_loss").reset() + elif loss_name == "vtm": + value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute() + pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value, sync_dist=True) + getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset() + pl_module.log( + f"{loss_name}/{phase}/loss_epoch", + getattr(pl_module, f"{phase}_{loss_name}_loss").compute(), + sync_dist=True, + ) + getattr(pl_module, f"{phase}_{loss_name}_loss").reset() + pl_module.log( + f"{loss_name}/{phase}/wpa_loss_epoch", + getattr(pl_module, f"{phase}_{loss_name}_wpa_loss").compute(), + sync_dist=True, + ) + getattr(pl_module, f"{phase}_{loss_name}_wpa_loss").reset() + # add dino loss + pl_module.log( + f"{loss_name}/{phase}/dino_loss_epoch", + getattr(pl_module, f"{phase}_{loss_name}_dino_loss").compute(), + sync_dist=True, + ) + getattr(pl_module, f"{phase}_{loss_name}_dino_loss").reset() + elif loss_name == "dino": + pl_module.log( + f"{loss_name}/{phase}/loss_epoch", + getattr(pl_module, f"{phase}_{loss_name}_loss").compute(), + sync_dist=True, + ) + # value = f"{loss_name}/{phase}/loss_epoch", + getattr(pl_module, f"{phase}_{loss_name}_loss").reset() + elif loss_name == "vtc": + pl_module.log( + f"{loss_name}/{phase}/loss_epoch", + getattr(pl_module, f"{phase}_{loss_name}_loss").compute(), + sync_dist=True, + ) + # value = f"{loss_name}/{phase}/loss_epoch", + getattr(pl_module, f"{phase}_{loss_name}_loss").reset() + elif loss_name == "contrastive": + pl_module.log( + f"{loss_name}/{phase}/loss_epoch", + getattr(pl_module, f"{phase}_{loss_name}_loss").compute(), + sync_dist=True, + ) + pl_module.log( + f"{loss_name}/{phase}/image_accuracy_epoch", + getattr(pl_module, f"{phase}_{loss_name}_image_accuracy").compute(), + sync_dist=True, + ) + pl_module.log( + f"{loss_name}/{phase}/text_accuracy_epoch", + getattr(pl_module, f"{phase}_{loss_name}_text_accuracy").compute(), + sync_dist=True, + ) + getattr(pl_module, f"{phase}_{loss_name}_loss").reset() + getattr(pl_module, f"{phase}_{loss_name}_image_accuracy").reset() + getattr(pl_module, f"{phase}_{loss_name}_text_accuracy").reset() + elif loss_name == "zs_classify": + value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute() + pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value, sync_dist=True) + getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset() + elif loss_name == "mim": + pl_module.log( + f"{loss_name}/{phase}/loss_epoch", + getattr(pl_module, 
f"{phase}_{loss_name}_loss").compute(), + sync_dist=True, + ) + else: + value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute() + pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value, sync_dist=True) + getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset() + pl_module.log( + f"{loss_name}/{phase}/loss_epoch", + getattr(pl_module, f"{phase}_{loss_name}_loss").compute(), + sync_dist=True, + ) + getattr(pl_module, f"{phase}_{loss_name}_loss").reset() + + if loss_name == "vcr_q2a": + # print(value, qar_value) + the_metric += qar_value/2 + value/2 + else: + the_metric += value + + pl_module.log(f"{phase}/the_metric", the_metric) + + +def check_non_acc_grad(pl_module): + if pl_module.token_type_embeddings.weight.grad is None: + return True + else: + grad = pl_module.token_type_embeddings.weight.grad + return (grad.sum() == 0).item() + + +def set_task(pl_module): + phase = "train" if pl_module.training else "val" + if phase == "val" and len(pl_module.hparams.config["val_datasets"]) > 0: + # We are using a special dataset for val + pl_module.current_tasks = [ + k for k, v in pl_module.hparams.config["val_loss_names"].items() if v >= 1 + ] + else: + pl_module.current_tasks = [ + k for k, v in pl_module.hparams.config["loss_names"].items() if v >= 1 + ] + return + + +def set_schedule(pl_module): + lr = pl_module.hparams.config["learning_rate"] + wd = pl_module.hparams.config["weight_decay"] + + def no_weight_decay(n, p): + no_decay = [ + "bias", + "LayerNorm.bias", + "LayerNorm.weight", + "norm.bias", + "norm.weight", + "norm1.bias", + "norm1.weight", + "norm2.bias", + "norm2.weight", + # Following are for clip + "ln_1.weight", + "ln_2.weight", + "ln_3.weight", + "ln_post.weight", + "ln_pre.weight", + "ln_final.weight", + "embedding", + "logit_scale", + # evl + "temporal_cls_token", + "pemb_t", + "input_lns" + ] + return p.dim() < 2 or any(nd in n for nd in no_decay) + + + head_names = ["vqa_classifier", "nlvr2_classifier"] + lr_mult = pl_module.hparams.config["lr_mult"] + clip_lr_mult = pl_module.hparams.config["clip_lr_mult"] + end_lr = pl_module.hparams.config["end_lr"] + decay_power = pl_module.hparams.config["decay_power"] + optim_type = pl_module.hparams.config["optim_type"] + + names = [n for n, p in pl_module.named_parameters()] + optimizer_grouped_parameters = [ + { + "params": [ + p + for n, p in pl_module.named_parameters() + if not no_weight_decay(n, p) + and not any(bb in n for bb in head_names) + and not any(cp in n for cp in clip_param_keys) + ], + "weight_decay": wd, + "lr": lr, + }, + { + "params": [ + p + for n, p in pl_module.named_parameters() + if no_weight_decay(n, p) + and not any(bb in n for bb in head_names) + and not any(cp in n for cp in clip_param_keys) + ], + "weight_decay": 0.0, + "lr": lr, + }, + { + "params": [ + p + for n, p in pl_module.named_parameters() + if not no_weight_decay(n, p) + and any(bb in n for bb in head_names) + and not any(cp in n for cp in clip_param_keys) + ], + "weight_decay": wd, + "lr": lr * lr_mult, + }, + { + "params": [ + p + for n, p in pl_module.named_parameters() + if no_weight_decay(n, p) and any(bb in n for bb in head_names) + and not any(cp in n for cp in clip_param_keys) + ], + "weight_decay": 0.0, + "lr": lr * lr_mult, + }, + { + "params": [ + p + for n, p in pl_module.named_parameters() + if not no_weight_decay(n, p) + and not any(bb in n for bb in head_names) + and any(cp in n for cp in clip_param_keys) + ], + "weight_decay": wd, + "lr": lr * clip_lr_mult, + }, + { + "params": [ + p + for n, p in 
pl_module.named_parameters() + if no_weight_decay(n, p) + and not any(bb in n for bb in head_names) + and any(cp in n for cp in clip_param_keys) + ], + "weight_decay": 0.0, + "lr": lr * clip_lr_mult, + }, + ] + + if optim_type == "adamw": + optimizer = torch.optim.AdamW( + optimizer_grouped_parameters, lr=lr, eps=1e-6, betas=(0.9, 0.98)) + elif optim_type == "adam": + optimizer = torch.optim.Adam(optimizer_grouped_parameters, lr=lr) + elif optim_type == "sgd": + optimizer = torch.optim.SGD(optimizer_grouped_parameters, lr=lr, momentum=0.9) + + if pl_module.trainer.max_steps is None or pl_module.trainer.max_steps == -1: + max_steps = ( + len(pl_module.trainer.datamodule.train_dataloader()) + * pl_module.trainer.max_epochs + // pl_module.trainer.accumulate_grad_batches + ) + else: + max_steps = pl_module.trainer.max_steps + + warmup_steps = pl_module.hparams.config["warmup_steps"] + if isinstance(pl_module.hparams.config["warmup_steps"], float): + warmup_steps = int(max_steps * warmup_steps) + + if decay_power == "cosine": + scheduler = get_cosine_schedule_with_warmup( + optimizer, + num_warmup_steps=warmup_steps, + num_training_steps=max_steps, + ) + else: + scheduler = get_polynomial_decay_schedule_with_warmup( + optimizer, + num_warmup_steps=warmup_steps, + num_training_steps=max_steps, + lr_end=end_lr, + power=decay_power, + ) + sched = {"scheduler": scheduler, "interval": "step"} + + return ( + [optimizer], + [sched], + ) + + +def state_dict_data_parallel_fix(load_state_dict, curr_state_dict): + load_keys = list(load_state_dict.keys()) + curr_keys = list(curr_state_dict.keys()) + + redo_dp = False + undo_dp = False + if not curr_keys[0].startswith('module.') and load_keys[0].startswith('module.'): + undo_dp = True + elif curr_keys[0].startswith('module.') and not load_keys[0].startswith('module.'): + redo_dp = True + + if undo_dp: + from collections import OrderedDict + new_state_dict = OrderedDict() + for k, v in load_state_dict.items(): + name = k[7:] # remove `module.` + new_state_dict[name] = v + # load params + elif redo_dp: + from collections import OrderedDict + new_state_dict = OrderedDict() + for k, v in load_state_dict.items(): + name = 'module.' + k # remove `module.` + new_state_dict[name] = v + else: + new_state_dict = load_state_dict + return new_state_dict + + +def state_dict_dino_fix(load_state_dict, curr_state_dict): + load_keys = list(load_state_dict.keys()) + curr_keys = list(curr_state_dict.keys()) + + # for k in curr_state_dict.keys(): + # print(k) + print('*'*50) + redo_dp = False + undo_dp = False + dino_dp = False + if not curr_keys[0].startswith('module.') and load_keys[0].startswith('module.'): + undo_dp = True + elif curr_keys[0].startswith('module.') and not load_keys[0].startswith('module.'): + redo_dp = True + elif load_keys[10].startswith('teacher.') or load_keys[10].startswith('student.'): + dino_dp = True + if undo_dp: + from collections import OrderedDict + new_state_dict = OrderedDict() + for k, v in load_state_dict.items(): + # print(k) + name = k[7:] # remove `module.` + new_state_dict[name] = v + # load params + elif redo_dp: + from collections import OrderedDict + new_state_dict = OrderedDict() + for k, v in load_state_dict.items(): + # print(k) + name = 'module.' + k # remove `module.` + new_state_dict[name] = v + elif dino_dp: + from collections import OrderedDict + new_state_dict = OrderedDict() + for k, v in load_state_dict.items(): + # print(k) + if k[:8] == "student.": + name = "transformer." 
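`set_schedule` above builds six AdamW parameter groups, (with vs. without weight decay) × (backbone vs. task heads vs. CLIP weights), so biases and normalization weights skip weight decay while the heads and the CLIP tower get their own learning-rate multipliers. A compressed sketch of the same grouping idea for a generic model; the `head_names`, multiplier, and hyperparameters here are placeholders, not the repo's config values:

```python
import torch
import torch.nn as nn

def build_param_groups(model: nn.Module, lr=1e-4, wd=0.01, head_names=("classifier",), lr_mult=10.0):
    """Group parameters the way set_schedule does: no weight decay for biases/norms,
    and a larger learning rate for task-head parameters."""
    no_decay_keys = ("bias", "LayerNorm.weight", "norm.weight", "logit_scale")

    def no_decay(name, param):
        return param.dim() < 2 or any(k in name for k in no_decay_keys)

    def is_head(name):
        return any(h in name for h in head_names)

    groups = {
        ("decay", "body"):   {"params": [], "weight_decay": wd,  "lr": lr},
        ("nodecay", "body"): {"params": [], "weight_decay": 0.0, "lr": lr},
        ("decay", "head"):   {"params": [], "weight_decay": wd,  "lr": lr * lr_mult},
        ("nodecay", "head"): {"params": [], "weight_decay": 0.0, "lr": lr * lr_mult},
    }
    for n, p in model.named_parameters():
        key = ("nodecay" if no_decay(n, p) else "decay", "head" if is_head(n) else "body")
        groups[key]["params"].append(p)
    return [g for g in groups.values() if g["params"]]

model = nn.Sequential(nn.Linear(16, 16), nn.LayerNorm(16), nn.Linear(16, 2))
optimizer = torch.optim.AdamW(build_param_groups(model), lr=1e-4, eps=1e-6, betas=(0.9, 0.98))
```

In the function above, the resulting groups are handed to the optimizer together with `get_cosine_schedule_with_warmup` (or the polynomial variant) wrapped as `{"scheduler": ..., "interval": "step"}`, so the learning rate is stepped every batch rather than every epoch.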
+ k[8:] # remove `student.` + new_state_dict[name] = v + # continue + elif k[:8] == "teacher.": + # name = "transformer." + k[8:] # remove `teacher.` + # new_state_dict[name] = v + continue + else: + new_state_dict[k] = v + else: + for k, v in load_state_dict.items(): + print(k) + new_state_dict = load_state_dict + print('*'*30) + print("new state dict") + print('*'*30) + for k, v in new_state_dict.items(): + print(k) + return new_state_dict + + +def read_load_path(load_path): + if "s3://" in load_path: + assert client is not None, "Failed to init petrel client" + model_bytes = client.get(load_path) + assert load_path is not None, "Read fail from {}".format(load_path) + return io.BytesIO(model_bytes) + else: + return load_path \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/dist_utils.py b/Downstream/multi-modalities-downstream/CoTrain/modules/dist_utils.py new file mode 100644 index 0000000..42a3a7c --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/dist_utils.py @@ -0,0 +1,270 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +This file contains primitives for multi-gpu communication. +This is useful when doing distributed training. +""" + +import functools +import logging +import numpy as np +import pickle +import torch +import torch.distributed as dist + +import torch + +_LOCAL_PROCESS_GROUP = None +""" +A torch process group which only includes processes that on the same machine as the current process. +This variable is set when processes are spawned by `launch()` in "engine/launch.py". +""" + + +def get_world_size() -> int: + if not dist.is_available(): + return 1 + if not dist.is_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank() -> int: + if not dist.is_available(): + return 0 + if not dist.is_initialized(): + return 0 + return dist.get_rank() + + +def get_local_rank() -> int: + """ + Returns: + The rank of the current process within the local (per-machine) process group. + """ + if not dist.is_available(): + return 0 + if not dist.is_initialized(): + return 0 + assert _LOCAL_PROCESS_GROUP is not None + return dist.get_rank(group=_LOCAL_PROCESS_GROUP) + + +def get_local_size() -> int: + """ + Returns: + The size of the per-machine process group, + i.e. the number of processes per machine. + """ + if not dist.is_available(): + return 1 + if not dist.is_initialized(): + return 1 + return dist.get_world_size(group=_LOCAL_PROCESS_GROUP) + + +def is_main_process() -> bool: + return get_rank() == 0 + + +def synchronize(): + """ + Helper function to synchronize (barrier) among all processes when + using distributed training + """ + if not dist.is_available(): + return + if not dist.is_initialized(): + return + world_size = dist.get_world_size() + if world_size == 1: + return + dist.barrier() + + +@functools.lru_cache() +def _get_global_gloo_group(): + """ + Return a process group based on gloo backend, containing all the ranks + The result is cached. 
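`state_dict_data_parallel_fix` and `state_dict_dino_fix` (defined just above, at the end of `cotrain_utils.py`) reconcile checkpoints saved with or without `nn.DataParallel`'s `module.` prefix and map a DINO `student.` tower onto the `transformer.` namespace while dropping the `teacher.` copy; `read_load_path` additionally streams checkpoints from S3 through the petrel client. A small standalone sketch of the prefix-remapping mechanics, with hypothetical key names:

```python
from collections import OrderedDict

def remap_prefix(state_dict, strip="module.", add=""):
    """Strip one key prefix and/or prepend another, as the *_fix helpers above do."""
    out = OrderedDict()
    for k, v in state_dict.items():
        if strip and k.startswith(strip):
            k = k[len(strip):]
        out[add + k] = v
    return out

# A checkpoint saved under DataParallel, loaded into a bare (non-DP) model:
ckpt = {"module.encoder.weight": 1, "module.encoder.bias": 2}
print(list(remap_prefix(ckpt, strip="module.").keys()))        # ['encoder.weight', 'encoder.bias']

# A DINO checkpoint: keep the student tower only, rename it into the transformer namespace:
dino = {"student.blocks.0.attn.qkv.weight": 3, "teacher.blocks.0.attn.qkv.weight": 4}
student_only = {k: v for k, v in dino.items() if k.startswith("student.")}
print(list(remap_prefix(student_only, strip="student.", add="transformer.").keys()))
# ['transformer.blocks.0.attn.qkv.weight']
```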
+ """ + if dist.get_backend() == "nccl": + return dist.new_group(backend="gloo") + else: + return dist.group.WORLD + + +def _serialize_to_tensor(data, group): + backend = dist.get_backend(group) + assert backend in ["gloo", "nccl"] + device = torch.device("cpu" if backend == "gloo" else "cuda") + + buffer = pickle.dumps(data) + if len(buffer) > 1024 ** 3: + logger = logging.getLogger(__name__) + logger.warning( + "Rank {} trying to all-gather {:.2f} GB of data on device {}".format( + get_rank(), len(buffer) / (1024 ** 3), device + ) + ) + storage = torch.ByteStorage.from_buffer(buffer) + tensor = torch.ByteTensor(storage).to(device=device) + return tensor + + +def _pad_to_largest_tensor(tensor, group): + """ + Returns: + list[int]: size of the tensor, on each rank + Tensor: padded tensor that has the max size + """ + world_size = dist.get_world_size(group=group) + assert ( + world_size >= 1 + ), "comm.gather/all_gather must be called from ranks within the given group!" + local_size = torch.tensor([tensor.numel()], dtype=torch.int64, device=tensor.device) + size_list = [ + torch.zeros([1], dtype=torch.int64, device=tensor.device) + for _ in range(world_size) + ] + dist.all_gather(size_list, local_size, group=group) + size_list = [int(size.item()) for size in size_list] + + max_size = max(size_list) + + # we pad the tensor because torch all_gather does not support + # gathering tensors of different shapes + if local_size != max_size: + padding = torch.zeros( + (max_size - local_size,), dtype=torch.uint8, device=tensor.device + ) + tensor = torch.cat((tensor, padding), dim=0) + return size_list, tensor + + +def all_gather(data, group=None): + """ + Run all_gather on arbitrary picklable data (not necessarily tensors). + + Args: + data: any picklable object + group: a torch process group. By default, will use a group which + contains all ranks on gloo backend. + + Returns: + list[data]: list of data gathered from each rank + """ + if get_world_size() == 1: + return [data] + if group is None: + group = _get_global_gloo_group() + if dist.get_world_size(group) == 1: + return [data] + + tensor = _serialize_to_tensor(data, group) + + size_list, tensor = _pad_to_largest_tensor(tensor, group) + max_size = max(size_list) + + # receiving Tensor from all ranks + tensor_list = [ + torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) + for _ in size_list + ] + dist.all_gather(tensor_list, tensor, group=group) + + data_list = [] + for size, tensor in zip(size_list, tensor_list): + buffer = tensor.cpu().numpy().tobytes()[:size] + data_list.append(pickle.loads(buffer)) + + return data_list + + +def gather(data, dst=0, group=None): + """ + Run gather on arbitrary picklable data (not necessarily tensors). + + Args: + data: any picklable object + dst (int): destination rank + group: a torch process group. By default, will use a group which + contains all ranks on gloo backend. + + Returns: + list[data]: on dst, a list of data gathered from each rank. Otherwise, + an empty list. 
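`all_gather` in `dist_utils.py` above can exchange arbitrary picklable Python objects: each rank pickles its payload into a `uint8` tensor, the tensors are right-padded to the largest size (the collective requires identical shapes on every rank), gathered, truncated back to their true lengths, and unpickled. The retrieval-recall functions later in this diff rely on it to collect per-rank score lists. The snippet below reproduces the serialize → pad → truncate → deserialize round trip locally, without an actual process group, just to show the mechanics:

```python
import pickle
import torch

def serialize(obj):
    """Mirror _serialize_to_tensor: pickle an object into a uint8 tensor."""
    return torch.tensor(bytearray(pickle.dumps(obj)), dtype=torch.uint8)

def pad_to(tensor, size):
    """Mirror _pad_to_largest_tensor: right-pad so every rank shares one shape."""
    return torch.cat([tensor, torch.zeros(size - tensor.numel(), dtype=torch.uint8)])

# Pretend these payloads came from two ranks and have different sizes.
payloads = [{"rank": 0, "scores": [0.1, 0.9]}, {"rank": 1, "scores": [0.3]}]
tensors = [serialize(p) for p in payloads]
sizes = [t.numel() for t in tensors]                       # the exchanged size_list
padded = [pad_to(t, max(sizes)) for t in tensors]          # what dist.all_gather would transport

# Receiving side: cut each buffer back to its true size, then unpickle.
recovered = [pickle.loads(p.numpy().tobytes()[:s]) for p, s in zip(padded, sizes)]
assert recovered == payloads
```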
+ """ + if get_world_size() == 1: + return [data] + if group is None: + group = _get_global_gloo_group() + if dist.get_world_size(group=group) == 1: + return [data] + rank = dist.get_rank(group=group) + + tensor = _serialize_to_tensor(data, group) + size_list, tensor = _pad_to_largest_tensor(tensor, group) + + # receiving Tensor from all ranks + if rank == dst: + max_size = max(size_list) + tensor_list = [ + torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) + for _ in size_list + ] + dist.gather(tensor, tensor_list, dst=dst, group=group) + + data_list = [] + for size, tensor in zip(size_list, tensor_list): + buffer = tensor.cpu().numpy().tobytes()[:size] + data_list.append(pickle.loads(buffer)) + return data_list + else: + dist.gather(tensor, [], dst=dst, group=group) + return [] + + +def shared_random_seed(): + """ + Returns: + int: a random number that is the same across all workers. + If workers need a shared RNG, they can use this shared seed to + create one. + + All workers must call this function, otherwise it will deadlock. + """ + ints = np.random.randint(2 ** 31) + all_ints = all_gather(ints) + return all_ints[0] + + +def reduce_dict(input_dict, average=True): + """ + Reduce the values in the dictionary from all processes so that process with rank + 0 has the reduced results. + + Args: + input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor. + average (bool): whether to do average or sum + + Returns: + a dict with the same keys as input_dict, after reduction. + """ + world_size = get_world_size() + if world_size < 2: + return input_dict + with torch.no_grad(): + names = [] + values = [] + # sort the keys so that they are consistent across processes + for k in sorted(input_dict.keys()): + names.append(k) + values.append(input_dict[k]) + values = torch.stack(values, dim=0) + dist.reduce(values, dst=0) + if dist.get_rank() == 0 and average: + # only main process gets accumulated, so only divide by + # world_size in this case + values /= world_size + reduced_dict = {k: v for k, v in zip(names, values)} + return reduced_dict diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/forzen_param.py b/Downstream/multi-modalities-downstream/CoTrain/modules/forzen_param.py new file mode 100644 index 0000000..f73375d --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/forzen_param.py @@ -0,0 +1,19 @@ +import torch + + +# def forzen_param(model): +# for name, param in model.named_parameters(): +# if 'mlm_score' in name or 'vtm_score' in name or 'mpp_score' in name: +# param.requires_grad = True +# else: +# param.requires_grad = False +# return True + + +def forzen_param(model): + flag = False + for name, param in model.named_parameters(): + if '10' in name: + flag = True + param.requires_grad = flag + return True \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/heads.py b/Downstream/multi-modalities-downstream/CoTrain/modules/heads.py new file mode 100644 index 0000000..951daf2 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/heads.py @@ -0,0 +1,56 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from transformers.models.bert.modeling_bert import BertPredictionHeadTransform + + +class Pooler(nn.Module): + def __init__(self, hidden_size): + super().__init__() + self.dense = nn.Linear(hidden_size, hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states): + # print(hidden_states.size()) # 64 x 237 
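`reduce_dict` above sums a dictionary of scalar tensors across ranks and averages on rank 0, so a loss dictionary can be logged once per step rather than once per process, while `shared_random_seed` makes every worker adopt rank 0's random draw. The intended call pattern looks roughly like this; it assumes the `dist_utils` module from this diff is importable and, for a real reduction, that `torch.distributed` has been initialized with the values on CUDA (with a single process both helpers degenerate to no-ops):

```python
import torch
from CoTrain.modules import dist_utils  # the module added in this diff

def log_losses(loss_dict):
    """loss_dict: {name: scalar tensor}. Print the cross-rank average once, on rank 0."""
    reduced = dist_utils.reduce_dict(loss_dict, average=True)
    if dist_utils.is_main_process():
        print("  ".join(f"{k}: {v.item():.4f}" for k, v in reduced.items()))

# A seed that is identical on every rank, e.g. for building matching samplers.
seed = int(dist_utils.shared_random_seed())
generator = torch.Generator().manual_seed(seed)

log_losses({"vtm_loss": torch.tensor(0.73), "mlm_loss": torch.tensor(2.10)})
```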
x 768 + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class vtmHead(nn.Module): + def __init__(self, hidden_size): + super().__init__() + self.fc = nn.Linear(hidden_size, 2) + + def forward(self, x): + x = self.fc(x) + return x + + +class MLMHead(nn.Module): + def __init__(self, config, weight=None): + super().__init__() + self.transform = BertPredictionHeadTransform(config) + self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + if weight is not None: + self.decoder.weight = weight + + def forward(self, x): + x = self.transform(x) + x = self.decoder(x) + self.bias + return x + + +class MPPHead(nn.Module): + def __init__(self, config): + super().__init__() + self.transform = BertPredictionHeadTransform(config) + self.decoder = nn.Linear(config.hidden_size, 256 * 3) + + def forward(self, x): + x = self.transform(x) + x = self.decoder(x) + return x diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/objectives.py b/Downstream/multi-modalities-downstream/CoTrain/modules/objectives.py new file mode 100644 index 0000000..ee9d891 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/objectives.py @@ -0,0 +1,1491 @@ +import random +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F +import os +import glob +import json +import tqdm +import functools +import itertools +from torch.utils.data.distributed import DistributedSampler +import torch.distributed.nn as distnn +from einops import rearrange, repeat + +from CoTrain.modules.dist_utils import all_gather +from CoTrain.modules.retrieval_metrics import t2v_metrics, v2t_metrics + + +def cost_matrix_cosine(x, y, eps=1e-5): + """Compute cosine distnace across every pairs of x, y (batched) + [B, L_x, D] [B, L_y, D] -> [B, Lx, Ly]""" + assert x.dim() == y.dim() + assert x.size(0) == y.size(0) + assert x.size(2) == y.size(2) + x_norm = F.normalize(x, p=2, dim=-1, eps=eps) + y_norm = F.normalize(y, p=2, dim=-1, eps=eps) + cosine_sim = x_norm.matmul(y_norm.transpose(1, 2)) + cosine_dist = 1 - cosine_sim + return cosine_dist + + +def trace(x): + """ compute trace of input tensor (batched) """ + b, m, n = x.size() + assert m == n + mask = torch.eye(n, dtype=torch.bool, device=x.device).unsqueeze(0).expand_as(x) + trace = x.masked_select(mask).contiguous().view(b, n).sum(dim=-1, keepdim=False) + return trace + + +@torch.no_grad() +def ipot(C, x_len, x_pad, y_len, y_pad, joint_pad, beta, iteration, k): + """ [B, M, N], [B], [B, M], [B], [B, N], [B, M, N]""" + b, m, n = C.size() + sigma = torch.ones(b, m, dtype=C.dtype, device=C.device) / x_len.unsqueeze(1) + T = torch.ones(b, n, m, dtype=C.dtype, device=C.device) + A = torch.exp(-C.transpose(1, 2) / beta) + + # mask padded positions + sigma.masked_fill_(x_pad, 0) + joint_pad = joint_pad.transpose(1, 2) + T.masked_fill_(joint_pad, 0) + A.masked_fill_(joint_pad, 0) + + # broadcastable lengths + x_len = x_len.unsqueeze(1).unsqueeze(2) + y_len = y_len.unsqueeze(1).unsqueeze(2) + + # mask to zero out padding in delta and sigma + x_mask = (x_pad.to(C.dtype) * 1e4).unsqueeze(1) + y_mask = (y_pad.to(C.dtype) * 1e4).unsqueeze(1) + + for _ in range(iteration): + Q = A * T # bs * n * m + sigma = sigma.view(b, m, 1) + for _ in range(k): + delta = 1 / (y_len * Q.matmul(sigma).view(b, 1, n) + y_mask) + sigma = 1 / (x_len * delta.matmul(Q) + x_mask) + T = 
delta.view(b, n, 1) * Q * sigma + T.masked_fill_(joint_pad, 0) + return T + + +def optimal_transport_dist( + txt_emb, img_emb, txt_pad, img_pad, beta=0.5, iteration=50, k=1 +): + """ [B, M, D], [B, N, D], [B, M], [B, N]""" + cost = cost_matrix_cosine(txt_emb, img_emb) + # mask the padded inputs + joint_pad = txt_pad.unsqueeze(-1) | img_pad.unsqueeze(-2) + cost.masked_fill_(joint_pad, 0) + + txt_len = (txt_pad.size(1) - txt_pad.sum(dim=1, keepdim=False)).to(dtype=cost.dtype) + img_len = (img_pad.size(1) - img_pad.sum(dim=1, keepdim=False)).to(dtype=cost.dtype) + + T = ipot( + cost.detach(), txt_len, txt_pad, img_len, img_pad, joint_pad, beta, iteration, k + ) + distance = trace(cost.matmul(T.detach())) + return distance + + +def compute_mlm(pl_module, batch, infer=None, mode="video"): + if infer is None: + infer = pl_module.infer(batch, mask_text=True, mode=mode) + mlm_logits = pl_module.mlm_score(infer["text_feats"]) + mlm_labels = infer["text_labels"] + + mlm_loss = F.cross_entropy( + mlm_logits.view(-1, pl_module.hparams.config["vocab_size"]), + mlm_labels.view(-1), + ignore_index=-100, + ) + + ret = { + "mlm_loss": mlm_loss, + "mlm_logits": mlm_logits, + "mlm_labels": mlm_labels, + "mlm_ids": infer["text_ids"], + } + + phase = "train" if pl_module.training else "val" + loss = getattr(pl_module, f"{phase}_mlm_loss")(ret["mlm_loss"]) + acc = getattr(pl_module, f"{phase}_mlm_accuracy")( + ret["mlm_logits"].view(-1, pl_module.hparams.config["vocab_size"]), + ret["mlm_labels"].view(-1) + ) + pl_module.log(f"mlm/{phase}/loss", loss) + pl_module.log(f"mlm/{phase}/accuracy", acc) + + return ret + +def sim_matrix(a, b, eps=1e-8): + """ + added eps for numerical stability + """ + a_n, b_n = a.norm(dim=1)[:, None], b.norm(dim=1)[:, None] + a_norm = a / torch.max(a_n, eps * torch.ones_like(a_n)) + b_norm = b / torch.max(b_n, eps * torch.ones_like(b_n)) + sim_mt = torch.mm(a_norm, b_norm.transpose(0, 1)) + return sim_mt + +# == end + + +# add independent contrastive loss for retrieval + +def compute_vtc(pl_module, batch, mode="video"): + infer_text = pl_module.infer(batch, mask_text=False, mask_video=False, input_text_only=True, mode=mode) + with torch.cuda.amp.autocast(enabled=False): + txt_emb = infer_text["text_feats"] + infer_vision = pl_module.infer(batch, mask_text=False, mask_video=False, input_video_only=True, mode=mode) + with torch.cuda.amp.autocast(enabled=False): + img_emb = infer_vision["video_feats"] + # print(txt_emb.size(), img_emb.size()) + x = sim_matrix(txt_emb[:, 0], img_emb[:, 0]) + temperature = 0.05 + "Assumes input x is similarity matrix of N x M \in [-1, 1], computed using the cosine similarity between normalised vectors" + i_logsm = F.log_softmax(x / temperature, dim=1) + j_logsm = F.log_softmax(x.t() / temperature, dim=1) + + # sum over positives + idiag = torch.diag(i_logsm) + loss_i = idiag.sum() / len(idiag) + + jdiag = torch.diag(j_logsm) + loss_j = jdiag.sum() / len(jdiag) + + itc_loss = - loss_i - loss_j + + ret = { + "vtc_loss": itc_loss, + } + + phase = "train" if pl_module.training else "val" + loss = getattr(pl_module, f"{phase}_vtc_loss")(ret["vtc_loss"]) + pl_module.log(f"vtc/{phase}/loss", loss) + + return ret + +# == end + +def compute_vtm_wpa(pl_module, batch, mode="video"): + pos_len = len(batch["text"]) // 2 + neg_len = len(batch["text"]) - pos_len + vtm_labels = torch.cat([torch.ones(pos_len), torch.zeros(neg_len)]).to( + pl_module.device + ) + vtm_labels = vtm_labels[torch.randperm(vtm_labels.size(0))] + + # print(batch.keys()) + + vtm_videos = [ + 
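`compute_vtc` above is a symmetric InfoNCE-style objective: `sim_matrix` builds a cosine-similarity matrix between the text and video [CLS] embeddings, the matrix is divided by a temperature of 0.05, and the loss is the negative mean of the diagonal of the row-wise plus column-wise log-softmax (the bare string in the middle of that function is a stray note, not a docstring). A self-contained sketch of the same loss:

```python
import torch
import torch.nn.functional as F

def vtc_loss(text_emb, video_emb, temperature=0.05, eps=1e-8):
    """Symmetric contrastive loss over paired (text, video) embeddings,
    matching the sim_matrix + diagonal log-softmax pattern in compute_vtc."""
    t = F.normalize(text_emb, dim=1, eps=eps)
    v = F.normalize(video_emb, dim=1, eps=eps)
    sim = t @ v.t() / temperature                        # N x N, positives on the diagonal
    loss_t2v = -torch.diag(F.log_softmax(sim, dim=1)).mean()
    loss_v2t = -torch.diag(F.log_softmax(sim.t(), dim=1)).mean()
    return loss_t2v + loss_v2t

# Toy check: near-identical pairs should give a small loss.
emb = torch.randn(8, 512)
print(vtc_loss(emb, emb + 0.01 * torch.randn(8, 512)))
```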
torch.stack( + [ + ti if vtm_labels[i] == 1 else fi + for i, (ti, fi) in enumerate(zip(bti, bfi)) + ] + ) + for bti, bfi in zip(batch["video"], batch["false_video_0"]) + ] + + batch = {k: v for k, v in batch.items()} + batch["video"] = vtm_videos + + infer = pl_module.infer(batch, mask_text=False, mask_video=False, mode=mode) + + with torch.cuda.amp.autocast(enabled=False): + txt_emb, img_emb = infer["text_feats"], infer["video_feats"] + txt_mask, img_mask = infer["text_masks"].bool(), infer["video_masks"].bool() + for i, _len in enumerate(txt_mask.sum(dim=1)): + txt_mask[i, _len - 1] = False + txt_mask[:, 0] = False + img_mask[:, 0] = False + if "deit" in pl_module.hparams.config["vit"]: + img_mask[:, 1] = False + txt_pad, img_pad = ~txt_mask, ~img_mask + + cost = cost_matrix_cosine(txt_emb.float(), img_emb.float()) + joint_pad = txt_pad.unsqueeze(-1) | img_pad.unsqueeze(-2) + cost.masked_fill_(joint_pad, 0) + + txt_len = (txt_pad.size(1) - txt_pad.sum(dim=1, keepdim=False)).to( + dtype=cost.dtype + ) + img_len = (img_pad.size(1) - img_pad.sum(dim=1, keepdim=False)).to( + dtype=cost.dtype + ) + T = ipot( + cost.detach(), txt_len, txt_pad, img_len, img_pad, joint_pad, 0.5, 50, 1 + ) + distance = trace(cost.matmul(T.detach())) + + dist_pos = distance.masked_select(vtm_labels == 1) + dist_neg = distance.masked_select(vtm_labels == 0) + ot_loss = (dist_pos.sum() - dist_neg.sum()) / (dist_pos.size(0) + dist_neg.size(0)) + + vtm_logits = pl_module.vtm_score(infer["cls_feats"]) + vtm_loss = F.cross_entropy(vtm_logits, vtm_labels.long()) + + ret = { + "vtm_loss": vtm_loss, + "vtm_wpa_loss": 0.1 * ot_loss, + "vtm_logits": vtm_logits, + "vtm_labels": vtm_labels, + } + + phase = "train" if pl_module.training else "val" + loss = getattr(pl_module, f"{phase}_vtm_loss")(ret["vtm_loss"]) + wpa_loss = getattr(pl_module, f"{phase}_vtm_wpa_loss")(ret["vtm_wpa_loss"]) + acc = getattr(pl_module, f"{phase}_vtm_accuracy")( + ret["vtm_logits"], ret["vtm_labels"] + ) + pl_module.log(f"vtm/{phase}/loss", loss) + pl_module.log(f"vtm/{phase}/wpa_loss", wpa_loss) + pl_module.log(f"vtm/{phase}/accuracy", acc) + + return ret + + +def compute_imgcls(pl_module, batch): + infer = pl_module.infer(batch, mask_text=False, mask_video=False) + imgcls_logits = pl_module.img_classifier(infer["cls_feats"]) + imgcls_labels = batch["label"] + imgcls_labels = torch.tensor(imgcls_labels).to(pl_module.device).long() + imgcls_loss = F.cross_entropy(imgcls_logits, imgcls_labels) + + ret = { + "imgcls_loss": imgcls_loss, + "imgcls_logits": imgcls_logits, + "imgcls_labels": imgcls_labels, + } + + phase = "train" if pl_module.training else "val" + loss = getattr(pl_module, f"{phase}_imgcls_loss")(ret["imgcls_loss"]) + acc = getattr(pl_module, f"{phase}_imgcls_accuracy")( + ret["imgcls_logits"], ret["imgcls_labels"] + ) + pl_module.log(f"imgcls/{phase}/loss", loss) + pl_module.log(f"imgcls/{phase}/accuracy", acc) + + return ret + + +# vcr q -> a +def compute_vcr_q2a(pl_module, batch): + false_len = pl_module.hparams.config["draw_options_text"] - 1 + vtm_labels = torch.tensor(batch["answer"]).to(pl_module.device).long() + _bs, _t, _c, _h, _w = batch["video"][0].shape + # for qa + text_ids = torch.stack( + [batch[f"options_text_{i}_ids"] for i in range(false_len)], dim=1 + ) + text_masks = torch.stack( + [batch[f"options_text_{i}_masks"] for i in range(false_len)], dim=1 + ) + text_labels = torch.stack( + [batch[f"options_text_{i}_labels"] for i in range(false_len)], dim=1 + ) + + # concat first option and other options + text_ids = 
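`compute_vtm_wpa` above pairs the binary video-text matching loss with a word-patch alignment (WPA) term: it runs IPOT on the cosine-cost matrix between text tokens and video patches (special tokens masked out), and pushes the resulting transport distance down for matched pairs and up for mismatched ones, weighted by 0.1. Assuming the helpers from this `objectives.py` are importable, the alignment distance can be exercised in isolation like this:

```python
import torch
# cost_matrix_cosine / ipot / trace / optimal_transport_dist are defined in
# CoTrain/modules/objectives.py added by this diff
from CoTrain.modules.objectives import optimal_transport_dist

B, M, N, D = 2, 7, 13, 32                       # batch, text tokens, video patches, feature dim
txt_emb = torch.randn(B, M, D)
img_emb = torch.randn(B, N, D)
txt_pad = torch.zeros(B, M, dtype=torch.bool)   # True where a position is padding
img_pad = torch.zeros(B, N, dtype=torch.bool)
txt_pad[:, -2:] = True                          # pretend the last two text tokens are padding

dist = optimal_transport_dist(txt_emb, img_emb, txt_pad, img_pad)   # (B,) alignment distances
print(dist)
```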
torch.cat([batch["text_ids"].unsqueeze(1), text_ids], dim=1) + text_masks = torch.cat([batch["text_masks"].unsqueeze(1), text_masks], dim=1) + text_labels = torch.cat([batch["text_labels"].unsqueeze(1), text_labels], dim=1) + videos = batch["video"][0].unsqueeze(1).expand(_bs, false_len + 1, _t, _c, _h, _w) + + infer = pl_module.infer( + { + "video": [rearrange(videos, "bs fs t c h w -> (bs fs) t c h w")], + "text_ids": rearrange(text_ids, "bs fs tl -> (bs fs) tl"), + "text_masks": rearrange(text_masks, "bs fs tl -> (bs fs) tl"), + "text_labels": rearrange(text_labels, "bs fs tl -> (bs fs) tl"), + } + ) + score = pl_module.rank_output(infer["cls_feats"])[:, 0] + score = rearrange(score, "(bs fs) -> bs fs", bs=_bs, fs=false_len + 1) + qa_loss = F.cross_entropy(score, vtm_labels) + # for qa->r + + reason_len = pl_module.hparams.config["draw_options_text"] + qar_labels = torch.tensor(batch["reason_answer"]).to(pl_module.device).long() + _bs, _t, _c, _h, _w = batch["video"][0].shape + # for qar + qar_text_ids = torch.stack( + [batch[f"qar_text_{i}_ids"] for i in range(reason_len)], dim=1 + ) + qar_text_masks = torch.stack( + [batch[f"qar_text_{i}_masks"] for i in range(reason_len)], dim=1 + ) + qar_text_labels = torch.stack( + [batch[f"qar_text_{i}_labels"] for i in range(reason_len)], dim=1 + ) + + # concat first option and other options + videos = batch["video"][0].unsqueeze(1).expand(_bs, reason_len, _t, _c, _h, _w) + + qar_infer = pl_module.infer( + { + "video": [rearrange(videos, "bs fs t c h w -> (bs fs) t c h w")], + "text_ids": rearrange(qar_text_ids, "bs fs tl -> (bs fs) tl"), + "text_masks": rearrange(qar_text_masks, "bs fs tl -> (bs fs) tl"), + "text_labels": rearrange(qar_text_labels, "bs fs tl -> (bs fs) tl"), + } + ) + qar_score = pl_module.rank_output_2(qar_infer["cls_feats"])[:, 0] + qar_score = rearrange(qar_score, "(bs fs) -> bs fs", bs=_bs, fs=reason_len) + qar_loss = F.cross_entropy(qar_score, qar_labels) + + # print(score, vtm_labels) + phase = "train" if pl_module.training else "val" + qa_acc = getattr(pl_module, f"{phase}_vcr_q2a_accuracy")( + score, vtm_labels + ) + qar_acc = getattr(pl_module, f"{phase}_vcr_qar_accuracy")( + qar_score, qar_labels + ) + + ret = { + "vcr_q2a_loss": qa_loss, + "vcr_qar_loss": qar_loss + } + + phase = "train" if pl_module.training else "val" + qa_loss = getattr(pl_module, f"{phase}_vcr_q2a_loss")(ret["vcr_q2a_loss"]) + qar_loss = getattr(pl_module, f"{phase}_vcr_qar_loss")(ret["vcr_qar_loss"]) + + pl_module.log(f"vcr_q2a/{phase}/loss", qa_loss) + pl_module.log(f"vcr_qar/{phase}/loss", qar_loss) + pl_module.log(f"vcr_q2a/{phase}/accuracy", qa_acc) + pl_module.log(f"vcr_qar/{phase}/accuracy", qar_acc) + return ret + + +# vcr qa -> r +def compute_vcr_qa2r(pl_module, batch): + false_len = pl_module.hparams.config["draw_false_text"] - 1 + # stack video multiple times + # print(batch["answer"]) + vtm_labels = torch.tensor(batch["answer"]).to(pl_module.device).long() + _bs, _t, _c, _h, _w = batch["video"][0].shape + # print(batch.keys()) + + text_ids = torch.stack( + [batch[f"false_text_{i}_ids"] for i in range(false_len)], dim=1 + ) + text_masks = torch.stack( + [batch[f"false_text_{i}_masks"] for i in range(false_len)], dim=1 + ) + text_labels = torch.stack( + [batch[f"false_text_{i}_labels"] for i in range(false_len)], dim=1 + ) + + # concat first option and other options + text_ids = torch.cat([batch["text_ids"].unsqueeze(1), text_ids], dim=1) + text_masks = torch.cat([batch["text_masks"].unsqueeze(1), text_masks], dim=1) + text_labels = 
torch.cat([batch["text_labels"].unsqueeze(1), text_labels], dim=1) + videos = batch["video"][0].unsqueeze(1).expand(_bs, false_len + 1, _t, _c, _h, _w) + + infer = pl_module.infer( + { + "video": [rearrange(videos, "bs fs t c h w -> (bs fs) t c h w")], + "text_ids": rearrange(text_ids, "bs fs tl -> (bs fs) tl"), + "text_masks": rearrange(text_masks, "bs fs tl -> (bs fs) tl"), + "text_labels": rearrange(text_labels, "bs fs tl -> (bs fs) tl"), + } + ) + score = pl_module.rank_output(infer["cls_feats"])[:, 0] + score = rearrange(score, "(bs fs) -> bs fs", bs=_bs, fs=false_len + 1) + loss = F.cross_entropy(score, vtm_labels) + + # print(score, vtm_labels) + + phase = "train" if pl_module.training else "val" + acc = getattr(pl_module, f"{phase}_multiple_choice_accuracy")( + score, vtm_labels + ) + + ret = { + "multiple_choice_loss": loss, + } + + phase = "train" if pl_module.training else "val" + loss = getattr(pl_module, f"{phase}_multiple_choice_loss")(ret["multiple_choice_loss"]) + + pl_module.log(f"multiple_choice/{phase}/loss", loss) + pl_module.log(f"multiple_choice/{phase}/accuracy", acc) + return ret + + +# mc_vqa +def compute_mc_vqa_q2a(pl_module, batch): + false_len = pl_module.hparams.config["draw_options_text"] - 1 + vtm_labels = torch.tensor(batch["answer"]).to(pl_module.device).long() + _bs, _t, _c, _h, _w = batch["video"][0].shape + # for qa + text_ids = torch.stack( + [batch[f"options_text_{i}_ids"] for i in range(false_len)], dim=1 + ) + text_masks = torch.stack( + [batch[f"options_text_{i}_masks"] for i in range(false_len)], dim=1 + ) + text_labels = torch.stack( + [batch[f"options_text_{i}_labels"] for i in range(false_len)], dim=1 + ) + + # concat first option and other options + text_ids = torch.cat([batch["text_ids"].unsqueeze(1), text_ids], dim=1) + text_masks = torch.cat([batch["text_masks"].unsqueeze(1), text_masks], dim=1) + text_labels = torch.cat([batch["text_labels"].unsqueeze(1), text_labels], dim=1) + videos = batch["video"][0].unsqueeze(1).expand(_bs, false_len + 1, _t, _c, _h, _w) + + infer = pl_module.infer( + { + "video": [rearrange(videos, "bs fs t c h w -> (bs fs) t c h w")], + "text_ids": rearrange(text_ids, "bs fs tl -> (bs fs) tl"), + "text_masks": rearrange(text_masks, "bs fs tl -> (bs fs) tl"), + "text_labels": rearrange(text_labels, "bs fs tl -> (bs fs) tl"), + } + ) + ## v0: use rank output + # score = pl_module.rank_output(infer["cls_feats"])[:, 0] + ## v1: use classification head + # print(infer["cls_feats"].size()) # 40, 768 + score = pl_module.mc_vqa_classifier(infer["cls_feats"])[:, 0] + score = rearrange(score, "(bs fs) -> bs fs", bs=_bs, fs=false_len + 1) + qa_loss = F.cross_entropy(score, vtm_labels) + # print(score, vtm_labels) + phase = "train" if pl_module.training else "val" + qa_acc = getattr(pl_module, f"{phase}_mc_vqa_accuracy")( + score, vtm_labels + ) + ret = { + "mc_vqa_loss": qa_loss, + } + + phase = "train" if pl_module.training else "val" + qa_loss = getattr(pl_module, f"{phase}_mc_vqa_loss")(ret["mc_vqa_loss"]) + pl_module.log(f"mc_vqa/{phase}/loss", qa_loss) + pl_module.log(f"mc_vqa/{phase}/accuracy", qa_acc) + return ret + + +# msrvtt multiple choice +def compute_multiple_choice(pl_module, batch): + false_len = pl_module.hparams.config["draw_false_text"] - 1 + # stack image multiple times + # print(batch["answer"]) + vtm_labels = torch.tensor(batch["answer"]).to(pl_module.device).long() + _bs, _t, _c, _h, _w = batch["video"][0].shape + # print(batch.keys()) + + texts = [batch[f"false_text_{i}"] for i in range(false_len)] + 
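`compute_vcr_q2a`, `compute_vcr_qa2r`, and `compute_mc_vqa_q2a` above (and the all-in-one branch of `compute_multiple_choice` just below) share one batching trick: the single video is expanded along a new "options" axis so every candidate answer sees the same frames, video and text are flattened to `(bs*fs, ...)` with einops for a single forward pass, and the scores are folded back to `(bs, fs)` for a cross-entropy over options. A shape-only sketch of that round trip:

```python
import torch
import torch.nn.functional as F
from einops import rearrange

bs, fs, t, c, h, w = 2, 4, 3, 3, 8, 8          # batch, options, frames, channels, height, width
video = torch.randn(bs, t, c, h, w)
text_ids = torch.randint(0, 1000, (bs, fs, 12))

# Pair the same video with every candidate text, then fold options into the batch axis.
videos = video.unsqueeze(1).expand(bs, fs, t, c, h, w)
flat_videos = rearrange(videos, "bs fs t c h w -> (bs fs) t c h w")
flat_text = rearrange(text_ids, "bs fs tl -> (bs fs) tl")

# A single forward pass yields one score per (sample, option); fold them back to (bs, fs).
scores = torch.randn(bs * fs)                  # stand-in for rank_output(infer["cls_feats"])[:, 0]
scores = rearrange(scores, "(bs fs) -> bs fs", bs=bs, fs=fs)
answer = torch.randint(0, fs, (bs,))
loss = F.cross_entropy(scores, answer)
```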
text_ids = torch.stack( + [batch[f"false_text_{i}_ids"] for i in range(false_len)], dim=1 + ) + text_masks = torch.stack( + [batch[f"false_text_{i}_masks"] for i in range(false_len)], dim=1 + ) + text_labels = torch.stack( + [batch[f"false_text_{i}_labels"] for i in range(false_len)], dim=1 + ) + + # concat first option and other options + texts = [batch["text"]] + texts + text_ids = torch.cat([batch["text_ids"].unsqueeze(1), text_ids], dim=1) + text_masks = torch.cat([batch["text_masks"].unsqueeze(1), text_masks], dim=1) + text_labels = torch.cat([batch["text_labels"].unsqueeze(1), text_labels], dim=1) + + if "cotrain" in type(pl_module).__name__.lower(): + videos = batch["video"][0].unsqueeze(1).expand(_bs, false_len + 1, _t, _c, _h, _w) + infer = pl_module.infer( + { + "video": [rearrange(videos, "bs fs t c h w -> (bs fs) t c h w")], + "text_ids": rearrange(text_ids, "bs fs tl -> (bs fs) tl"), + "text_masks": rearrange(text_masks, "bs fs tl -> (bs fs) tl"), + "text_labels": rearrange(text_labels, "bs fs tl -> (bs fs) tl"), + } + ) + score = pl_module.rank_output(infer["cls_feats"])[:, 0] + score = rearrange(score, "(bs fs) -> bs fs", bs=_bs, fs=false_len + 1) + elif "clip" in type(pl_module).__name__.lower(): + if pl_module.mc_type == "vtc": + videos = batch["video"][0] + infer = pl_module.infer( + { + "video": [videos], + "text": [x for y in zip(*texts) for x in y] + } + ) + video_feats = infer["video_feats"] # 8 * 512 + text_feats = infer["text_feats"] # 40 * 512 + + video_feats = video_feats / video_feats.norm(dim=1, keepdim=True) + text_feats = text_feats / text_feats.norm(dim=1, keepdim=True) + + text_feats = rearrange(text_feats, "(bs fs) c -> bs fs c", bs=_bs, fs=false_len + 1) + score = torch.einsum("bc,bfc->bf", video_feats, text_feats) * pl_module.clip.logit_scale.exp() + # elif pl_module.mc_type == "vtc": + # videos = batch["video"][0] + # scores = [] + # for _ in range(int(round(1 / ((1 - pl_module.hparams.config["mim_prob"]))))): + # infer = pl_module.infer( + # { + # "video": [videos], + # "text": [x for y in zip(*texts) for x in y] + # }, + # mask_video=True, + # ) + # video_feats = infer["video_feats"] # 8 * 512 + # text_feats = infer["text_feats"] # 40 * 512 + + # video_feats = video_feats / video_feats.norm(dim=1, keepdim=True) + # text_feats = text_feats / text_feats.norm(dim=1, keepdim=True) + + # text_feats = rearrange(text_feats, "(bs fs) c -> bs fs c", bs=_bs, fs=false_len + 1) + # score = torch.einsum("bc,bfc->bf", video_feats, text_feats) * pl_module.clip.logit_scale.exp() + # scores.append(score) + # score = sum([x.softmax(dim=-1) for x in scores]) / len(scores) + elif pl_module.mc_type == "vtc_cap": + videos = batch["video"][0] + videos = repeat(videos, 'b t c h w -> (b fs) t c h w', fs=false_len + 1) + infer = pl_module.infer( + { + "video": [videos], + "text": [x for y in zip(*texts) for x in y] + }, + caption=True, + ) + feats = infer["cap_logits"] + feats = feats[torch.arange(feats.shape[0]), infer["text_ids"].argmax(dim=-1)] + mc_logits = pl_module.rank_output( + torch.cat([feats, infer["video_feats"], infer["text_feats"]], dim=1)) + score = mc_logits.reshape(_bs, false_len + 1) + else: + raise NotImplementedError("Not implemented for model {}".format(pl_module)) + + loss = F.cross_entropy(score, vtm_labels) + + # print(score, itm_labels) + + phase = "train" if pl_module.training else "val" + acc = getattr(pl_module, f"{phase}_multiple_choice_accuracy")( + score, vtm_labels + ) + # print(acc) + ret = { + "multiple_choice_loss": loss, + } + + phase = 
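In the CLIP `vtc` branch of `compute_multiple_choice` above, the video is encoded once, every candidate caption is encoded, both feature sets are L2-normalized, and the per-option score is the scaled cosine similarity (`torch.einsum("bc,bfc->bf", ...)` times `logit_scale.exp()`), trained with cross-entropy against the index of the correct option. A sketch of just that scoring step with random stand-in features; the shapes follow the code above while the encoders themselves are omitted:

```python
import torch
import torch.nn.functional as F
from einops import rearrange

bs, n_options, dim = 4, 5, 512
logit_scale = torch.tensor(2.659).exp()          # CLIP-style temperature: exp(log(1/0.07)) ≈ 14.3

video_feats = F.normalize(torch.randn(bs, dim), dim=1)
text_feats = F.normalize(torch.randn(bs * n_options, dim), dim=1)       # options arrive flattened
text_feats = rearrange(text_feats, "(bs fs) c -> bs fs c", bs=bs, fs=n_options)

score = torch.einsum("bc,bfc->bf", video_feats, text_feats) * logit_scale
answer = torch.randint(0, n_options, (bs,))
loss = F.cross_entropy(score, answer)
accuracy = (score.argmax(dim=1) == answer).float().mean()
```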
"train" if pl_module.training else "val" + loss = getattr(pl_module, f"{phase}_multiple_choice_loss")(ret["multiple_choice_loss"]) + + pl_module.log(f"multiple_choice/{phase}/loss", loss) + pl_module.log(f"multiple_choice/{phase}/accuracy", acc) + return ret + + +def compute_vqa(pl_module, batch): + infer = pl_module.infer(batch, mask_text=False, mask_video=False) + vqa_logits = pl_module.vqa_classifier(infer["cls_feats"]) + vqa_targets = torch.zeros( + len(vqa_logits), pl_module.hparams.config["vqav2_label_size"] + ).to(pl_module.device) + + vqa_labels = batch["vqa_labels"] + vqa_scores = batch["vqa_scores"] + + for i, (_label, _score) in enumerate(zip(vqa_labels, vqa_scores)): + for l, s in zip(_label, _score): + vqa_targets[i, l] = s + + vqa_loss = ( + F.binary_cross_entropy_with_logits(vqa_logits, vqa_targets) + * vqa_targets.shape[1] + ) # https://github.com/jnhwkim/ban-vqa/blob/master/train.py#L19 + + ret = { + "vqa_loss": vqa_loss, + "vqa_logits": vqa_logits, + "vqa_targets": vqa_targets, + "vqa_labels": vqa_labels, + "vqa_scores": vqa_scores, + } + + phase = "train" if pl_module.training else "val" + loss = getattr(pl_module, f"{phase}_vqa_loss")(ret["vqa_loss"]) + score = getattr(pl_module, f"{phase}_vqa_score")( + ret["vqa_logits"], ret["vqa_targets"] + ) + pl_module.log(f"vqa/{phase}/loss", loss) + pl_module.log(f"vqa/{phase}/score", score) + + return ret + + +# add by vcop +def compute_vcop(pl_module, batch): + infer = pl_module.infer(batch, mask_text=False, mask_video=False) + x = infer["vcop_features"] # BTLC + b = x.size(0) + # # v1: simple concat + # gt_labels = torch.ones(b) + # idx = torch.randperm(pl_module.hparams.config["num_frames"]) # get random order + # classes = list(itertools.permutations(list(range(len(idx.tolist()))))) + # label = classes.index(tuple(idx.tolist())) + # h = x[0, idx, 0].view(1, -1) + # gt_labels[0] = label + # for index in range(1, b): + # idx = torch.randperm(pl_module.hparams.config["num_frames"]) # get random order + # classes = list(itertools.permutations(list(range(len(idx.tolist()))))) + # label = classes.index(tuple(idx.tolist())) + # gt_labels[index] = label + # h = torch.cat((h, x[index, idx, 0].view(1, -1)), dim=0) + + # v2: vcop implementation + gt_labels = torch.ones(b) + idx = torch.randperm(pl_module.hparams.config["num_frames"]) # get random order + classes = list(itertools.permutations(list(range(len(idx.tolist()))))) + label = classes.index(tuple(idx.tolist())) + h = x[0, idx, 0].unsqueeze(0) + gt_labels[0] = label + for index in range(1, b): + idx = torch.randperm(pl_module.hparams.config["num_frames"]) # get random order + classes = list(itertools.permutations(list(range(len(idx.tolist()))))) + label = classes.index(tuple(idx.tolist())) + gt_labels[index] = label + h = torch.cat((h, x[index, idx, 0].unsqueeze(0)), dim=0) + vcop_logits = pl_module.vcop_classifier(h) + vcop_labels = gt_labels.to(pl_module.device).long() + m = nn.Softmax(dim=1) + if random.random() < 0.01: + print(m(vcop_logits)[0], vcop_labels[0]) + # print(vcop_labels) + vcop_loss = F.cross_entropy(vcop_logits, vcop_labels) + ret = { + "vcop_loss": vcop_loss, + "vcop_logits": vcop_logits, + "vcop_labels": vcop_labels, + } + phase = "train" if pl_module.training else "val" + loss = getattr(pl_module, f"{phase}_vcop_loss")(ret["vcop_loss"]) + pl_module.log(f"vcop/{phase}/loss", loss) + acc = getattr(pl_module, f"{phase}_vcop_accuracy")( + ret["vcop_logits"], ret["vcop_labels"], unfilterd=False # if remove unknown classes + ) + # print(acc) + 
pl_module.log(f"vcop/{phase}/accuracy", acc) + return ret + + +# add for dino +def compute_dino(pl_module, batch): + infer = pl_module.infer(batch, mask_text=False, mask_video=False) + x = infer["dino_features"] # BTLC + b = x.size(0) + dino_loss = pl_module.dino_loss + ret = { + "dino_loss": dino_loss, + "dino_logits": dino_logits, + "dino_labels": dino_labels, + } + phase = "train" if pl_module.training else "val" + loss = getattr(pl_module, f"{phase}_dino_loss")(ret["dino_loss"]) + pl_module.log(f"dino/{phase}/loss", loss) + acc = getattr(pl_module, f"{phase}_dino_accuracy")( + ret["dino_logits"], ret["dino_labels"], unfilterd=False # if remove unknown classes + ) + pl_module.log(f"dino/{phase}/accuracy", acc) + return ret + +# add by msrvtt qa +def compute_openend_vqa(pl_module, batch): + phase = "train" if pl_module.training else "val" + # batch["false_video_0"] = batch["false_video_0"][0] + if "allinone" in type(pl_module).__name__.lower(): + batch["video"] = batch["video"][0] + infer = pl_module.infer(batch, mask_text=False, mask_video=False, mode="video") + vqa_logits = pl_module.vqa_classifier(infer["cls_feats"]) + elif "clip" in type(pl_module).__name__.lower(): + if pl_module.qa_type == "vtc": + infer = pl_module.infer(batch, mask_text=False, mask_video=False, mode="video") + vqa_logits = pl_module.vqa_classifier( + torch.cat([infer["video_feats"], infer["text_feats"]], dim=1) + ) + elif pl_module.qa_type == "cap": + infer = pl_module.infer(batch, mask_text=False, mask_video=False, caption=True, mode="video") + # Take the feats of the eot_token + feats = feats[torch.arange(feats.shape[0]), infer["text_ids"].argmax(dim=-1)] + vqa_logits = pl_module.vqa_classifier(feats) + elif pl_module.qa_type == "vtc_cap": + infer = pl_module.infer(batch, mask_text=False, mask_video=False, caption=True, mode="video") + feats = infer["cap_logits"] + feats = feats[torch.arange(feats.shape[0]), infer["text_ids"].argmax(dim=-1)] + vqa_logits = pl_module.vqa_classifier( + torch.cat([feats, infer["video_feats"], infer["text_feats"]], dim=1)) + elif pl_module.qa_type == "vtc_mlm": + del batch["clip_text_ids"] + assert "clip_text_ids" not in batch + batch["text"] = [f"Question: {q} Answer: " for q in batch["text"]] + infer = pl_module.infer(batch, mask_text=True, mask_video=False, mode="video") + # vqa_logits = pl_module.mlm_score(infer["text_feats"]) + + # vqa_logits = vqa_logits[torch.arange(vqa_logits.shape[0]), infer["text_ids"].argmax(dim=-1)] + + # id_idxes = batch["ans_clip_id"][0] + # vqa_logits = vqa_logits[:, id_idxes] + + feats = infer["text_feats"] + feats = feats[torch.arange(feats.shape[0]), infer["text_ids"].argmax(dim=-1)] + vqa_logits = pl_module.vqa_classifier( + torch.cat([feats, infer["video_feats"], infer["text_contrastive_feats"]], dim=1) + ) + # vqa_logits = (vqa_logits + vqa_logits_all[:, :vqa_logits.size(1)]) / 2 + elif pl_module.qa_type in ["zs", "mlm"]: + del batch["clip_text_ids"] + assert "clip_text_ids" not in batch + batch["text"] = [f"Question: {q} Answer: " for q in batch["text"]] + infer = pl_module.infer(batch, mask_text=True, mask_video=False, mode="video") + vqa_logits = pl_module.mlm_score(infer["text_feats"]) + + vqa_logits = vqa_logits[torch.arange(vqa_logits.shape[0]), infer["text_ids"].argmax(dim=-1)] + + id_idxes = batch["ans_clip_id"][0] + vqa_logits = vqa_logits[:, id_idxes] + else: + raise NotImplementedError("Not implemented for model {}".format(pl_module)) + vqa_labels = torch.tensor(batch["vqa_labels"]).to(pl_module.device).long() + # 
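`compute_vcop` (just above) casts frame-order prediction as classification: each sample's frame features are shuffled with a random permutation, and the target class is that permutation's index in `itertools.permutations(range(num_frames))`, so a linear head over the concatenated shuffled features has `num_frames!` classes. (The adjacent `compute_dino` stub appears to reference `dino_logits`/`dino_labels` that are never defined, so it is not usable as written.) A compact sketch of the label and input construction, using plain per-frame features rather than the repo's BTLC layout:

```python
import itertools
import torch

num_frames, batch, dim = 4, 3, 16
classes = list(itertools.permutations(range(num_frames)))     # 4! = 24 possible orderings

x = torch.randn(batch, num_frames, dim)                       # per-frame features (B, T, C)
shuffled, labels = [], []
for b in range(batch):
    idx = torch.randperm(num_frames)                          # random frame order
    labels.append(classes.index(tuple(idx.tolist())))         # which permutation was applied
    shuffled.append(x[b, idx].reshape(-1))                    # concatenate the shuffled frames
h = torch.stack(shuffled)                                     # (B, T*C) fed to the vcop classifier
labels = torch.tensor(labels)

vcop_classifier = torch.nn.Linear(num_frames * dim, len(classes))
loss = torch.nn.functional.cross_entropy(vcop_classifier(h), labels)
```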
print(vqa_logits.size()) + # print(vqa_labels) + vqa_loss = F.cross_entropy(vqa_logits, vqa_labels) + ret = { + "vqa_loss": vqa_loss, + "vqa_logits": vqa_logits, + "vqa_labels": vqa_labels, + } + loss = getattr(pl_module, f"{phase}_vqa_loss")(ret["vqa_loss"]) + pl_module.log(f"vqa/{phase}/loss", loss) + acc = getattr(pl_module, f"{phase}_openend_vqa_accuracy")( + ret["vqa_logits"].clone(), ret["vqa_labels"].clone() # if remove unknown classes + ) + pl_module.log(f"vqa/{phase}/accuracy", acc) + return ret + + +def compute_nlvr2(pl_module, batch): + infer1 = pl_module.infer( + batch, mask_text=False, mask_video=False, video_token_type_idx=1 + ) + infer2 = pl_module.infer( + batch, mask_text=False, mask_video=False, video_token_type_idx=2 + ) + + cls_feats = torch.cat([infer1["cls_feats"], infer2["cls_feats"]], dim=-1) + nlvr2_logits = pl_module.nlvr2_classifier(cls_feats) + + nlvr2_labels = batch["answers"] + nlvr2_labels = torch.tensor(nlvr2_labels).to(pl_module.device).long() + nlvr2_loss = F.cross_entropy(nlvr2_logits, nlvr2_labels) + + ret = { + "nlvr2_loss": nlvr2_loss, + "nlvr2_logits": nlvr2_logits, + "nlvr2_labels": nlvr2_labels, + } + + phase = "train" if pl_module.training else "val" + + if phase == "train": + loss = getattr(pl_module, f"{phase}_nlvr2_loss")(ret["nlvr2_loss"]) + acc = getattr(pl_module, f"{phase}_nlvr2_accuracy")( + ret["nlvr2_logits"], ret["nlvr2_labels"] + ) + pl_module.log(f"nlvr2/{phase}/loss", loss) + pl_module.log(f"nlvr2/{phase}/accuracy", acc) + else: + dev_batches = [i for i, n in enumerate(batch["table_name"]) if "dev" in n] + test_batches = [i for i, n in enumerate(batch["table_name"]) if "test" in n] + + if dev_batches: + dev_loss = getattr(pl_module, f"dev_nlvr2_loss")( + F.cross_entropy( + ret["nlvr2_logits"][dev_batches], ret["nlvr2_labels"][dev_batches] + ) + ) + dev_acc = getattr(pl_module, f"dev_nlvr2_accuracy")( + ret["nlvr2_logits"][dev_batches], ret["nlvr2_labels"][dev_batches] + ) + pl_module.log(f"nlvr2/dev/loss", dev_loss) + pl_module.log(f"nlvr2/dev/accuracy", dev_acc) + if test_batches: + test_loss = getattr(pl_module, f"test_nlvr2_loss")( + F.cross_entropy( + ret["nlvr2_logits"][test_batches], ret["nlvr2_labels"][test_batches] + ) + ) + test_acc = getattr(pl_module, f"test_nlvr2_accuracy")( + ret["nlvr2_logits"][test_batches], ret["nlvr2_labels"][test_batches] + ) + pl_module.log(f"nlvr2/test/loss", test_loss) + pl_module.log(f"nlvr2/test/accuracy", test_acc) + + return ret + + +def compute_irtr(pl_module, batch): + is_training_phase = pl_module.training + # modify to module + _bs, _t, _c, _h, _w = batch["video"][0].shape + false_len = pl_module.hparams.config["draw_false_text"] + text_ids = torch.stack( + [batch[f"false_text_{i}_ids"] for i in range(false_len)], dim=1 + ) + text_masks = torch.stack( + [batch[f"false_text_{i}_masks"] for i in range(false_len)], dim=1 + ) + text_labels = torch.stack( + [batch[f"false_text_{i}_labels"] for i in range(false_len)], dim=1 + ) + + text_ids = torch.cat([batch["text_ids"].unsqueeze(1), text_ids], dim=1) + text_masks = torch.cat([batch["text_masks"].unsqueeze(1), text_masks], dim=1) + text_labels = torch.cat([batch["text_labels"].unsqueeze(1), text_labels], dim=1) + videos = batch["video"][0].unsqueeze(1).expand(_bs, false_len + 1, _t, _c, _h, _w) + + infer = pl_module.infer( + { + "video": [rearrange(videos, "bs fs t c h w -> (bs fs) t c h w")], + "text_ids": rearrange(text_ids, "bs fs tl -> (bs fs) tl"), + "text_masks": rearrange(text_masks, "bs fs tl -> (bs fs) tl"), + "text_labels": 
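Several CLIP branches of `compute_openend_vqa` above (and the `vtc_cap` path of `compute_multiple_choice`) pool the text tower by taking the feature at the end-of-sequence token, indexed with `text_ids.argmax(dim=-1)`: in CLIP's BPE vocabulary the end-of-text token has the largest id, so the argmax lands on its position. (The `cap` branch appears to read `feats` before assigning it; the `vtc_cap` branch shows the intended `feats = infer["cap_logits"]` coming first.) A minimal illustration of that gather:

```python
import torch

batch, seq_len, dim = 2, 6, 8
# CLIP BPE ids: <|startoftext|> = 49406, <|endoftext|> = 49407 (the largest id in the vocab)
text_ids = torch.tensor([
    [49406, 320, 1125, 49407, 0, 0],
    [49406, 320, 1125, 539, 49407, 0],
])
feats = torch.randn(batch, seq_len, dim)          # token-level features from the text encoder

eot_pos = text_ids.argmax(dim=-1)                 # tensor([3, 4]) -> position of <|endoftext|>
pooled = feats[torch.arange(batch), eot_pos]      # one (dim,) vector per sample -> (batch, dim)
```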
rearrange(text_labels, "bs fs tl -> (bs fs) tl"), + } + ) + score = pl_module.rank_output(infer["cls_feats"])[:, 0] + score = rearrange(score, "(bs fs) -> bs fs", bs=_bs, fs=false_len + 1) + answer = torch.zeros(_bs).to(score).long() + irtr_loss = F.cross_entropy(score, answer) + + ret = { + "irtr_loss": irtr_loss, + } + + phase = "train" if pl_module.training else "val" + irtr_loss = getattr(pl_module, f"{phase}_irtr_loss")(ret["irtr_loss"]) + + pl_module.log(f"irtr/{phase}/irtr_loss", irtr_loss) + + return ret + + +# use this method to achievt multiple view testing +@torch.no_grad() +def compute_irtr_recall(pl_module): + text_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset() + text_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer + text_loader = torch.utils.data.DataLoader( + text_dset, + batch_size=64, + num_workers=pl_module.hparams.config["num_workers"], + pin_memory=True, + collate_fn=functools.partial( + text_dset.collate, + mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator, + ), + ) + + video_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset( + video_only=True + ) + video_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer + dist_sampler = DistributedSampler(video_dset, shuffle=False) + video_loader = torch.utils.data.DataLoader( + video_dset, + batch_size=1, + num_workers=pl_module.hparams.config["num_workers"], + sampler=dist_sampler, + pin_memory=True, + collate_fn=functools.partial( + video_dset.collate, + mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator, + ), + ) + + text_preload = list() + for _b in tqdm.tqdm(text_loader, desc="text prefetch loop"): + text_preload.append( + { + "text_ids": _b["text_ids"].to(pl_module.device), + "text_masks": _b["text_masks"].to(pl_module.device), + "text_labels": _b["text_labels"].to(pl_module.device), + "img_index": _b["img_index"], + } + ) + + tiids = list() + for pre in text_preload: + tiids += pre["img_index"] + tiids = torch.tensor(tiids) + + video_preload = list() + for _b in tqdm.tqdm(video_loader, desc="video prefetch loop"): + video = _b["video"][0] + # print(video.size()) + (ie, im, _, _) = pl_module.transformer.visual_embed( + video.to(pl_module.device), + max_video_len=pl_module.hparams.config["max_video_len"], + mask_it=False, + ) + video_preload.append((ie, im, _b["img_index"][0])) + + rank_scores = list() + rank_iids = list() + + for img_batch in tqdm.tqdm(video_preload, desc="rank loop"): + _ie, _im, _iid = img_batch + num_frames, l, c = _ie.shape + + # print(_ie.size()) # 1x197x168 + # print(_im.size()) # 1x197 + _ie.unsqueeze(0) + _im.unsqueeze(0) + img_batch_score = list() + for txt_batch in text_preload: + fblen = len(txt_batch["text_ids"]) + ie = _ie.expand(fblen, num_frames, l, c) + # print(ie.size()) + im = _im.expand(fblen, num_frames, l) + ie = ie.contiguous().view(-1, l, c) + im = im.contiguous().view(-1, l) + + with torch.cuda.amp.autocast(): + score = pl_module.rank_output( + pl_module.infer( + { + "text_ids": txt_batch["text_ids"], + "text_masks": txt_batch["text_masks"], + "text_labels": txt_batch["text_labels"], + }, + video_embeds=ie, + v_masks=im, + )["cls_feats"] + )[:, 0] + + img_batch_score.append(score) + + img_batch_score = torch.cat(img_batch_score) + rank_scores.append(img_batch_score.cpu().tolist()) + rank_iids.append(_iid) + + torch.distributed.barrier() + gather_rank_scores = all_gather(rank_scores) + gather_rank_iids = all_gather(rank_iids) + + iids = torch.tensor(gather_rank_iids) + iids = iids.view(-1) + scores = 
torch.tensor(gather_rank_scores) + scores = scores.view(len(iids), -1) + + topk10 = scores.topk(10, dim=1) + topk5 = scores.topk(5, dim=1) + topk1 = scores.topk(1, dim=1) + topk10_iids = tiids[topk10.indices] + topk5_iids = tiids[topk5.indices] + topk1_iids = tiids[topk1.indices] + + tr_r10 = (iids.unsqueeze(1) == topk10_iids).float().max(dim=1)[0].mean() + tr_r5 = (iids.unsqueeze(1) == topk5_iids).float().max(dim=1)[0].mean() + tr_r1 = (iids.unsqueeze(1) == topk1_iids).float().max(dim=1)[0].mean() + + topk10 = scores.topk(10, dim=0) + topk5 = scores.topk(5, dim=0) + topk1 = scores.topk(1, dim=0) + topk10_iids = iids[topk10.indices] + topk5_iids = iids[topk5.indices] + topk1_iids = iids[topk1.indices] + + ir_r10 = (tiids.unsqueeze(0) == topk10_iids).float().max(dim=0)[0].mean() + ir_r5 = (tiids.unsqueeze(0) == topk5_iids).float().max(dim=0)[0].mean() + ir_r1 = (tiids.unsqueeze(0) == topk1_iids).float().max(dim=0)[0].mean() + + return (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10) + + +@torch.no_grad() +def compute_decouple_irtr_recall(pl_module): + sample_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset( + ) + sample_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer + dist_sampler = DistributedSampler(sample_dset, shuffle=False) + sample_loader = torch.utils.data.DataLoader( + sample_dset, + batch_size=1, + num_workers=pl_module.hparams.config["num_workers"], + sampler=dist_sampler, + pin_memory=True, + collate_fn=functools.partial( + sample_dset.collate, + mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator, + ), + ) + + text_preload = list() + text_embed_arr = [] + vid_embed_arr = [] + count = 0 + with torch.no_grad(): + for _b in tqdm.tqdm(sample_loader, desc="text&video prefetch loop"): + # print(_b) + # print(_b.keys()) + _b["text_ids"] = _b["text_ids"].to(pl_module.device) + _b["text_masks"] = _b["text_masks"].to(pl_module.device) + _b["text_labels"] = _b["text_labels"].to(pl_module.device) + _b["video"][0] = _b["video"][0].to(pl_module.device) + + infer = pl_module.infer(_b, mask_text=False, mask_video=False) + with torch.cuda.amp.autocast(enabled=False): + text_embed, vid_embed = infer["text_retrieval_feats"], infer["video_retrieval_feats"] + if vid_embed is not None: + vid_embed_all = [torch.zeros_like(vid_embed) for _ in range(pl_module.hparams.config["num_gpus"])] + torch.distributed.all_gather(vid_embed_all, vid_embed) + vid_embed_all = torch.cat(vid_embed_all, dim=0) + if text_embed is not None: + text_embed_all = [torch.zeros_like(text_embed) for _ in range(pl_module.hparams.config["num_gpus"])] + torch.distributed.all_gather(text_embed_all, text_embed) + text_embed_all = torch.cat(text_embed_all, dim=0) + text_embed_arr.append(text_embed_all.cpu()) + vid_embed_arr.append(vid_embed_all.cpu()) + count += 1 + text_embeds = torch.cat(text_embed_arr) + vid_embeds = torch.cat(vid_embed_arr) + # print(text_embeds.size(), vid_embeds.size()) + st2sv_sims = sim_matrix(text_embeds, vid_embeds).detach().cpu().numpy() + for metric in [t2v_metrics, v2t_metrics]: + metric_name = metric.__name__ + metrics = metric(st2sv_sims) + if metric == t2v_metrics: + tr_r1, tr_r5, tr_r10, tr_r50 = metrics["R1"], metrics["R5"], metrics["R10"], metrics["R50"] + else: + ir_r1, ir_r5, ir_r10, ir_r50 = metrics["R1"], metrics["R5"], metrics["R10"], metrics["R50"] + # msg += f"MedR: {metrics['MedR']:g}, MeanR: {metrics['MeanR']:.1f}" + return (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10) + + +@torch.no_grad() +def compute_zero_shot_classify_recall(pl_module, batch): + # 
process all prompt action label into text representations + false_len = pl_module.hparams.config["draw_false_text"] - 1 + # stack video multiple times + # print(batch["answer"]) + vtm_labels = torch.tensor(batch["answer"]).to(pl_module.device).long() + _bs, _t, _c, _h, _w = batch["video"][0].shape + # print(batch.keys()) + + text_ids = torch.stack( + [batch[f"false_text_{i}_ids"] for i in range(false_len)], dim=1 + ) + text_masks = torch.stack( + [batch[f"false_text_{i}_masks"] for i in range(false_len)], dim=1 + ) + text_labels = torch.stack( + [batch[f"false_text_{i}_labels"] for i in range(false_len)], dim=1 + ) + + # concat first option and other options + text_ids = torch.cat([batch["text_ids"].unsqueeze(1), text_ids], dim=1) + text_masks = torch.cat([batch["text_masks"].unsqueeze(1), text_masks], dim=1) + text_labels = torch.cat([batch["text_labels"].unsqueeze(1), text_labels], dim=1) + videos = batch["video"][0].unsqueeze(1).expand(_bs, false_len + 1, _t, _c, _h, _w) + + infer = pl_module.infer( + { + "video": [rearrange(videos, "bs fs t c h w -> (bs fs) t c h w")], + "text_ids": rearrange(text_ids, "bs fs tl -> (bs fs) tl"), + "text_masks": rearrange(text_masks, "bs fs tl -> (bs fs) tl"), + "text_labels": rearrange(text_labels, "bs fs tl -> (bs fs) tl"), + } + ) + score = pl_module.rank_output(infer["cls_feats"])[:, 0] + score = rearrange(score, "(bs fs) -> bs fs", bs=_bs, fs=false_len + 1) + loss = F.cross_entropy(score, vtm_labels) + + # print(score, vtm_labels) + + phase = "train" if pl_module.training else "val" + acc = getattr(pl_module, f"{phase}_zero_shot_accuracy")( + score, vtm_labels + ) + # print(acc) + ret = { + "multiple_choice_loss": loss, + } + + phase = "train" if pl_module.training else "val" + loss = getattr(pl_module, f"{phase}_multiple_choice_loss")(ret["multiple_choice_loss"]) + + pl_module.log(f"multiple_choice/{phase}/loss", loss) + pl_module.log(f"multiple_choice/{phase}/accuracy", acc) + return acc + + +# for ind itc +@torch.no_grad() +def compute_ind_irtr_recall(pl_module): + num_views = pl_module.hparams.config["retrieval_views"] + text_embed_arr_multi = [] + vid_embed_arr_multi = [] + for i in range(num_views): + sample_dset = pl_module.trainer.datamodule.video_dms[0].make_no_false_val_dset( + ) + sample_dset.tokenizer = pl_module.trainer.datamodule.video_dms[0].tokenizer + dist_sampler = DistributedSampler(sample_dset, shuffle=False) + sample_loader = torch.utils.data.DataLoader( + sample_dset, + batch_size=1, + num_workers=pl_module.hparams.config["num_workers"], + sampler=dist_sampler, + pin_memory=True, + collate_fn=functools.partial( + sample_dset.collate, + mlm_collator=pl_module.trainer.datamodule.video_dms[0].mlm_collator, + ), + ) + text_preload = list() + text_embed_arr = [] + vid_embed_arr = [] + count = 0 + with torch.no_grad(): + for _b in tqdm.tqdm(sample_loader, desc="text&video prefetch loop"): + # print(_b) + # print(_b.keys()) + _b["text_ids"] = _b["text_ids"].to(pl_module.device) + _b["text_masks"] = _b["text_masks"].to(pl_module.device) + _b["text_labels"] = _b["text_labels"].to(pl_module.device) + _b["video"][0] = _b["video"][0].to(pl_module.device) + + # infer = pl_module.infer(_b, mask_text=False, mask_video=False) + + infer_text = pl_module.infer(_b, mask_text=False, mask_video=False, input_text_only=True) + infer_vision = pl_module.infer(_b, mask_text=False, mask_video=False, input_video_only=True) + + with torch.cuda.amp.autocast(enabled=False): + # text_embed, vid_embed = infer_text["raw_cls_feats"], 
infer_vision["raw_cls_feats"] + text_embed, vid_embed = infer_text["text_feats"][:, 0], infer_vision["video_feats"][:, 0] + if vid_embed is not None: + vid_embed_all = [torch.zeros_like(vid_embed) for _ in range(pl_module.hparams.config["num_gpus"])] + torch.distributed.all_gather(vid_embed_all, vid_embed) + vid_embed_all = torch.cat(vid_embed_all, dim=0) + if text_embed is not None: + text_embed_all = [torch.zeros_like(text_embed) for _ in range(pl_module.hparams.config["num_gpus"])] + torch.distributed.all_gather(text_embed_all, text_embed) + text_embed_all = torch.cat(text_embed_all, dim=0) + text_embed_arr.append(text_embed_all.cpu()) + vid_embed_arr.append(vid_embed_all.cpu()) + count += 1 + text_embeds = torch.cat(text_embed_arr) + vid_embeds = torch.cat(vid_embed_arr) + # append for multi view + text_embed_arr_multi.append(text_embeds) + vid_embed_arr_multi.append(vid_embeds) + # print(text_embeds.size(), vid_embeds.size()) + for j in range(len(text_embed_arr_multi)): + if j == 0: + st2sv_sims = sim_matrix(text_embed_arr_multi[j], vid_embed_arr_multi[j]).detach().cpu().numpy() / len(text_embed_arr_multi) + else: + st2sv_sims += sim_matrix(text_embed_arr_multi[j], vid_embed_arr_multi[j]).detach().cpu().numpy() / len(text_embed_arr_multi) + # st2sv_sims = sim_matrix(text_embeds, vid_embeds).detach().cpu().numpy() + for metric in [t2v_metrics, v2t_metrics]: + metric_name = metric.__name__ + metrics = metric(st2sv_sims) + if metric == t2v_metrics: + tr_r1, tr_r5, tr_r10, tr_r50 = metrics["R1"], metrics["R5"], metrics["R10"], metrics["R50"] + else: + ir_r1, ir_r5, ir_r10, ir_r50 = metrics["R1"], metrics["R5"], metrics["R10"], metrics["R50"] + # msg += f"MedR: {metrics['MedR']:g}, MeanR: {metrics['MeanR']:.1f}" + return (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10) + + +def compute_contrastive(pl_module, batch, return_infer=False, mask_text=False, mask_video=False, caption=False, mode="video"): + infer = pl_module.infer(batch, mask_text=mask_text, mask_video=mask_video, caption=caption, mode=mode) + if mask_text: + text_feats = infer["text_contrastive_feats"] + else: + text_feats = infer["text_feats"] + video_feats = infer["video_feats"] + + if text_feats.ndim == 3: # [B, N, C] -> [B, C] + text_feats = text_feats.mean(1) + if video_feats.ndim == 3: + video_feats= video_feats.mean(1) + + # Normalize the feature + video_feats = video_feats / video_feats.norm(dim=1, keepdim=True) + text_feats = text_feats / text_feats.norm(dim=1, keepdim=True) + + if not pl_module.mmt: + # Plain contrastive + # # TODO: Handle logit_scale when model has no logit_scale + video_feats = distnn.all_gather(video_feats) + text_feats = distnn.all_gather(text_feats) + + if not isinstance(video_feats, torch.Tensor) or video_feats.ndim == 3: + video_feats = torch.cat(list(video_feats)) + text_feats = torch.cat(list(text_feats)) + + image_logits = video_feats @ text_feats.t() * pl_module.clip.logit_scale.exp() + text_logits = image_logits.t() + + ground_truth = torch.arange(len(image_logits)).long().to(image_logits.device) + loss = (F.cross_entropy(image_logits, ground_truth) + F.cross_entropy(text_logits, ground_truth)).div(2) + else: + text_feats_k = infer["text_feats_k"] + video_feats_k = infer["video_feats_k"] + if video_feats_k.ndim == 3: + video_feats_k = video_feats_k.mean(1) + if text_feats_k.ndim == 3: + text_feats_k = text_feats_k.mean(1) + + video_feats_k = video_feats_k / video_feats_k.norm(dim=1, keepdim=True) + text_feats_k = text_feats_k / text_feats_k.norm(dim=1, keepdim=True) + + video_l_pos = 
torch.einsum('nc, nc->n', video_feats, text_feats_k).unsqueeze(-1) + video_l_neg = torch.einsum('nc, ck->nk', video_feats, pl_module.queue_text) + image_logits = torch.cat([video_l_pos, video_l_neg], dim=1) * pl_module.clip.logit_scale.exp() + + text_l_pos = torch.einsum('nc, nc->n', text_feats, video_feats_k).unsqueeze(-1) + text_l_neg = torch.einsum('nc, ck->nk', text_feats, pl_module.queue_visual) + text_logits = torch.cat([text_l_pos, text_l_neg], dim=1) * pl_module.clip.logit_scale.exp() + + ground_truth = torch.zeros(image_logits.shape[0], dtype=torch.long).to(image_logits.device) + loss = (F.cross_entropy(image_logits, ground_truth) + F.cross_entropy(text_logits, ground_truth)).div(2) + + pl_module._dequeue_and_enqueue(text_feats_k, video_feats_k) + + + ret = { + "contrastive_loss": loss, + "contrastive_image_logits": image_logits, + "contrastive_text_logits": text_logits, + "contrastive_labels": ground_truth, + } + + phase = "train" if pl_module.training else "val" + loss = getattr(pl_module, f"{phase}_contrastive_loss")(ret["contrastive_loss"]) + acc_image = getattr(pl_module, f"{phase}_contrastive_image_accuracy")( + ret["contrastive_image_logits"], ret["contrastive_labels"] + ) + acc_text = getattr(pl_module, f"{phase}_contrastive_text_accuracy")( + ret["contrastive_text_logits"], ret["contrastive_labels"] + ) + pl_module.log(f"contrastive/{phase}/loss", loss) + pl_module.log(f"contrastive/{phase}/image_accuracy", acc_image) + pl_module.log(f"contrastive/{phase}/text_accuracy", acc_text) + + if return_infer: + return ret, infer + + return ret + + +def compute_cap(pl_module, batch, infer=None, mode="video"): + if infer is None: # Skip infer if infer is not None + infer = pl_module.infer(batch, caption=True, mode=mode) + cap_logits = infer["cap_logits"] + # The first is sot_token, prediction starts from the second token + # Note that there is also an eot_token at the end of each seq + cap_labels = infer["text_ids"][:, 1:].long() + special_tokens_mask = infer["special_tokens_mask"][:, 1:] # 1 for masked + + cap_labels.masked_fill_(special_tokens_mask, value=-100) + + cap_loss = F.cross_entropy( + cap_logits.reshape(-1, pl_module.hparams.config["vocab_size"]), + cap_labels.reshape(-1), + ignore_index=-100, + ) + + ret = { + "cap_loss": cap_loss, + } + + phase = "train" if pl_module.training else "val" + loss = getattr(pl_module, f"{phase}_cap_loss")(cap_loss) + acc = getattr(pl_module, f"{phase}_cap_accuracy")( + cap_logits, cap_labels, + ) + pl_module.log(f"cap/{phase}/loss", loss) + pl_module.log(f"cap/{phase}/accuracy", acc) + + return ret + + +def compute_zs_classify(pl_module, batch, text_ret): + text_feats = text_ret["text_feats"] + num_text_aug = text_ret["num_text_aug"] + labels = torch.tensor(batch["answer"]).to(pl_module.device).long() + + video_feats = pl_module.forward_video(batch)["video_feats"] + + if text_feats.ndim == 3: # [B, N, C] -> [B, C] + text_feats = text_feats.mean(1) + if video_feats.ndim == 3: + video_feats= video_feats.mean(1) + + text_feats /= text_feats.norm(dim=-1, keepdim=True) + video_feats /= video_feats.norm(dim=-1, keepdim=True) + + similarity = (pl_module.clip.logit_scale.exp() * video_feats @ text_feats.T) + B, _ = video_feats.shape + assert similarity.view(B, num_text_aug, -1).shape[-1] == 400, similarity.shape + similarity = similarity.view(B, num_text_aug, -1).softmax(dim=-1) + similarity = similarity.mean(dim=1, keepdim=False) + + + phase = "train" if pl_module.training else "val" + ret = { + "similarity": similarity, + "labels": labels, + } 
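    # Editor's note -- a minimal, hedged sketch (not part of the original code
    # path and never called): the scoring above ensembles the prompt templates
    # produced by text_prompt().  Text features arrive laid out template-major
    # as (num_text_aug * num_classes, C), so the similarity matrix is reshaped
    # to (B, num_text_aug, num_classes), softmaxed over classes, and averaged
    # over templates (the assert above pins the class count to Kinetics-400).
    # The toy shapes and helper name below are illustrative assumptions only.
    def _prompt_ensemble_toy_example(B=2, num_templates=3, num_classes=5, C=8):
        v = F.normalize(torch.randn(B, C), dim=-1)                            # toy video feats
        t = F.normalize(torch.randn(num_templates * num_classes, C), dim=-1)  # toy prompt feats
        sim = v @ t.T                                                         # (B, T * K)
        sim = sim.view(B, num_templates, num_classes).softmax(dim=-1)         # per-template class probs
        return sim.mean(dim=1)                                                # ensembled (B, K) class scores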
+ + acc = getattr(pl_module, f"{phase}_zs_classify_accuracy")(similarity, labels) + pl_module.log(f"zs_classify/{phase}/accuracy", acc) + + return ret + +def compute_mim(pl_module, batch, infer=None, mode="video"): + if infer is None: # Skip infer if infer is not None + infer = pl_module.infer(batch, mask_image=True, mode=mode) + mim_feats = infer["mim_video_feats"] # N, Lu, C + video = infer["video"] # N, C, T, H, W + masked_indices = infer["visual_masked_indices"] # N * T, L + + patch_size = int(math.sqrt(mim_feats.size(-1) // 3 // 2)) + assert 3 * patch_size * patch_size == mim_feats.size(-1) + + if mode == "image": + assert video.size(3) == 1 + video = video.expand(-1, -1, 2) + + img_patch = rearrange( + video, 'b c (p0 t) (h p1) (w p2) -> b (t h w) (p0 p1 p2) c', + p0=2, p1=patch_size, p2=patch_size + ) + img_patch = (img_patch - img_patch.mean(dim=-2, keepdim=True) + ) / (img_patch.var(dim=-2, unbiased=True, keepdim=True).sqrt() + 1e-6) + img_patch = rearrange(img_patch, 'b l p c -> b l (p c)') + N, _, C = img_patch.shape + img_patch_mask = img_patch[masked_indices].reshape(N, -1, C) + + mim_loss = F.mse_loss(mim_feats, img_patch_mask) + + ret = { + "mim_loss": mim_loss, + } + + phase = "train" if pl_module.training else "val" + loss = getattr(pl_module, f"{phase}_mim_loss")(ret["mim_loss"]) + pl_module.log(f"mim/{phase}/loss", loss) + + return ret + +def init_weights(module): + if isinstance(module, (nn.Linear, nn.Embedding)): + module.weight.data.normal_(mean=0.0, std=0.02) + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + +def vqa_test_step(pl_module, batch, output): + id2answer = ( + pl_module.trainer.datamodule.dm_dicts["vqa_trainval"].id2answer + if "vqa_trainval" in pl_module.trainer.datamodule.dm_dicts + else pl_module.trainer.datamodule.dm_dicts["vqa"].id2answer + ) + vqa_logits = output["vqa_logits"] + vqa_preds = vqa_logits.argmax(dim=-1) + vqa_preds = [id2answer[pred.item()] for pred in vqa_preds] + questions = batch["text"] + qids = batch["qid"] + return {"qids": qids, "preds": vqa_preds} + + +def openend_vqa_test_step(pl_module, batch, output): + id2answer = ( + pl_module.trainer.datamodule.dm_dicts["vqa_trainval"].id2answer + if "vqa_trainval" in pl_module.trainer.datamodule.dm_dicts + else pl_module.trainer.datamodule.dm_dicts["msrvttqa"].id2answer + ) + vqa_logits = output["vqa_logits"] + vqa_preds = vqa_logits.argmax(dim=-1) + vqa_preds = [id2answer[pred.item()] for pred in vqa_preds] + questions = batch["text"] + qids = batch["qid"] + return {"qids": qids, "preds": vqa_preds} + + +def arc_test_step(pl_module, batch, output): + return output + + +def vqa_test_wrapup(outs, model_name): + rank = torch.distributed.get_rank() + qids, preds = list(), list() + for out in outs: + qids += out["qids"] + preds += out["preds"] + + rets = list() + for qid, pred in zip(qids, preds): + rets.append({"question_id": qid, "answer": pred}) + with open(f"vqa_submit_{rank}.json", "w") as fp: + json.dump(rets, fp, indent=4) + + torch.distributed.barrier() + + if rank == 0: + jsons = list() + paths = list(glob.glob("vqa_submit_*.json")) + for path in paths: + with open(path, "r") as fp: + jsons += json.load(fp) + os.makedirs("result", exist_ok=True) + with open(f"result/vqa_submit_{model_name}.json", "w") as fp: + json.dump(jsons, fp, indent=4) + + torch.distributed.barrier() + os.remove(f"vqa_submit_{rank}.json") + + +def arc_test_wrapup(outs, 
caplen, model_name): + rank = torch.distributed.get_rank() + iids, captions = list(), list() + for out in outs: + iids += out["iid"] + captions += out["captions"] + + rets = list() + for iid, caption in zip(iids, captions): + rets.append({"video_id": iid, "caption": caption}) + with open(f"coco_cap_len{caplen}_{rank}.json", "w") as fp: + json.dump(rets, fp, indent=4) + + torch.distributed.barrier() + + if rank == 0: + jsons = list() + paths = list(glob.glob(f"coco_cap_len{caplen}_*.json")) + for path in paths: + with open(path, "r") as fp: + jsons += json.load(fp) + os.makedirs("result/arc", exist_ok=True) + jsons = sorted(jsons, key=lambda x: x["video_id"]) + with open(f"result/arc/coco_cap_{model_name}_len{caplen}.json", "w") as fp: + json.dump(jsons, fp, indent=4) + + torch.distributed.barrier() + os.remove(f"coco_cap_len{caplen}_{rank}.json") diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/retrieval_metrics.py b/Downstream/multi-modalities-downstream/CoTrain/modules/retrieval_metrics.py new file mode 100644 index 0000000..7ad99ef --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/retrieval_metrics.py @@ -0,0 +1,362 @@ +import os +import math +import numbers +from pathlib import Path +import ipdb +import numpy as np +import torch +import scipy.stats +from sklearn.metrics import average_precision_score +import ipdb +import pdb + +def t2v_metrics(sims, query_masks=None): + """Compute retrieval metrics from a similiarity matrix. + + Args: + sims (th.Tensor): N x M matrix of similarities between embeddings, where + x_{i,j} = + query_masks (th.Tensor): mask any missing queries from the dataset (two videos + in MSRVTT only have 19, rather than 20 captions) + + Returns: + (dict[str:float]): retrieval metrics + """ + assert sims.ndim == 2, "expected a matrix" + num_queries, num_vids = sims.shape + dists = -sims + sorted_dists = np.sort(dists, axis=1) + + # The indices are computed such that they slice out the ground truth distances + # from the psuedo-rectangular dist matrix + queries_per_video = num_queries // num_vids + gt_idx = [[np.ravel_multi_index([ii, jj], (num_queries, num_vids)) + for ii in range(jj * queries_per_video, (jj + 1) * queries_per_video)] + for jj in range(num_vids)] + gt_idx = np.array(gt_idx) + gt_dists = dists.reshape(-1)[gt_idx.reshape(-1)] + gt_dists = gt_dists[:, np.newaxis] + rows, cols = np.where((sorted_dists - gt_dists) == 0) # find column position of GT + + # -------------------------------- + # NOTE: Breaking ties + # -------------------------------- + # We sometimes need to break ties (in general, these should occur extremely rarely, + # but there are pathological cases when they can distort the scores, such as when + # the similarity matrix is all zeros). Previous implementations (e.g. the t2i + # evaluation function used + # here: https://github.com/niluthpol/multimodal_vtt/blob/master/evaluation.py and + # here: https://github.com/linxd5/VSE_Pytorch/blob/master/evaluation.py#L87) generally + # break ties "optimistically". However, if the similarity matrix is constant this + # can evaluate to a perfect ranking. A principled option is to average over all + # possible partial orderings implied by the ties. See # this paper for a discussion: + # McSherry, Frank, and Marc Najork, + # "Computing information retrieval performance measures efficiently in the presence + # of tied scores." European conference on information retrieval. Springer, Berlin, + # Heidelberg, 2008. 
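    # ------------------------------------------------------------------
    # Editor's note -- a minimal, hedged illustration of why the tie-breaking
    # choice matters (the helper below is a sketch only and is never called in
    # this module).  With a constant similarity matrix every candidate ties
    # with the ground truth: "optimistic" tie-breaking then reports a perfect
    # rank of 0 for every query, while "averaging" reports the mean of the
    # tied positions.
    def _tie_break_toy_example():
        toy_dists = -np.zeros((3, 3))                    # all similarities tied
        toy_sorted = np.sort(toy_dists, axis=1)
        toy_gt = np.diag(toy_dists)[:, np.newaxis]       # square case: GT on the diagonal
        rows, cols = np.where((toy_sorted - toy_gt) == 0)
        optimistic = [cols[rows == q][0] for q in range(3)]    # -> [0, 0, 0]   (perfect R@1)
        averaged = [cols[rows == q].mean() for q in range(3)]  # -> [1.0, 1.0, 1.0]  (chance level)
        return optimistic, averaged
    # (The McSherry & Najork paper cited above is available at the URL below.)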
+ # http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.145.8892&rep=rep1&type=pdf + + break_ties = "optimistically" + #break_ties = "averaging" + + if rows.size > num_queries: + assert np.unique(rows).size == num_queries, "issue in metric evaluation" + if break_ties == "optimistically": + _, idx = np.unique(rows, return_index=True) + cols = cols[idx] + elif break_ties == "averaging": + # fast implementation, based on this code: + # https://stackoverflow.com/a/49239335 + locs = np.argwhere((sorted_dists - gt_dists) == 0) + + # Find the split indices + steps = np.diff(locs[:, 0]) + splits = np.nonzero(steps)[0] + 1 + splits = np.insert(splits, 0, 0) + + # Compute the result columns + summed_cols = np.add.reduceat(locs[:, 1], splits) + counts = np.diff(np.append(splits, locs.shape[0])) + avg_cols = summed_cols / counts + if False: + print("Running slower code to verify rank averaging across ties") + # slow, but more interpretable version, used for testing + avg_cols_slow = [np.mean(cols[rows == idx]) for idx in range(num_queries)] + assert np.array_equal(avg_cols, avg_cols_slow), "slow vs fast difference" + print("passed num check") + cols = avg_cols + + msg = "expected ranks to match queries ({} vs {}) " + if cols.size != num_queries: + import ipdb; + ipdb.set_trace() + assert cols.size == num_queries, msg + + if False: + # overload mask to check that we can recover the scores for single-query + # retrieval + print("DEBUGGING MODE") + query_masks = np.zeros_like(query_masks) + query_masks[:, 0] = 1 # recover single query score + + if query_masks is not None: + # remove invalid queries + assert query_masks.size == num_queries, "invalid query mask shape" + cols = cols[query_masks.reshape(-1).astype(np.bool)] + assert cols.size == query_masks.sum(), "masking was not applied correctly" + # update number of queries to account for those that were missing + num_queries = query_masks.sum() + + if False: + # sanity check against old logic for square matrices + gt_dists_old = np.diag(dists) + gt_dists_old = gt_dists_old[:, np.newaxis] + _, cols_old = np.where((sorted_dists - gt_dists_old) == 0) + assert np.array_equal(cols_old, cols), "new metric doesn't match" + + return cols2metrics(cols, num_queries) + + +def v2t_metrics(sims, query_masks=None): + """Compute retrieval metrics from a similiarity matrix. + + Args: + sims (th.Tensor): N x M matrix of similarities between embeddings, where + x_{i,j} = + query_masks (th.Tensor): mask any missing captions from the dataset + + Returns: + (dict[str:float]): retrieval metrics + + NOTES: We find the closest "GT caption" in the style of VSE, which corresponds + to finding the rank of the closest relevant caption in embedding space: + github.com/ryankiros/visual-semantic-embedding/blob/master/evaluation.py#L52-L56 + """ + # switch axes of text and video + sims = sims.T + + if False: + # experiment with toy example + sims = np.ones((3, 3)) + sims[0, 0] = 2 + sims[1, 1:2] = 2 + sims[2, :] = 2 + query_masks = None + + assert sims.ndim == 2, "expected a matrix" + num_queries, num_caps = sims.shape + dists = -sims + caps_per_video = num_caps // num_queries + break_ties = "averaging" + + MISSING_VAL = 1E8 + query_ranks = [] + for ii in range(num_queries): + row_dists = dists[ii, :] + if query_masks is not None: + # Set missing queries to have a distance of infinity. 
A missing query + # refers to a query position `n` for a video that had less than `n` + # captions (for example, a few MSRVTT videos only have 19 queries) + row_dists[np.logical_not(query_masks.reshape(-1))] = MISSING_VAL + + # NOTE: Using distance subtraction to perform the ranking is easier to make + # deterministic than using argsort, which suffers from the issue of defining + # "stability" for equal distances. Example of distance subtraction code: + # github.com/antoine77340/Mixture-of-Embedding-Experts/blob/master/train.py + sorted_dists = np.sort(row_dists) + + min_rank = np.inf + for jj in range(ii * caps_per_video, (ii + 1) * caps_per_video): + if row_dists[jj] == MISSING_VAL: + # skip rankings of missing captions + continue + ranks = np.where((sorted_dists - row_dists[jj]) == 0)[0] + if break_ties == "optimistically": + rank = ranks[0] + elif break_ties == "averaging": + # NOTE: If there is more than one caption per video, its possible for the + # method to do "worse than chance" in the degenerate case when all + # similarities are tied. TODO(Samuel): Address this case. + rank = ranks.mean() + if rank < min_rank: + min_rank = rank + query_ranks.append(min_rank) + query_ranks = np.array(query_ranks) + + # sanity check against old version of code + if False: + sorted_dists = np.sort(dists, axis=1) + gt_dists_old = np.diag(dists) + gt_dists_old = gt_dists_old[:, np.newaxis] + rows_old, cols_old = np.where((sorted_dists - gt_dists_old) == 0) + if rows_old.size > num_queries: + _, idx = np.unique(rows_old, return_index=True) + cols_old = cols_old[idx] + num_diffs = (1 - (cols_old == query_ranks)).sum() + msg = f"new metric doesn't match in {num_diffs} places" + assert np.array_equal(cols_old, query_ranks), msg + + # visualise the distance matrix + import sys + import matplotlib + matplotlib.use("Agg") + import matplotlib.pyplot as plt + sys.path.insert(0, str(Path.home() / "coding/src/zsvision/python")) + from zsvision.zs_iterm import zs_dispFig # NOQA + plt.matshow(dists) + zs_dispFig() + + return cols2metrics(query_ranks, num_queries) + + +def retrieval_as_classification(sims, query_masks=None): + """Compute classification metrics from a similiarity matrix. + """ + assert sims.ndim == 2, "expected a matrix" + + # switch axes of query-labels and video + sims = sims.T + query_masks = query_masks.T + dists = -sims + num_queries, num_labels = sims.shape + break_ties = "averaging" + + query_ranks = [] + for ii in range(num_queries): + row_dists = dists[ii, :] + + # NOTE: Using distance subtraction to perform the ranking is easier to make + # deterministic than using argsort, which suffers from the issue of defining + # "stability" for equal distances. Example of distance subtraction code: + # github.com/antoine77340/Mixture-of-Embedding-Experts/blob/master/train.py + sorted_dists = np.sort(row_dists) + + # min_rank = np.inf + label_ranks = [] + for gt_label in np.where(query_masks[ii, :])[0]: + ranks = np.where((sorted_dists - row_dists[gt_label]) == 0)[0] + if break_ties == "optimistically": + rank = ranks[0] + elif break_ties == "averaging": + # NOTE: If there is more than one caption per video, its possible for the + # method to do "worse than chance" in the degenerate case when all + # similarities are tied. TODO(Samuel): Address this case. + rank = ranks.mean() + else: + raise ValueError(f"unknown tie-breaking method: {break_ties}") + label_ranks.append(rank) + # Avoid penalising for assigning higher similarity to other gt labels. 
This is + # done by subtracting out the better ranked query labels. Note that this step + # introduces a slight skew in favour of videos with lots of labels. We can + # address this later with a normalisation step if needed. + label_ranks = [x - idx for idx, x in enumerate(label_ranks)] + + # Include all labels in the final calculation + query_ranks.extend(label_ranks) + query_ranks = np.array(query_ranks) + + # sanity check against old version of code + if False: + # visualise the distance matrix + import sys + import matplotlib + matplotlib.use("Agg") + import matplotlib.pyplot as plt + sys.path.insert(0, str(Path.home() / "coding/src/zsvision/python")) + from zsvision.zs_iterm import zs_dispFig # NOQA + # plt.matshow(dists) + # zs_dispFig() + plt.hist(query_ranks, bins=313, alpha=0.5) + plt.grid() + zs_dispFig() + import ipdb; + ipdb.set_trace() + + return cols2metrics(query_ranks, num_queries=len(query_ranks)) + + +def cols2metrics(cols, num_queries): + metrics = {} + metrics["R1"] = 100 * float(np.sum(cols == 0)) / num_queries + metrics["R5"] = 100 * float(np.sum(cols < 5)) / num_queries + metrics["R10"] = 100 * float(np.sum(cols < 10)) / num_queries + metrics["R50"] = 100 * float(np.sum(cols < 50)) / num_queries + metrics["MedR"] = np.median(cols) + 1 + metrics["MeanR"] = np.mean(cols) + 1 + stats = [metrics[x] for x in ("R1", "R5", "R10")] + metrics["geometric_mean_R1-R5-R10"] = scipy.stats.mstats.gmean(stats) + return metrics + + +def mean_average_precision(sims, query_masks=None): + ap_meter = APMeter() + ap_meter.add(output=sims.T, target=query_masks.T) + return {"mAP": ap_meter.value().mean()} + +def acc(output, target): + with torch.no_grad(): + pred = torch.argmax(output, dim=1) + assert pred.shape[0] == len(target) + correct = 0 + correct += torch.sum(pred == target).item() + return correct / len(target) + + +def my_metric2(output, target, k=3): + with torch.no_grad(): + pred = torch.topk(output, k, dim=1)[1] + assert pred.shape[0] == len(target) + correct = 0 + for i in range(k): + correct += torch.sum(pred[:, i] == target).item() + return correct / len(target) + + +def video_precision(output, target): + """ percentage of videos which have been aligned to a matching text pair""" + assert output.shape[0] == target.shape[0] + assert output.shape[2] == target.shape[2] == 2 + + correct = 0 + for bout, btarg in zip(output, target): + for pair in bout: + eq = torch.eq(pair, btarg) + if torch.logical_and(eq[:, 0], eq[:, 1]).any(): + correct += 1 + return correct / (target.shape[0] * target.shape[1]) + +def video_precision_adj(output, target): + """ adjusts the video precision metric by ignoring videos which have no aligning text.""" + assert output.shape[0] == target.shape[0] + assert output.shape[2] == target.shape[2] == 2 + + assert output.shape[0] == target.shape[0] + assert output.shape[2] == target.shape[2] == 2 + + correct = 0 + for bout, btarg in zip(output, target): + for pair in bout: + eq = torch.eq(pair, btarg) + if torch.logical_and(eq[:, 0], eq[:, 1]).any(): + correct += 1 + denom = len(target[:, :, 0].unique()) + + return correct / denom + +def video_precision_adj(output, target): + """ adjusts the video precision metric by ignoring videos which have no aligning text.""" + assert output.shape[0] == target.shape[0] + assert output.shape[2] == target.shape[2] == 2 + + assert output.shape[0] == target.shape[0] + assert output.shape[2] == target.shape[2] == 2 + + correct = 0 + for bout, btarg in zip(output, target): + for pair in bout: + eq = torch.eq(pair, btarg) + if 
torch.logical_and(eq[:, 0], eq[:, 1]).any(): + correct += 1 + denom = len(target[:, :, 0].unique()) + + return correct / denom diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/temporal_roll.py b/Downstream/multi-modalities-downstream/CoTrain/modules/temporal_roll.py new file mode 100644 index 0000000..557bc03 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/temporal_roll.py @@ -0,0 +1,63 @@ +import torch +import torch.nn as nn +import random + + +class TemporalRoll(nn.Module): + def __init__(self, n_segment=3, n_div=8, v=0): + super(TemporalRoll, self).__init__() + self.n_segment = n_segment + self.fold_div = n_div + self.v = v + + def forward(self, x, layer=1): + # return x + nt, l, c = x.size() + n_batch = nt // self.n_segment + x = x.view(n_batch, self.n_segment, l, c) + if self.v == 0: + # 16, 3, 197, 768 + fold = l // self.fold_div + out = torch.zeros_like(x) + # keep cls token + out[:, :, 0] = x[:, :, 0] + # roll left step 1 along time dimension (1) + out[:, :, 1:fold+1] = torch.roll(x[:, :, 1:fold+1], 1, 1) + # roll right step 1 along time dimension (1) + out[:, :, -fold:] = torch.roll(x[:, :, -fold:], -1, 1) + # not roll + out[:, :, 1+fold:-fold] = x[:, :, 1+fold: -fold] + # # 16, 3, 197, 768 + # fold = l // self.fold_div + # out = torch.zeros_like(x) + # # roll left step 1 along time dimension (1) + # out[:, :, :fold] = torch.roll(x[:, :, :fold], 1, 1) + # # roll right step 1 along time dimension (1) + # out[:, :, -fold:] = torch.roll(x[:, :, -fold:], -1, 1) + # # not roll + # out[:, :, fold:-fold] = x[:, :, fold: -fold] + # random sampling + elif self.v == 1: + out = torch.zeros_like(x) + roll_token_idexs = random.sample(range(1, l), l//2) + # print(roll_token_idexs) + out = x + out[:, :, roll_token_idexs] = torch.roll(x[:, :, roll_token_idexs], 1, 1) + # roll different tokens for different blocks + elif self.v == 2: + rolled_token_len = l // self.fold_div + fold = rolled_token_len * (layer % self.fold_div) + begin_index = 1 + fold + end_index = min(1 + fold + rolled_token_len, l) + out = torch.zeros_like(x) + out[:, :, 0] = x[:, :, 0] # cls token unchanged + out[:, :, begin_index:] = x[:, :, begin_index:] + out[:, :, begin_index:end_index] = torch.roll(x[:, :, begin_index:end_index], 1, 1) + out[:, :, end_index:] = x[:, :, end_index:] + else: # not roll + fold = c // self.fold_div + out = torch.zeros_like(x) + out[:, :-1, :fold] = x[:, 1:, :fold] # shift left tokens + out[:, 1:, fold: 2 * fold] = x[:, :-1, fold: 2 * fold] # shift right tokens + out[:, :, 2 * fold:] = x[:, :, 2 * fold:] # not shift + return out.view(nt, l, c) \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/modules/text_prompt.py b/Downstream/multi-modalities-downstream/CoTrain/modules/text_prompt.py new file mode 100644 index 0000000..92ba7ae --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/modules/text_prompt.py @@ -0,0 +1,35 @@ +# Code for "ActionCLIP: ActionCLIP: A New Paradigm for Action Recognition" +# arXiv: +# Mengmeng Wang, Jiazheng Xing, Yong Liu + +import torch +import CoTrain.modules.InternVideo as internvideo +from CoTrain.datasets import K400VideoDataset + + +def text_prompt(data = K400VideoDataset.classes(), prompt_type='all'): + if prompt_type == 'all': + text_aug = [f"a photo of action {{}}", f"a picture of action {{}}", f"Human action of {{}}", f"{{}}, an action", + f"{{}} this is an action", f"{{}}, a video of action", f"Playing action of {{}}", f"{{}}", + f"Playing a kind of action, {{}}", 
f"Doing a kind of action, {{}}", f"Look, the human is {{}}", + f"Can you recognize the action of {{}}?", f"Video classification of {{}}", f"A video of {{}}", + f"The man is {{}}", f"The woman is {{}}"] + elif prompt_type == 'single': + text_aug = [f"A video of {{}}"] + elif prompt_type == 'single_doing': + text_aug = [f"A person is doing {{}}"] + elif prompt_type == 'no': + text_aug = [f"{{}}"] + print('-' * 80) + print('Prompt:') + print(text_aug) + print('-' * 80) + text_dict = {} + num_text_aug = len(text_aug) + + for ii, txt in enumerate(text_aug): + text_dict[ii] = torch.cat([internvideo.tokenize(txt.format(c), truncate=True) for c in data]) + + classes = torch.cat([v for _, v in text_dict.items()]) + + return classes, num_text_aug, text_dict \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/requirements.txt b/Downstream/multi-modalities-downstream/CoTrain/requirements.txt new file mode 100644 index 0000000..6b284ac --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/requirements.txt @@ -0,0 +1,4 @@ +numpy==1.21.6 +setuptools==61.2.0 +torch==1.9.0+cu111 +torchvision==0.10.0+cu111 diff --git a/Downstream/multi-modalities-downstream/CoTrain/transforms/__init__.py b/Downstream/multi-modalities-downstream/CoTrain/transforms/__init__.py new file mode 100644 index 0000000..a4fb649 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/transforms/__init__.py @@ -0,0 +1,15 @@ +from CoTrain.transforms.image.pixelbert import ( + pixelbert_transform, + pixelbert_transform_randaug, + open_clip_transform, +) + +_transforms = { + "pixelbert": pixelbert_transform, + "pixelbert_randaug": pixelbert_transform_randaug, + "open_clip": open_clip_transform, +} + + +def keys_to_transforms(keys: list, size=224, mode="train"): + return [_transforms[key](size=size, mode=mode) for key in keys] diff --git a/Downstream/multi-modalities-downstream/CoTrain/transforms/image/__init__.py b/Downstream/multi-modalities-downstream/CoTrain/transforms/image/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Downstream/multi-modalities-downstream/CoTrain/transforms/image/functional.py b/Downstream/multi-modalities-downstream/CoTrain/transforms/image/functional.py new file mode 100644 index 0000000..6069b9e --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/transforms/image/functional.py @@ -0,0 +1,93 @@ +import numbers +import torch +import cv2 +import numpy as np +import PIL + + +def _is_tensor_clip(clip): + return torch.is_tensor(clip) and clip.ndimension() == 4 + + +def crop_clip(clip, min_h, min_w, h, w): + if isinstance(clip[0], np.ndarray): + cropped = [img[min_h:min_h + h, min_w:min_w + w, :] for img in clip] + + elif isinstance(clip[0], PIL.Image.Image): + cropped = [ + img.crop((min_w, min_h, min_w + w, min_h + h)) for img in clip + ] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + return cropped + + +def resize_clip(clip, size, interpolation='bilinear'): + if isinstance(clip[0], np.ndarray): + if isinstance(size, numbers.Number): + im_h, im_w, im_c = clip[0].shape + # Min spatial dim already matches minimal size + if (im_w <= im_h and im_w == size) or (im_h <= im_w + and im_h == size): + return clip + new_h, new_w = get_resize_sizes(im_h, im_w, size) + size = (new_w, new_h) + else: + size = size[1], size[0] + if interpolation == 'bilinear': + np_inter = cv2.INTER_LINEAR + else: + np_inter = cv2.INTER_NEAREST + scaled = [ + cv2.resize(img, size, 
interpolation=np_inter) for img in clip + ] + elif isinstance(clip[0], PIL.Image.Image): + if isinstance(size, numbers.Number): + im_w, im_h = clip[0].size + # Min spatial dim already matches minimal size + if (im_w <= im_h and im_w == size) or (im_h <= im_w + and im_h == size): + return clip + new_h, new_w = get_resize_sizes(im_h, im_w, size) + size = (new_w, new_h) + else: + size = size[1], size[0] + if interpolation == 'bilinear': + pil_inter = PIL.Image.NEAREST + else: + pil_inter = PIL.Image.BILINEAR + scaled = [img.resize(size, pil_inter) for img in clip] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + return scaled + + +def get_resize_sizes(im_h, im_w, size): + if im_w < im_h: + ow = size + oh = int(size * im_h / im_w) + else: + oh = size + ow = int(size * im_w / im_h) + return oh, ow + + +def normalize(clip, mean, std, inplace=False): + if not _is_tensor_clip(clip): + raise TypeError('tensor is not a torch clip_test.') + + if not inplace: + clip = clip.clone() + + dtype = clip.dtype + dim = len(mean) + mean = torch.as_tensor(mean, dtype=dtype, device=clip.device) + std = torch.as_tensor(std, dtype=dtype, device=clip.device) + # print(clip_test.size()) + # if dim == 3: + clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None]) + # else: + # clip_test.sub_(mean[:, None, None]).div_(std[:, None, None]) + return clip \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/transforms/image/imageaug.py b/Downstream/multi-modalities-downstream/CoTrain/transforms/image/imageaug.py new file mode 100644 index 0000000..27da5d7 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/transforms/image/imageaug.py @@ -0,0 +1,16 @@ +def image_aug(images, image_transform): + # print(image_transform) + # I've no idea what the fuck this is doing + # TODO: Maybe remove the second view? 
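    # Editor's note -- a short, hedged clarification: this helper just applies
    # the first transform in `image_transform` twice to produce two "global
    # views" of the same image (a DINO-style multi-view pattern); the
    # commented-out code below suggests the local views were dropped.  A toy
    # usage sketch, never called here and assuming a plain torchvision
    # pipeline (the helper name is illustrative only):
    def _image_aug_toy_usage():
        from torchvision import transforms
        from PIL import Image
        tf = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])
        views = image_aug(Image.new("RGB", (256, 256)), [tf])
        # -> a list of two tensors, each of shape (1, 3, 224, 224)
        return views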
+ global_transform = image_transform[0] + # local_transform = image_transform[0][1] + global_images_tensor = [] + # 2 GLOBAL views + for i in range(2): + global_images_tensor.append(global_transform(images).unsqueeze(0)) + return global_images_tensor + # # 3 LOCAL VIEWS + # local_images_tensor = [] + # for i in range(3): + # local_images_tensor.append(local_transform(images).unsqueeze(0)) + # return [global_images_tensor, local_images_tensor] \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/CoTrain/transforms/image/pixelbert.py b/Downstream/multi-modalities-downstream/CoTrain/transforms/image/pixelbert.py new file mode 100644 index 0000000..07f7d55 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/transforms/image/pixelbert.py @@ -0,0 +1,59 @@ +from .utils import ( + inception_normalize, + MinMaxResize, +) +from torchvision import transforms +from .randaug import RandAugment + + +def pixelbert_transform(size=800, mode="train"): + longer = int((1333 / 800) * size) + return transforms.Compose( + [ + MinMaxResize(shorter=size, longer=longer), + transforms.ToTensor(), + inception_normalize, + ] + ) + + +def pixelbert_transform_randaug(size=800, mode="train"): + longer = int((1333 / 800) * size) + trs = transforms.Compose( + [ + MinMaxResize(shorter=size, longer=longer), + transforms.ToTensor(), + inception_normalize, + ] + ) + trs.transforms.insert(0, RandAugment(2, 9)) + return trs + + +def open_clip_transform(size=224, mode="train"): + input_mean = [0.48145466, 0.4578275, 0.40821073] + input_std = [0.26862954, 0.26130258, 0.27577711] + if mode == "train": + return transforms.Compose( + [ + transforms.RandomResizedCrop( + size, + scale=(0.9, 1.0), + interpolation=transforms.InterpolationMode.BICUBIC, + ), + transforms.ToTensor(), + transforms.Normalize(mean=input_mean, std=input_std), + ] + ) + else: + return transforms.Compose( + [ + transforms.Resize( + size, + interpolation=transforms.InterpolationMode.BICUBIC, + ), + transforms.CenterCrop(size), + transforms.ToTensor(), + transforms.Normalize(mean=input_mean, std=input_std), + ] + ) diff --git a/Downstream/multi-modalities-downstream/CoTrain/transforms/image/randaug.py b/Downstream/multi-modalities-downstream/CoTrain/transforms/image/randaug.py new file mode 100644 index 0000000..281d1c2 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/transforms/image/randaug.py @@ -0,0 +1,269 @@ +# code in this file is adpated from rpmcruz/autoaugment +# https://github.com/rpmcruz/autoaugment/blob/master/transformations.py +import random + +import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw +import numpy as np +import torch +from PIL import Image + + +def ShearX(img, v): # [-0.3, 0.3] + assert -0.3 <= v <= 0.3 + if random.random() > 0.5: + v = -v + return img.transform(img.size, PIL.Image.AFFINE, (1, v, 0, 0, 1, 0)) + + +def ShearY(img, v): # [-0.3, 0.3] + assert -0.3 <= v <= 0.3 + if random.random() > 0.5: + v = -v + return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, v, 1, 0)) + + +def TranslateX(img, v): # [-150, 150] => percentage: [-0.45, 0.45] + assert -0.45 <= v <= 0.45 + if random.random() > 0.5: + v = -v + v = v * img.size[0] + return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0)) + + +def TranslateXabs(img, v): # [-150, 150] => percentage: [-0.45, 0.45] + assert 0 <= v + if random.random() > 0.5: + v = -v + return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0)) + + +def TranslateY(img, v): # [-150, 150] => percentage: [-0.45, 0.45] + 
assert -0.45 <= v <= 0.45 + if random.random() > 0.5: + v = -v + v = v * img.size[1] + return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v)) + + +def TranslateYabs(img, v): # [-150, 150] => percentage: [-0.45, 0.45] + assert 0 <= v + if random.random() > 0.5: + v = -v + return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v)) + + +def Rotate(img, v): # [-30, 30] + assert -30 <= v <= 30 + if random.random() > 0.5: + v = -v + return img.rotate(v) + + +def AutoContrast(img, _): + return PIL.ImageOps.autocontrast(img) + + +def Invert(img, _): + return PIL.ImageOps.invert(img) + + +def Equalize(img, _): + return PIL.ImageOps.equalize(img) + + +def Flip(img, _): # not from the paper + return PIL.ImageOps.mirror(img) + + +def Solarize(img, v): # [0, 256] + assert 0 <= v <= 256 + return PIL.ImageOps.solarize(img, v) + + +def SolarizeAdd(img, addition=0, threshold=128): + img_np = np.array(img).astype(np.int) + img_np = img_np + addition + img_np = np.clip(img_np, 0, 255) + img_np = img_np.astype(np.uint8) + img = Image.fromarray(img_np) + return PIL.ImageOps.solarize(img, threshold) + + +def Posterize(img, v): # [4, 8] + v = int(v) + v = max(1, v) + return PIL.ImageOps.posterize(img, v) + + +def Contrast(img, v): # [0.1,1.9] + assert 0.1 <= v <= 1.9 + return PIL.ImageEnhance.Contrast(img).enhance(v) + + +def Color(img, v): # [0.1,1.9] + assert 0.1 <= v <= 1.9 + return PIL.ImageEnhance.Color(img).enhance(v) + + +def Brightness(img, v): # [0.1,1.9] + assert 0.1 <= v <= 1.9 + return PIL.ImageEnhance.Brightness(img).enhance(v) + + +def Sharpness(img, v): # [0.1,1.9] + assert 0.1 <= v <= 1.9 + return PIL.ImageEnhance.Sharpness(img).enhance(v) + + +def Cutout(img, v): # [0, 60] => percentage: [0, 0.2] + assert 0.0 <= v <= 0.2 + if v <= 0.0: + return img + + v = v * img.size[0] + return CutoutAbs(img, v) + + +def CutoutAbs(img, v): # [0, 60] => percentage: [0, 0.2] + # assert 0 <= v <= 20 + if v < 0: + return img + w, h = img.size + x0 = np.random.uniform(w) + y0 = np.random.uniform(h) + + x0 = int(max(0, x0 - v / 2.0)) + y0 = int(max(0, y0 - v / 2.0)) + x1 = min(w, x0 + v) + y1 = min(h, y0 + v) + + xy = (x0, y0, x1, y1) + color = (125, 123, 114) + # color = (0, 0, 0) + img = img.copy() + PIL.ImageDraw.Draw(img).rectangle(xy, color) + return img + + +def SamplePairing(imgs): # [0, 0.4] + def f(img1, v): + i = np.random.choice(len(imgs)) + img2 = PIL.Image.fromarray(imgs[i]) + return PIL.Image.blend(img1, img2, v) + + return f + + +def Identity(img, v): + return img + + +def augment_list(): # 16 oeprations and their ranges + # https://github.com/google-research/uda/blob/master/image/randaugment/policies.py#L57 + # l = [ + # (Identity, 0., 1.0), + # (ShearX, 0., 0.3), # 0 + # (ShearY, 0., 0.3), # 1 + # (TranslateX, 0., 0.33), # 2 + # (TranslateY, 0., 0.33), # 3 + # (Rotate, 0, 30), # 4 + # (AutoContrast, 0, 1), # 5 + # (Invert, 0, 1), # 6 + # (Equalize, 0, 1), # 7 + # (Solarize, 0, 110), # 8 + # (Posterize, 4, 8), # 9 + # # (Contrast, 0.1, 1.9), # 10 + # (Color, 0.1, 1.9), # 11 + # (Brightness, 0.1, 1.9), # 12 + # (Sharpness, 0.1, 1.9), # 13 + # # (Cutout, 0, 0.2), # 14 + # # (SamplePairing(imgs), 0, 0.4), # 15 + # ] + + # https://github.com/tensorflow/tpu/blob/8462d083dd89489a79e3200bcc8d4063bf362186/models/official/efficientnet/autoaugment.py#L505 + l = [ + (AutoContrast, 0, 1), + (Equalize, 0, 1), + # (Invert, 0, 1), + (Rotate, 0, 30), + (Posterize, 0, 4), + (Solarize, 0, 256), + (SolarizeAdd, 0, 110), + (Color, 0.1, 1.9), + (Contrast, 0.1, 1.9), + (Brightness, 0.1, 1.9), + 
(Sharpness, 0.1, 1.9), + (ShearX, 0.0, 0.3), + (ShearY, 0.0, 0.3), + # (CutoutAbs, 0, 40), + (TranslateXabs, 0.0, 100), + (TranslateYabs, 0.0, 100), + ] + + return l + + +class Lighting(object): + """Lighting noise(AlexNet - style PCA - based noise)""" + + def __init__(self, alphastd, eigval, eigvec): + self.alphastd = alphastd + self.eigval = torch.Tensor(eigval) + self.eigvec = torch.Tensor(eigvec) + + def __call__(self, img): + if self.alphastd == 0: + return img + + alpha = img.new().resize_(3).normal_(0, self.alphastd) + rgb = ( + self.eigvec.type_as(img) + .clone() + .mul(alpha.view(1, 3).expand(3, 3)) + .mul(self.eigval.view(1, 3).expand(3, 3)) + .sum(1) + .squeeze() + ) + + return img.add(rgb.view(3, 1, 1).expand_as(img)) + + +class CutoutDefault(object): + """ + Reference : https://github.com/quark0/darts/blob/master/cnn/utils.py + """ + + def __init__(self, length): + self.length = length + + def __call__(self, img): + h, w = img.size(1), img.size(2) + mask = np.ones((h, w), np.float32) + y = np.random.randint(h) + x = np.random.randint(w) + + y1 = np.clip(y - self.length // 2, 0, h) + y2 = np.clip(y + self.length // 2, 0, h) + x1 = np.clip(x - self.length // 2, 0, w) + x2 = np.clip(x + self.length // 2, 0, w) + + mask[y1:y2, x1:x2] = 0.0 + mask = torch.from_numpy(mask) + mask = mask.expand_as(img) + img *= mask + return img + + +class RandAugment: + def __init__(self, n, m): + self.n = n + self.m = m # [0, 30] + self.augment_list = augment_list() + + def __call__(self, img): + ops = random.choices(self.augment_list, k=self.n) + for op, minval, maxval in ops: + val = (float(self.m) / 30) * float(maxval - minval) + minval + img = op(img, val) + + return img diff --git a/Downstream/multi-modalities-downstream/CoTrain/transforms/image/utils.py b/Downstream/multi-modalities-downstream/CoTrain/transforms/image/utils.py new file mode 100644 index 0000000..bc99f62 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/transforms/image/utils.py @@ -0,0 +1,56 @@ +from torchvision import transforms +from PIL import Image + + +class MinMaxResize: + def __init__(self, shorter=800, longer=1333): + self.min = shorter + self.max = longer + + def __call__(self, x): + w, h = x.size + scale = self.min / min(w, h) + if h < w: + newh, neww = self.min, scale * w + else: + newh, neww = scale * h, self.min + + if max(newh, neww) > self.max: + scale = self.max / max(newh, neww) + newh = newh * scale + neww = neww * scale + + newh, neww = int(newh + 0.5), int(neww + 0.5) + newh, neww = newh // 32 * 32, neww // 32 * 32 + + return x.resize((neww, newh), resample=Image.BICUBIC) + + +class UnNormalize(object): + def __init__(self, mean, std): + self.mean = mean + self.std = std + + def __call__(self, tensor): + """ + Args: + tensor (Tensor): Tensor image of size (C, H, W) to be normalized. + Returns: + Tensor: Normalized image. 
+ """ + for t, m, s in zip(tensor, self.mean, self.std): + t.mul_(s).add_(m) + # The normalize code -> t.sub_(m).div_(s) + return tensor + + +# This is simple maximum entropy normalization performed in Inception paper +inception_normalize = transforms.Compose( + [transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])] +) + +# ViT uses simple non-biased inception normalization +# https://github.com/google-research/vision_transformer/blob/master/vit_jax/input_pipeline.py#L132 +inception_unnormalize = transforms.Compose( + [UnNormalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])] +) diff --git a/Downstream/multi-modalities-downstream/CoTrain/transforms/video/__init__.py b/Downstream/multi-modalities-downstream/CoTrain/transforms/video/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Downstream/multi-modalities-downstream/CoTrain/transforms/video/video_transform.py b/Downstream/multi-modalities-downstream/CoTrain/transforms/video/video_transform.py new file mode 100644 index 0000000..578ccf5 --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/transforms/video/video_transform.py @@ -0,0 +1,663 @@ +import numbers +import random +import numpy as np +import PIL +import skimage +import skimage.transform +import torchvision +import torch +from CoTrain.transforms.image import functional as F +from torchvision import transforms +from PIL import Image + + +def convert_img(img): + """Converts (H, W, C) numpy.ndarray to (C, W, H) format + """ + if len(img.shape) == 3: + img = img.transpose(2, 0, 1) + if len(img.shape) == 2: + img = np.expand_dims(img, 0) + return img + + +class ClipToTensor(object): + """Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255] + to a torch.FloatTensor of shape (C x m x H x W) in the range [0, 1.0] + """ + + def __init__(self, channel_nb=3, div_255=True, numpy=False): + self.channel_nb = channel_nb + self.div_255 = div_255 + self.numpy = numpy + + def __call__(self, clip): + """ + Args: clip_test (list of numpy.ndarray): clip_test (list of images) + to be converted to tensor. 
+ """ + # Retrieve shape + if isinstance(clip[0], np.ndarray): + h, w, ch = clip[0].shape + assert ch == self.channel_nb, 'Got {0} instead of 3 channels'.format( + ch) + elif isinstance(clip[0], Image.Image): + w, h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image\ + but got list of {0}'.format(type(clip[0]))) + + np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)]) + + # Convert + for img_idx, img in enumerate(clip): + if isinstance(img, np.ndarray): + pass + elif isinstance(img, Image.Image): + img = np.array(img, copy=False) + else: + raise TypeError('Expected numpy.ndarray or PIL.Image\ + but got list of {0}'.format(type(clip[0]))) + img = convert_img(img) + np_clip[:, img_idx, :, :] = img + if self.numpy: + if self.div_255: + np_clip = np_clip / 255 + return np_clip + + else: + tensor_clip = torch.from_numpy(np_clip) + + if not isinstance(tensor_clip, torch.FloatTensor): + tensor_clip = tensor_clip.float() + if self.div_255: + tensor_clip = tensor_clip.div(255) + return tensor_clip + + +class ToTensor(object): + """Converts numpy array to tensor + """ + + def __call__(self, array): + tensor = torch.from_numpy(array) + return tensor + +class ColorDistortion(object): + def __init__(self, s=1.0): + self.s = s + self.color_jitter = transforms.ColorJitter(0.8*s, 0.8*s, 0.8*s, 0.2*s) + self.rnd_color_jitter = transforms.RandomApply([self.color_jitter], p=0.8) + self.rnd_gray = transforms.RandomGrayscale(p=0.2) + + def __call__(self, video): + color_distort = transforms.Compose([self.rnd_color_jitter, self.rnd_gray]) + return color_distort(video) + + +class Compose(object): + """Composes several transforms + Args: + transforms (list of ``Transform`` objects): list of transforms + to compose + """ + + def __init__(self, transforms): + self.transforms = transforms + + def __call__(self, clip): + for t in self.transforms: + clip = t(clip) + return clip + + +class RandomHorizontalFlip(object): + """Horizontally flip the list of given images randomly + with a probability 0.5 + """ + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Randomly flipped clip_test + """ + if random.random() < 0.5: + if isinstance(clip[0], np.ndarray): + return [np.fliplr(img) for img in clip] + elif isinstance(clip[0], PIL.Image.Image): + return [ + img.transpose(PIL.Image.FLIP_LEFT_RIGHT) for img in clip + ] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + ' but got list of {0}'.format(type(clip[0]))) + return clip + + +class RandomResize(object): + """Resizes a list of (H x W x C) numpy.ndarray to the final size + The larger the original image is, the more times it takes to + interpolate + Args: + interpolation (str): Can be one of 'nearest', 'bilinear' + defaults to nearest + size (tuple): (widht, height) + """ + + def __init__(self, ratio=(3. / 4., 4. 
/ 3.), interpolation='nearest'): + self.ratio = ratio + self.interpolation = interpolation + + def __call__(self, clip): + scaling_factor = random.uniform(self.ratio[0], self.ratio[1]) + + if isinstance(clip[0], np.ndarray): + im_h, im_w, im_c = clip[0].shape + elif isinstance(clip[0], PIL.Image.Image): + im_w, im_h = clip[0].size + + new_w = int(im_w * scaling_factor) + new_h = int(im_h * scaling_factor) + new_size = (new_w, new_h) + resized = F.resize_clip( + clip, new_size, interpolation=self.interpolation) + return resized + + +class Resize(object): + """Resizes a list of (H x W x C) numpy.ndarray to the final size + The larger the original image is, the more times it takes to + interpolate + Args: + interpolation (str): Can be one of 'nearest', 'bilinear' + defaults to nearest + size (tuple): (widht, height) + """ + + def __init__(self, size, interpolation='nearest'): + self.size = size + self.interpolation = interpolation + + def __call__(self, clip): + resized = F.resize_clip( + clip, self.size, interpolation=self.interpolation) + return resized + + +class RandomCrop(object): + """Extract random crop at the same location for a list of images + Args: + size (sequence or int): Desired output size for the + crop in format (h, w) + """ + + def __init__(self, size): + if isinstance(size, numbers.Number): + size = (size, size) + + self.size = size + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + h, w = self.size + if isinstance(clip[0], np.ndarray): + im_h, im_w, im_c = clip[0].shape + elif isinstance(clip[0], PIL.Image.Image): + im_w, im_h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + if w > im_w or h > im_h: + error_msg = ( + 'Initial image size should be larger then ' + 'cropped size but got cropped sizes : ({w}, {h}) while ' + 'initial image is ({im_w}, {im_h})'.format( + im_w=im_w, im_h=im_h, w=w, h=h)) + raise ValueError(error_msg) + + x1 = random.randint(0, im_w - w) + y1 = random.randint(0, im_h - h) + cropped = F.crop_clip(clip, y1, x1, h, w) + + return cropped + + +class CornerCrop(object): + + def __init__(self, size, crop_position=None): + self.size = size + if crop_position is None: + self.randomize = True + else: + self.randomize = False + self.crop_position = crop_position + self.crop_positions = ['c', 'tl', 'tr', 'bl', 'br'] + + def __call__(self, imgs): + t, h, w, c = imgs.shape + corner_imgs = list() + for n in self.crop_positions: + #print(n) + if n == 'c': + th, tw = (self.size, self.size) + x1 = int(round((w- tw) / 2.)) + y1 = int(round((h - th) / 2.)) + x2 = x1 + tw + y2 = y1 + th + elif n == 'tl': + x1 = 0 + y1 = 0 + x2 = self.size + y2 = self.size + elif n == 'tr': + x1 = w - self.size + y1 = 0 + x2 = w + y2 = self.size + elif n == 'bl': + x1 = 0 + y1 = h - self.size + x2 = self.size + y2 = h + elif n == 'br': + x1 = w - self.size + y1 = h - self.size + x2 = w + y2 = h + corner_imgs.append(imgs[:, y1:y2, x1:x2, :]) + return corner_imgs + + def randomize_parameters(self): + if self.randomize: + self.crop_position = self.crop_positions[random.randint( + 0, + len(self.crop_positions) - 1)] + + +class RandomRotation(object): + """Rotate entire clip_test randomly by a random angle within + given bounds + Args: + degrees (sequence or int): Range of degrees to select from + If degrees is a number instead of sequence like 
(min, max), + the range of degrees, will be (-degrees, +degrees). + """ + + def __init__(self, degrees): + if isinstance(degrees, numbers.Number): + if degrees < 0: + raise ValueError('If degrees is a single number,' + 'must be positive') + degrees = (-degrees, degrees) + else: + if len(degrees) != 2: + raise ValueError('If degrees is a sequence,' + 'it must be of len 2.') + + self.degrees = degrees + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + angle = random.uniform(self.degrees[0], self.degrees[1]) + if isinstance(clip[0], np.ndarray): + rotated = [skimage.transform.rotate(img, angle) for img in clip] + elif isinstance(clip[0], PIL.Image.Image): + rotated = [img.rotate(angle) for img in clip] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + + return rotated + + +class STA_RandomRotation(object): + """Rotate entire clip_test randomly by a random angle within + given bounds + Args: + degrees (sequence or int): Range of degrees to select from + If degrees is a number instead of sequence like (min, max), + the range of degrees, will be (-degrees, +degrees). + """ + + def __init__(self, degrees): + if isinstance(degrees, numbers.Number): + if degrees < 0: + raise ValueError('If degrees is a single number,' + 'must be positive') + degrees = (-degrees, degrees) + else: + if len(degrees) != 2: + raise ValueError('If degrees is a sequence,' + 'it must be of len 2.') + + self.degrees = degrees + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + bsz = len(clip) + angle = random.uniform(self.degrees[0], self.degrees[1]) + angles = [(i+1)/(bsz+1) * angle for i in range(bsz)] + if isinstance(clip[0], np.ndarray): + rotated = [skimage.transform.rotate(img, angles[i]) for i, img in enumerate(clip)] + elif isinstance(clip[0], PIL.Image.Image): + rotated = [img.rotate(angles[i]) for i, img in enumerate(clip)] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + + return rotated + + +class Each_RandomRotation(object): + """Rotate entire clip_test randomly by a random angle within + given bounds + Args: + degrees (sequence or int): Range of degrees to select from + If degrees is a number instead of sequence like (min, max), + the range of degrees, will be (-degrees, +degrees). 
+ """ + + def __init__(self, degrees): + if isinstance(degrees, numbers.Number): + if degrees < 0: + raise ValueError('If degrees is a single number,' + 'must be positive') + degrees = (-degrees, degrees) + else: + if len(degrees) != 2: + raise ValueError('If degrees is a sequence,' + 'it must be of len 2.') + + self.degrees = degrees + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + bsz = len(clip) + angles = [random.uniform(self.degrees[0], self.degrees[1]) for i in range(bsz)] + # print(angles) + if isinstance(clip[0], np.ndarray): + rotated = [skimage.transform.rotate(img, angles[i]) for i, img in enumerate(clip)] + elif isinstance(clip[0], PIL.Image.Image): + rotated = [img.rotate(angles[i]) for i, img in enumerate(clip)] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + + return rotated + + +class CenterCrop(object): + """Extract center crop at the same location for a list of images + Args: + size (sequence or int): Desired output size for the + crop in format (h, w) + """ + + def __init__(self, size): + if isinstance(size, numbers.Number): + size = (size, size) + + self.size = size + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + h, w = self.size + if isinstance(clip[0], np.ndarray): + im_h, im_w, im_c = clip[0].shape + elif isinstance(clip[0], PIL.Image.Image): + im_w, im_h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + if w > im_w or h > im_h: + error_msg = ( + 'Initial image size should be larger then ' + 'cropped size but got cropped sizes : ({w}, {h}) while ' + 'initial image is ({im_w}, {im_h})'.format( + im_w=im_w, im_h=im_h, w=w, h=h)) + raise ValueError(error_msg) + + x1 = int(round((im_w - w) / 2.)) + y1 = int(round((im_h - h) / 2.)) + cropped = F.crop_clip(clip, y1, x1, h, w) + + return cropped + + +class ColorJitter(object): + """Randomly change the brightness, contrast and saturation and hue of the clip_test + Args: + brightness (float): How much to jitter brightness. brightness_factor + is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]. + contrast (float): How much to jitter contrast. contrast_factor + is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]. + saturation (float): How much to jitter saturation. saturation_factor + is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]. + hue(float): How much to jitter hue. hue_factor is chosen uniformly from + [-hue, hue]. Should be >=0 and <= 0.5. 
+ """ + + def __init__(self, brightness=0, contrast=0, saturation=0, hue=0): + self.brightness = brightness + self.contrast = contrast + self.saturation = saturation + self.hue = hue + + def get_params(self, brightness, contrast, saturation, hue): + if brightness > 0: + brightness_factor = random.uniform( + max(0, 1 - brightness), 1 + brightness) + else: + brightness_factor = None + + if contrast > 0: + contrast_factor = random.uniform( + max(0, 1 - contrast), 1 + contrast) + else: + contrast_factor = None + + if saturation > 0: + saturation_factor = random.uniform( + max(0, 1 - saturation), 1 + saturation) + else: + saturation_factor = None + + if hue > 0: + hue_factor = random.uniform(-hue, hue) + else: + hue_factor = None + return brightness_factor, contrast_factor, saturation_factor, hue_factor + + def __call__(self, clip): + """ + Args: + clip_test (list): list of PIL.Image + Returns: + list PIL.Image : list of transformed PIL.Image + """ + if isinstance(clip[0], np.ndarray): + raise TypeError( + 'Color jitter not yet implemented for numpy arrays') + elif isinstance(clip[0], PIL.Image.Image): + brightness, contrast, saturation, hue = self.get_params( + self.brightness, self.contrast, self.saturation, self.hue) + + # Create img sync_dir function sequence + img_transforms = [] + if brightness is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness)) + if saturation is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation)) + if hue is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue)) + if contrast is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast)) + random.shuffle(img_transforms) + + # Apply to all images + jittered_clip = [] + for img in clip: + for func in img_transforms: + jittered_img = func(img) + jittered_clip.append(jittered_img) + + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + return jittered_clip + + +class EachColorJitter(object): + """Randomly change the brightness, contrast and saturation and hue of the clip_test + Args: + brightness (float): How much to jitter brightness. brightness_factor + is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]. + contrast (float): How much to jitter contrast. contrast_factor + is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]. + saturation (float): How much to jitter saturation. saturation_factor + is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]. + hue(float): How much to jitter hue. hue_factor is chosen uniformly from + [-hue, hue]. Should be >=0 and <= 0.5. 
+ """ + + def __init__(self, brightness=0, contrast=0, saturation=0, hue=0): + self.brightness = brightness + self.contrast = contrast + self.saturation = saturation + self.hue = hue + + def get_params(self, brightness, contrast, saturation, hue): + if brightness > 0: + brightness_factor = random.uniform( + max(0, 1 - brightness), 1 + brightness) + else: + brightness_factor = None + + if contrast > 0: + contrast_factor = random.uniform( + max(0, 1 - contrast), 1 + contrast) + else: + contrast_factor = None + + if saturation > 0: + saturation_factor = random.uniform( + max(0, 1 - saturation), 1 + saturation) + else: + saturation_factor = None + + if hue > 0: + hue_factor = random.uniform(-hue, hue) + else: + hue_factor = None + return brightness_factor, contrast_factor, saturation_factor, hue_factor + + def __call__(self, clip): + """ + Args: + clip_test (list): list of PIL.Image + Returns: + list PIL.Image : list of transformed PIL.Image + """ + if isinstance(clip[0], np.ndarray): + raise TypeError( + 'Color jitter not yet implemented for numpy arrays') + elif isinstance(clip[0], PIL.Image.Image): + brightness, contrast, saturation, hue = self.get_params( + self.brightness, self.contrast, self.saturation, self.hue) + + # Create img sync_dir function sequence + img_transforms = [] + if brightness is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness)) + if saturation is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation)) + if hue is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue)) + if contrast is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast)) + random.shuffle(img_transforms) + + # Apply to all images + jittered_clip = [] + for img in clip: + for func in img_transforms: + jittered_img = func(img) + jittered_clip.append(jittered_img) + + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + return jittered_clip + + +class Normalize(object): + """Normalize a clip_test with mean and standard deviation. + Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this sync_dir + will normalize each channel of the input ``torch.*Tensor`` i.e. + ``input[channel] = (input[channel] - mean[channel]) / std[channel]`` + .. note:: + This sync_dir acts out of place, i.e., it does not mutates the input tensor. + Args: + mean (sequence): Sequence of means for each channel. + std (sequence): Sequence of standard deviations for each channel. + """ + + def __init__(self, mean, std): + self.mean = mean + self.std = std + + def __call__(self, clip): + """ + Args: + clip (Tensor): Tensor clip_test of size (T, C, H, W) to be normalized. + Returns: + Tensor: Normalized Tensor clip_test. 
+ """ + return F.normalize(clip, self.mean, self.std) + + def __repr__(self): + return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std) + + +class TensorToNumpy(object): + + def __init__(self): + print("convert to numpy") + + def __call__(self, clip): + np_clip = clip.permute(1, 2, 3, 0).cpu().detach().numpy() + pil_clip = [Image.fromarray(np.uint8(numpy_image)).convert('RGB') for numpy_image in np_clip] + return pil_clip diff --git a/Downstream/multi-modalities-downstream/CoTrain/transforms/video/videoaug.py b/Downstream/multi-modalities-downstream/CoTrain/transforms/video/videoaug.py new file mode 100644 index 0000000..a5ddf2a --- /dev/null +++ b/Downstream/multi-modalities-downstream/CoTrain/transforms/video/videoaug.py @@ -0,0 +1,99 @@ +# input: (C, T, H, W) output: (C, T, H, W) +def VideoTransform(mode='train', crop_size=224, backend='v100'): + if backend == 'a100': + print("initalize data augmentation for a100 gpus") + import CoTrain.transforms.video.video_transform as video_transform + from torchvision import transforms + # https://github.com/FingerRec/BE/blob/main/src/Contrastive/augment/video_transformations/volume_transforms.py + + input_mean = [0.48145466, 0.4578275, 0.40821073] + input_std = [0.26862954, 0.26130258, 0.27577711] + scale_size = crop_size * 256 // 224 + if mode == 'train': + global_transforms = transforms.Compose([ + video_transform.TensorToNumpy(), + # video_transform.Resize(int(crop_size * 1.2)), # 256/224 = 1.14 + video_transform.Resize(scale_size), + video_transform.RandomCrop(crop_size), + # video_transform.ColorJitter(0.5, 0.5, 0.25, 0.5), # color operation perimitted, damage attribute + video_transform.ClipToTensor(channel_nb=3), + # video_transform.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + video_transform.Normalize(mean=input_mean, std=input_std) + ]) + local_transforms = transforms.Compose([ + video_transform.TensorToNumpy(), + video_transform.Resize(crop_size), # 256/224 = 1.14 + video_transform.RandomCrop(96), + # video_transform.ColorJitter(0.5, 0.5, 0.25, 0.5), # color operation perimitted, damage attribute + video_transform.ClipToTensor(channel_nb=3), + # video_transform.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + video_transform.Normalize(mean=input_mean, std=input_std) + ]) + else: + global_transforms = transforms.Compose([ + video_transform.TensorToNumpy(), + # video_transform.Resize(int(crop_size * 1.2)), # 256 + video_transform.Resize(scale_size), + video_transform.CenterCrop(crop_size), # 224 + video_transform.ClipToTensor(channel_nb=3), + # video_transform.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + video_transform.Normalize(mean=input_mean, std=input_std) + ]) + local_transforms = transforms.Compose([ + video_transform.TensorToNumpy(), + video_transform.Resize(crop_size), # 256 + video_transform.CenterCrop(96), # 224 + video_transform.ClipToTensor(channel_nb=3), + # video_transform.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + video_transform.Normalize(mean=input_mean, std=input_std) + ]) + return [global_transforms, local_transforms] + else: + # for pytorch > 1.9.0, V100 + import pytorchvideo.transforms as video_transforms + # https://pytorchvideo.readthedocs.io/en/latest/api/transforms/transforms.html + global_transform = video_transforms.create_video_transform(mode=mode, min_size=int(crop_size*1.2), + max_size=int(crop_size*1.5), + crop_size=crop_size, + aug_type='randaug', # randaug/augmix + num_samples=None) # not use 
temporal sub sampling + local_transform = video_transforms.create_video_transform(mode=mode, min_size=crop_size, + max_size=int(crop_size*1.5), + crop_size=96, + aug_type='randaug', # randaug/augmix + num_samples=None) # no temporal sub-sampling + return [global_transform, local_transform] + + +def video_aug(videos, video_transform, byte=False): + if byte: + videos = videos.permute(1, 0, 2, 3).byte() # tchw -> cthw + else: + videos = videos.permute(1, 0, 2, 3) + # normal + # videos_tensor = [video_transform(videos).permute(1, 0, 2, 3)] # -> tchw + # dino + global_videos_tensor = [] + global_transform, local_transform = video_transform + # print(videos.type()) + # GLOBAL views (a single view is generated below) + for i in range(1): + global_videos_tensor.append(global_transform(videos).permute(1, 0, 2, 3)) + # LOCAL views (disabled) + # local_videos_tensor = [] + # for i in range(0): + # local_videos_tensor.append(local_transform(videos).permute(1, 0, 2, 3)) + return global_videos_tensor + # return [global_videos_tensor, local_videos_tensor] + + +# # dino +# def video_aug(videos, video_transform, byte=False): +# if byte: +# videos = videos.permute(1, 0, 2, 3) # tchw -> cthw +# else: +# videos = videos.permute(1, 0, 2, 3).byte() +# # two global view +# videos_tensor = [video_transform(videos).permute(1, 0, 2, 3), video_transform(videos).permute(1, 0, 2, 3)] # -> tchw +# # local views +# return videos_tensor \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/README.md b/Downstream/multi-modalities-downstream/README.md new file mode 100644 index 0000000..0868e98 --- /dev/null +++ b/Downstream/multi-modalities-downstream/README.md @@ -0,0 +1,50 @@ +# Multi-Modalities-Downstream + +This is an official implementation of the multi-modality downstream tasks in [InternVideo](https://arxiv.org/abs/2212.03191), including zero-shot action recognition, zero-shot multiple choice, and video question answering. + +## Usage + +### Pre-trained model preparation + +We currently provide the [B/16](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/pretrain/InternVideo-MM-B-16.ckpt) model; please download it from Aliyun. You will also need the original CLIP ViT-B/16 model. Please modify the /path/to/model placeholders in the scripts accordingly. + +### Installation and data preparation + +The code is mostly based on [All-In-One](https://github.com/showlab/all-in-one). Please follow its instructions for installation and data preparation. + +### Downstream tasks + +#### Zero-shot action recognition + +Please follow `scripts/zs_classify.sh`. We provide results on the Kinetics-400 **test set**. + +| | B/16 | L/14 | +|:---| ----------- | ----------- | +|K400 | 56.65 | 64.25 | + +#### Zero-shot multiple choice + +Please follow `scripts/zs_choice_[dataset].sh`. We provide results on MSRVTT and LSMDC. + +| | B/16 | L/14 | +|:---| ----------- | ----------- | +|MSRVTT| 91.31 | 93.44 | +|LSMDC| 73.96 | 77.26 | + +#### Video question answering + +Please follow `scripts/finetune_[dataset].sh`. We provide results on MSRVTT, MSVD, and TGIF-FrameQA. + +| | B/16 | L/14 | +|:---| ----------- | ----------- | +|MSRVTT| 44.58 | 47.14 | +|MSVD| 51.77 | 55.54 | +|TGIF-Frame| 67.83 | 72.22 | + +## TODO + +The L/14 model is on its way. + +## Acknowledgement + +This repo is built upon [All-In-One](https://github.com/showlab/all-in-one), [CLIP](https://github.com/openai/CLIP), [CoCa](https://github.com/lucidrains/CoCa-pytorch) and [open_clip](https://github.com/mlfoundations/open_clip).
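To make the pieces above concrete, here is a minimal usage sketch of the `VideoTransform`/`video_aug` pipeline added in `videoaug.py` (a100 backend). The toy input tensor, the chosen `crop_size`, and the printed shape are illustrative assumptions rather than repository defaults, and the sketch presumes the `CoTrain` package (including its `CoTrain.transforms.image.functional` helpers) is importable from the working directory.

```python
import torch

from CoTrain.transforms.video.videoaug import VideoTransform, video_aug

# Toy stand-in for a decoded clip: 8 RGB frames of 256x256 in (T, C, H, W),
# uint8 values in [0, 255] -- purely illustrative.
frames = torch.randint(0, 256, (8, 3, 256, 256), dtype=torch.uint8)

# Build the train-time (global, local) transform pair for the a100 backend;
# crop_size=224 implies a 256-pixel resize followed by a 224 random crop.
transforms_pair = VideoTransform(mode='train', crop_size=224, backend='a100')

# video_aug permutes (T, C, H, W) -> (C, T, H, W), applies the global transform,
# and returns a list holding one augmented view permuted back to (T, C, H, W).
views = video_aug(frames, transforms_pair, byte=True)
print(views[0].shape)  # expected to be roughly torch.Size([8, 3, 224, 224])
```

The v100 backend accepts the same arguments but builds its global/local transforms with `pytorchvideo.transforms.create_video_transform` instead.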
diff --git a/Downstream/multi-modalities-downstream/requirement.txt b/Downstream/multi-modalities-downstream/requirement.txt new file mode 100644 index 0000000..875db84 --- /dev/null +++ b/Downstream/multi-modalities-downstream/requirement.txt @@ -0,0 +1,28 @@ +bidict==0.21.4 +decord==0.6.0 +demoji==1.1.0 +docutils==0.18.1 +editdistance==0.6.0 +einops==0.3.0 +ffmpeg_python==0.2.0 +imageio==2.9.0 +importlib_metadata==4.11.4 +ipdb==0.13.4 +matplotlib==3.4.3 +numpy==1.19.5 +opencv_python==4.6.0.66 +pandas==1.1.5 +pyarrow==2.0.0 +pytorch_lightning==1.6.4 +regex==2021.8.28 +sacred==0.8.2 +scikit_image==0.19.3 +scipy==1.7.1 +setuptools==58.2.0 +timm==0.4.5 +torch==1.9.0+cu111 +torchmetrics==0.9.1 +torchvision==0.10.0+cu111 +tqdm==4.64.0 +transformers==4.2.1 +tslearn==0.5.2 diff --git a/Downstream/multi-modalities-downstream/run.py b/Downstream/multi-modalities-downstream/run.py new file mode 100644 index 0000000..1968a42 --- /dev/null +++ b/Downstream/multi-modalities-downstream/run.py @@ -0,0 +1,155 @@ +import warnings + +from sklearn import ensemble +# This ignore the scheduler warning, see https://github.com/Lightning-AI/lightning/issues/5558 +warnings.filterwarnings("ignore", "Detected call of", UserWarning) + +import os +import copy +import pytorch_lightning as pl +from CoTrain.config import ex +from CoTrain.modules import CoTrainTransformerSS, CLIP +from CoTrain.datamodules.video.multitask_datamodule import MTDataModule +import datetime +from pytorch_lightning.utilities.cloud_io import get_filesystem +from pytorch_lightning.utilities.cloud_io import load as pl_load + +import torch +import numpy as np +from pytorch_lightning.strategies import DDPStrategy +torch.manual_seed(0) + + +class CustomDDPStrategy(DDPStrategy): + def configure_ddp(self): + super().configure_ddp() + self._model._set_static_graph() # THIS IS THE MAGIC LINE + + +def deterministic_index_select(x, dim, indices): + """ + input_tensor: Tensor + dim: dim + indices: 1D tensor + """ + tensor_transpose = torch.transpose(x, 0, dim) + return tensor_transpose[indices].transpose(dim, 0) + +@ex.automain +def main(_config): + _config = copy.deepcopy(_config) + pl.seed_everything(_config["seed"]) + + dm = MTDataModule(_config, dist=True) + if not _config["clip"]: + model = CoTrainTransformerSS(_config) + else: + model = CLIP(_config) + + # assert False, [n for n, p in model.named_parameters() if p.requires_grad][:10] + + exp_name = f'{_config["exp_name"]}' + + os.makedirs(_config["log_dir"], exist_ok=True) + checkpoint_callback = pl.callbacks.ModelCheckpoint( + save_top_k=_config["save_top_k"], + # every_n_epochs=_config["save_checkpoints_interval"], + every_n_train_steps=_config["val_check_interval"], + verbose=True, + monitor="contrastive/train/loss", + mode="min", + save_last=_config["save_last"], + dirpath=_config["model_dir"], + ) + now = datetime.datetime.now() + if not isinstance(_config["load_path"], str): + instance_name = f'{exp_name}_seed{_config["seed"]}_from_multiple' + else: + instance_name = f'{exp_name}_seed{_config["seed"]}_from_{"_".join(_config["load_path"].split("/")[-2:])[:-5]}' + logger = pl.loggers.TensorBoardLogger( + _config["log_dir"], + name=instance_name, + version="version_0", + ) + + lr_callback = pl.callbacks.LearningRateMonitor(logging_interval="step") + + summary_callback = pl.callbacks.ModelSummary(max_depth=1) + + callbacks = [checkpoint_callback, lr_callback, summary_callback] + + num_gpus = ( + _config["num_gpus"] + if isinstance(_config["num_gpus"], int) + else len(_config["num_gpus"]) + ) + # 
print all config at the begin + print('='*70+'Config: '+'='*70) + print(instance_name) + print(_config) + print('='*150) + + # notice _config["batch_size"] should be max length for all machines, eg. at least 1024 + grad_steps = _config["batch_size"] // ( + _config["per_gpu_batchsize"] * num_gpus * _config["num_nodes"] + ) + assert grad_steps > 0, (_config["batch_size"], _config["per_gpu_batchsize"]) + + if not _config["clip_use_checkpoint"]: + # assert not _config["clip_use_checkpoint"], "Do not use gradient accumulation and checkpoint at the same time" + if _config["loss_names"]["openend_vqa"] >= 1: + find_unused_paramters = True + else: + find_unused_paramters = False + strategy = DDPStrategy(find_unused_parameters=find_unused_paramters) + else: + assert grad_steps == 1 + strategy = CustomDDPStrategy() + + max_steps = _config["max_steps"] if _config["max_steps"] is not None else None + resume_ckpt = _config["resume_from"] + + if max_steps == None: + max_steps = -1 + + trainer = pl.Trainer( + devices=_config["num_gpus"], + num_nodes=_config["num_nodes"], + precision=_config["precision"], + accelerator="gpu", + benchmark=True, + deterministic=False, + max_epochs=_config["max_epoch"] if max_steps == -1 else 100, + max_steps=max_steps, + callbacks=callbacks, + logger=logger, + # prepare_data_per_node=False, + replace_sampler_ddp=False, + accumulate_grad_batches=grad_steps, + log_every_n_steps=10, + fast_dev_run=_config["fast_dev_run"], + val_check_interval=_config["val_check_interval"], + strategy=strategy, + # show_progress_bar=False, + # progress_bar_refresh_rate=0 + ) + + fs = get_filesystem(resume_ckpt) + if fs.exists(resume_ckpt): + with fs.open(resume_ckpt, "rb") as f: + ckpt = torch.load(f, map_location='cpu') + # This hacks pl wrong steps for logger + global_step_offset = ckpt["global_step"] + trainer.fit_loop.epoch_loop._batches_that_stepped = global_step_offset + del ckpt + pass + else: + resume_ckpt = None + + print("accumulate grad batches is: ", trainer.accumulate_grad_batches) + + if not _config["test_only"]: + with torch.autograd.set_detect_anomaly(True): + trainer.fit(model, datamodule=dm, ckpt_path=resume_ckpt) + else: + trainer.test(model, datamodule=dm, ckpt_path=resume_ckpt) diff --git a/Downstream/multi-modalities-downstream/scripts/finetune_msrvttqa.sh b/Downstream/multi-modalities-downstream/scripts/finetune_msrvttqa.sh new file mode 100755 index 0000000..a436fbc --- /dev/null +++ b/Downstream/multi-modalities-downstream/scripts/finetune_msrvttqa.sh @@ -0,0 +1,35 @@ +NUM_NODES=1 +NUM_GPUS=8 +JOB_NAME="vtc_msrvttqa_lr1e5" +MODEL_PREFIX="." 
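+# run.py is a Sacred experiment: the key=value pairs after `with` in the srun command below
+# override config entries. It derives gradient-accumulation steps as
+#   batch_size // (per_gpu_batchsize * num_gpus * num_nodes)
+# e.g. 512 // (16 * 8 * 1) = 4 with the values used below, and asserts the result is > 0.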
+OUTPUT_DIR="outputs/outputs_clip_kc_nc_msrvttqa" +LOG_FILE="${OUTPUT_DIR}/logs/${JOB_NAME}/log.txt" +N_TASKS=`echo "${NUM_NODES}*${NUM_GPUS}" | bc` + +srun --async -p video -n${N_TASKS} --gres=gpu:${NUM_GPUS} \ + --ntasks-per-node=${NUM_GPUS} --cpus-per-task=14 -o ${LOG_FILE}\ + --quotatype reserved --job-name ${JOB_NAME} \ + --open-mode=append \ + python run.py with data_root=./meta_data num_gpus=${NUM_GPUS} num_nodes=${NUM_NODES} \ + per_gpu_batchsize=16 clip_finetune_msrvttqa \ + num_frames=8 \ + num_workers=8 \ + batch_size=512 \ + max_epoch=20 \ + model_dir=${OUTPUT_DIR}/models/${JOB_NAME} \ + log_dir=${OUTPUT_DIR}/result/${JOB_NAME} \ + resume_from=${OUTPUT_DIR}/models/${JOB_NAME}/last.ckpt \ + save_checkpoints_interval=1000 \ + learning_rate=1e-5 \ + clip_qa_type=vtc_cap \ + save_last=False \ + save_top_k=0 \ + max_steps=-1 \ + clip=/pathto/ViT-L-14.pt \ + clip_type=kc_new \ + load_path=/pathto/InternVideo-MM-L-14.ckpt + +rm batchscript-* + +while [ ! -f ${LOG_FILE} ] ; do sleep 1; done +tail -f ${LOG_FILE} \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/scripts/finetune_msvdqa.sh b/Downstream/multi-modalities-downstream/scripts/finetune_msvdqa.sh new file mode 100755 index 0000000..9ebfbfd --- /dev/null +++ b/Downstream/multi-modalities-downstream/scripts/finetune_msvdqa.sh @@ -0,0 +1,35 @@ +NUM_NODES=1 +NUM_GPUS=8 +JOB_NAME="vtc_msvd_lr1e5" +MODEL_PREFIX="." +OUTPUT_DIR="outputs/outputs_clip_kc_nc_msvdqa" +LOG_FILE="${OUTPUT_DIR}/logs/${JOB_NAME}/log.txt" +N_TASKS=`echo "${NUM_NODES}*${NUM_GPUS}" | bc` + +srun --async -p video -n${N_TASKS} --gres=gpu:${NUM_GPUS} \ + --ntasks-per-node=${NUM_GPUS} --cpus-per-task=14 -o ${LOG_FILE}\ + --quotatype reserved --job-name ${JOB_NAME} \ + --open-mode=append \ + python run.py with data_root=./meta_data num_gpus=${NUM_GPUS} num_nodes=${NUM_NODES} \ + per_gpu_batchsize=16 clip_finetune_msvdqa \ + num_frames=8 \ + num_workers=8 \ + batch_size=512 \ + max_epoch=20 \ + model_dir=${OUTPUT_DIR}/models/${JOB_NAME} \ + log_dir=${OUTPUT_DIR}/result/${JOB_NAME} \ + resume_from=${OUTPUT_DIR}/models/${JOB_NAME}/last.ckpt \ + save_checkpoints_interval=1000 \ + learning_rate=1e-5 \ + clip_qa_type=vtc_cap \ + save_last=False \ + save_top_k=0 \ + max_steps=-1 \ + clip=/pathto/ViT-L-14.pt \ + clip_type=kc_new \ + load_path=/pathto/InternVideo-MM-L-14.ckpt + +rm batchscript-* + +while [ ! -f ${LOG_FILE} ] ; do sleep 1; done +tail -f ${LOG_FILE} \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/scripts/finetune_tgifqa.sh b/Downstream/multi-modalities-downstream/scripts/finetune_tgifqa.sh new file mode 100755 index 0000000..dd51c36 --- /dev/null +++ b/Downstream/multi-modalities-downstream/scripts/finetune_tgifqa.sh @@ -0,0 +1,35 @@ +NUM_NODES=1 +NUM_GPUS=8 +JOB_NAME="vtc_tgifqa_lr1e5" +MODEL_PREFIX="." 
+OUTPUT_DIR="outputs/outputs_clip_kc_nc_tgifqa" +LOG_FILE="${OUTPUT_DIR}/logs/${JOB_NAME}/log.txt" +N_TASKS=`echo "${NUM_NODES}*${NUM_GPUS}" | bc` + +srun --async -p video -n${N_TASKS} --gres=gpu:${NUM_GPUS} \ + --ntasks-per-node=${NUM_GPUS} --cpus-per-task=16 -o ${LOG_FILE}\ + --quotatype reserved --job-name ${JOB_NAME} \ + --open-mode=append \ + python run.py with data_root=./meta_data num_gpus=${NUM_GPUS} num_nodes=${NUM_NODES} \ + per_gpu_batchsize=32 clip_finetune_tgifqa \ + num_frames=8 \ + num_workers=8 \ + batch_size=512 \ + max_epoch=20 \ + model_dir=${OUTPUT_DIR}/models/${JOB_NAME} \ + log_dir=${OUTPUT_DIR}/result/${JOB_NAME} \ + resume_from=${OUTPUT_DIR}/models/${JOB_NAME}/last.ckpt \ + save_checkpoints_interval=1000 \ + learning_rate=1e-5 \ + clip_qa_type=vtc_cap \ + save_last=False \ + save_top_k=0 \ + max_steps=-1 \ + clip=/pathto/ViT-L-14.pt \ + clip_type=kc_new \ + load_path=/pathto/InternVideo-MM-L-14.ckpt + +rm batchscript-* + +while [ ! -f ${LOG_FILE} ] ; do sleep 1; done +tail -f ${LOG_FILE} \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/scripts/zs_choice_lsmdc.sh b/Downstream/multi-modalities-downstream/scripts/zs_choice_lsmdc.sh new file mode 100755 index 0000000..22f8163 --- /dev/null +++ b/Downstream/multi-modalities-downstream/scripts/zs_choice_lsmdc.sh @@ -0,0 +1,43 @@ +set -e + +unset https_proxy +unset http_proxy +unset all_proxy +unset HTTPS_PROXY +unset HTTP_PROXY +unset ALL_PROXY +echo "Proxy is unset!" + +###################### zero-shot test ###################### +NUM_NODES=1 +NUM_GPUS=8 +BATCH_SIZE=`echo "1024*${NUM_NODES}" | bc` +JOB_NAME="zs_choice" +MODEL_PREFIX="." +OUTPUT_DIR="outputs/outputs_clip_kc_nc" +LOG_FILE="${OUTPUT_DIR}/logs/${JOB_NAME}/log.txt" +N_TASKS=`echo "${NUM_NODES}*${NUM_GPUS}" | bc` + +srun --async -p video -n${N_TASKS} --gres=gpu:${NUM_GPUS} \ + --ntasks-per-node=${NUM_GPUS} --cpus-per-task=14 -o ${LOG_FILE}\ + --quotatype auto --job-name ${JOB_NAME} \ + --open-mode=append \ + python run.py with data_root=s3://video_pub/ num_gpus=${NUM_GPUS} num_nodes=${NUM_NODES} \ + per_gpu_batchsize=128 clip_kc_nc_finetune_msrvttchoice test_only=True \ + batch_size=${BATCH_SIZE} \ + num_frames=8 num_workers=12 \ + model_dir=${MODEL_PREFIX}/${OUTPUT_DIR}/models/${JOB_NAME} \ + log_dir=${OUTPUT_DIR}/result/${JOB_NAME} \ + 'video_datasets=["lsmdc_choice"]' \ + clip_wiseft_coef=0.5 \ + clip=/pathto/ViT-L-14.pt \ + clip_type=kc_new \ + load_path=/pathto/InternVideo-MM-L-14.ckpt + +rm batchscript-* + + +rm batchscript-* + +while [ ! -f ${LOG_FILE} ] ; do sleep 1; done +tail -f ${LOG_FILE} \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/scripts/zs_choice_msrvtt.sh b/Downstream/multi-modalities-downstream/scripts/zs_choice_msrvtt.sh new file mode 100755 index 0000000..1a4d515 --- /dev/null +++ b/Downstream/multi-modalities-downstream/scripts/zs_choice_msrvtt.sh @@ -0,0 +1,40 @@ +set -e + +unset https_proxy +unset http_proxy +unset all_proxy +unset HTTPS_PROXY +unset HTTP_PROXY +unset ALL_PROXY +echo "Proxy is unset!" + +###################### zero-shot test ###################### +NUM_NODES=1 +NUM_GPUS=8 +BATCH_SIZE=`echo "1024*${NUM_NODES}" | bc` +JOB_NAME="zs_choice" +MODEL_PREFIX="." 
+OUTPUT_DIR="outputs/outputs_clip_kc_nc" +LOG_FILE="${OUTPUT_DIR}/logs/${JOB_NAME}/log.txt" +N_TASKS=`echo "${NUM_NODES}*${NUM_GPUS}" | bc` + +srun --async -p video -n${N_TASKS} --gres=gpu:${NUM_GPUS} \ + --ntasks-per-node=${NUM_GPUS} --cpus-per-task=14 -o ${LOG_FILE}\ + --quotatype auto --job-name ${JOB_NAME} \ + --open-mode=append \ + python run.py with data_root=s3://video_pub/ num_gpus=${NUM_GPUS} num_nodes=${NUM_NODES} \ + per_gpu_batchsize=128 clip_kc_nc_finetune_msrvttchoice test_only=True \ + batch_size=${BATCH_SIZE} \ + num_frames=8 num_workers=12 \ + model_dir=${MODEL_PREFIX}/${OUTPUT_DIR}/models/${JOB_NAME} \ + log_dir=${OUTPUT_DIR}/result/${JOB_NAME} \ + 'video_datasets=["msrvtt_choice"]' \ + clip_wiseft_coef=0.5 \ + clip=/pathto/ViT-L-14.pt \ + clip_type=kc_new \ + load_path=/pathto/InternVideo-MM-L-14.ckpt + +rm batchscript-* + +while [ ! -f ${LOG_FILE} ] ; do sleep 1; done +tail -f ${LOG_FILE} \ No newline at end of file diff --git a/Downstream/multi-modalities-downstream/scripts/zs_classify.sh b/Downstream/multi-modalities-downstream/scripts/zs_classify.sh new file mode 100755 index 0000000..5a5fa2b --- /dev/null +++ b/Downstream/multi-modalities-downstream/scripts/zs_classify.sh @@ -0,0 +1,39 @@ +set -e + +unset https_proxy +unset http_proxy +unset all_proxy +unset HTTPS_PROXY +unset HTTP_PROXY +unset ALL_PROXY +echo "Proxy is unset!" + +###################### zero-shot test ###################### +NUM_NODES=1 +NUM_GPUS=8 +BATCH_SIZE=`echo "256*${NUM_NODES}" | bc` +JOB_NAME="zs_classify" +MODEL_PREFIX="." +OUTPUT_DIR="outputs/outputs_clip_kc_nc" +LOG_FILE="${OUTPUT_DIR}/logs/${JOB_NAME}/log.txt" +N_TASKS=`echo "${NUM_NODES}*${NUM_GPUS}" | bc` + +srun --async -p video -n${N_TASKS} --gres=gpu:${NUM_GPUS} \ + --ntasks-per-node=${NUM_GPUS} --cpus-per-task=14 -o ${LOG_FILE}\ + --quotatype auto --job-name ${JOB_NAME} \ + --open-mode=append \ + python run.py with data_root=s3://video_pub/ num_gpus=${NUM_GPUS} num_nodes=${NUM_NODES} \ + per_gpu_batchsize=32 clip_finetune_zs_k400 test_only=True \ + batch_size=${BATCH_SIZE} \ + num_frames=8 num_workers=12 \ + model_dir=${MODEL_PREFIX}/${OUTPUT_DIR}/models/${JOB_NAME} \ + log_dir=${OUTPUT_DIR}/result/${JOB_NAME} \ + clip_wiseft_coef=0.5 \ + clip=/pathto/ViT-L-14.pt \ + clip_type=kc_new \ + load_path=/pathto/InternVideo-MM-L-14.ckpt + +rm batchscript-* + +while [ ! -f ${LOG_FILE} ] ; do sleep 1; done +tail -f ${LOG_FILE} \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..838ad89 --- /dev/null +++ b/LICENSE @@ -0,0 +1,203 @@ +Copyright (c) OpenGVLab. All rights reserved + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/Media/download.png b/Media/download.png new file mode 100644 index 0000000..561ca8f Binary files /dev/null and b/Media/download.png differ diff --git a/Pretrain/Multi-Modalities-Pretraining/.gitignore b/Pretrain/Multi-Modalities-Pretraining/.gitignore new file mode 100644 index 0000000..6769e21 --- /dev/null +++ b/Pretrain/Multi-Modalities-Pretraining/.gitignore @@ -0,0 +1,160 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. 
For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ \ No newline at end of file diff --git a/Pretrain/Multi-Modalities-Pretraining/InternVideo/__init__.py b/Pretrain/Multi-Modalities-Pretraining/InternVideo/__init__.py new file mode 100644 index 0000000..d0130cc --- /dev/null +++ b/Pretrain/Multi-Modalities-Pretraining/InternVideo/__init__.py @@ -0,0 +1 @@ +from .internvideo import * \ No newline at end of file diff --git a/Pretrain/Multi-Modalities-Pretraining/InternVideo/bpe_simple_vocab_16e6.txt.gz b/Pretrain/Multi-Modalities-Pretraining/InternVideo/bpe_simple_vocab_16e6.txt.gz new file mode 100644 index 0000000..7b5088a Binary files /dev/null and b/Pretrain/Multi-Modalities-Pretraining/InternVideo/bpe_simple_vocab_16e6.txt.gz differ diff --git a/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/__init__.py b/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/__init__.py new file mode 100644 index 0000000..dcc5619 --- /dev/null +++ b/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/__init__.py @@ -0,0 +1 @@ +from .clip import * diff --git a/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/clip.py b/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/clip.py new file mode 100644 index 0000000..f0972e2 --- /dev/null +++ b/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/clip.py @@ -0,0 +1,252 @@ +import hashlib +import os +import urllib +import warnings +from typing import Any, Union, List +from pkg_resources import packaging + +import torch +from PIL import Image +from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize +from tqdm import tqdm + +from .model import build_model +from .simple_tokenizer import SimpleTokenizer as _Tokenizer + +try: + from torchvision.transforms import InterpolationMode + BICUBIC = InterpolationMode.BICUBIC +except ImportError: + BICUBIC = Image.BICUBIC + + +if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"): + warnings.warn("PyTorch version 1.7.1 or higher is recommended") + + +__all__ = ["available_models", "load", "tokenize"] +# _tokenizer = _Tokenizer() + +_MODELS = { + "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", + "ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt", + "ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt", +} + + +def _download(url: str, root: str): + os.makedirs(root, exist_ok=True) + filename = os.path.basename(url) + + expected_sha256 = url.split("/")[-2] + download_target = os.path.join(root, filename) + + if os.path.exists(download_target) and not os.path.isfile(download_target): + raise RuntimeError(f"{download_target} exists and is not a regular file") + + if os.path.isfile(download_target): + if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256: + return download_target + else: + warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file") + + with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: + with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop: + while True: + buffer = source.read(8192) + if not 
buffer: + break + + output.write(buffer) + loop.update(len(buffer)) + + if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256: + raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match") + + return download_target + + +def _convert_image_to_rgb(image): + return image.convert("RGB") + + +def _transform(n_px): + return Compose([ + Resize(n_px, interpolation=BICUBIC), + CenterCrop(n_px), + _convert_image_to_rgb, + ToTensor(), + Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), + ]) + + +def available_models() -> List[str]: + """Returns the names of available CLIP models""" + return list(_MODELS.keys()) + + +def load( + name: str, + device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", + jit: bool = False, download_root: str = None, + # evl + n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.0, 0.0, 0.0, 0.0], cls_dropout=0.5, t_size=8, spatial_size=14, + use_t_conv=True, use_image_attnmap=True, use_t_pos_embed=True, dropout=0.0, no_pretrain=False, init_zero=True, + use_checkpoint=False, checkpoint_num=[0, 0, 0], + ): + """Load a CLIP model + + Parameters + ---------- + name : str + A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict + + device : Union[str, torch.device] + The device to put the loaded model + + jit : bool + Whether to load the optimized JIT model or more hackable non-JIT model (default). + + download_root: str + path to download the model files; by default, it uses "~/.cache/clip" + + Returns + ------- + model : torch.nn.Module + The CLIP model + + preprocess : Callable[[PIL.Image], torch.Tensor] + A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input + """ + if name in _MODELS: + model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip")) + elif os.path.isfile(name): + model_path = name + else: + raise RuntimeError(f"Model {name} not found; available models = {available_models()}") + + with open(model_path, 'rb') as opened_file: + try: + # loading JIT archive + model = torch.jit.load(opened_file, map_location=device if jit else "cpu").eval() + state_dict = None + except RuntimeError: + # loading saved state dict + if jit: + warnings.warn(f"File {model_path} is not a JIT archive. 
Loading as a state dict instead") + jit = False + state_dict = torch.load(model_path, map_location="cpu") + + if not jit: + model = build_model( + state_dict or model.state_dict(), + n_layers=n_layers, n_dim=n_dim, n_head=n_head, mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, cls_dropout=cls_dropout, t_size=t_size, spatial_size=spatial_size, + use_t_conv=use_t_conv, use_image_attnmap=use_image_attnmap, use_t_pos_embed=use_t_pos_embed, no_pretrain=no_pretrain, + init_zero=init_zero, use_checkpoint=use_checkpoint, checkpoint_num=checkpoint_num, + ).to(device) + if str(device) == "cpu": + model.float() + return model, _transform(model.visual.input_resolution) + + # patch the device names + device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]) + device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1] + + def patch_device(module): + try: + graphs = [module.graph] if hasattr(module, "graph") else [] + except RuntimeError: + graphs = [] + + if hasattr(module, "forward1"): + graphs.append(module.forward1.graph) + + for graph in graphs: + for node in graph.findAllNodes("prim::Constant"): + if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"): + node.copyAttributes(device_node) + + model.apply(patch_device) + patch_device(model.encode_image) + patch_device(model.encode_text) + + # patch dtype to float32 on CPU + if str(device) == "cpu": + float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[]) + float_input = list(float_holder.graph.findNode("aten::to").inputs())[1] + float_node = float_input.node() + + def patch_float(module): + try: + graphs = [module.graph] if hasattr(module, "graph") else [] + except RuntimeError: + graphs = [] + + if hasattr(module, "forward1"): + graphs.append(module.forward1.graph) + + for graph in graphs: + for node in graph.findAllNodes("aten::to"): + inputs = list(node.inputs()) + for i in [1, 2]: # dtype can be the second or third argument to aten::to() + if inputs[i].node()["value"] == 5: + inputs[i].node().copyAttributes(float_node) + + model.apply(patch_float) + patch_float(model.encode_image) + patch_float(model.encode_text) + + model.float() + + return model, _transform(model.input_resolution.item()) + + +def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False, return_special_tokens_mask: bool = False) -> Union[torch.IntTensor, torch.LongTensor, torch.BoolTensor]: + """ + Returns the tokenized representation of given input string(s) + + Parameters + ---------- + texts : Union[str, List[str]] + An input string or a list of input strings to tokenize + + context_length : int + The context length to use; all CLIP models use 77 as the context length + + truncate: bool + Whether to truncate the text in case its encoding is longer than the context length + + Returns + ------- + A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]. + We return LongTensor when torch version is <1.8.0, since older index_select requires indices to be long. 
+ """ + if isinstance(texts, str): + texts = [texts] + + sot_token = _tokenizer.encoder["<|startoftext|>"] + eot_token = _tokenizer.encoder["<|endoftext|>"] + all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts] + if packaging.version.parse(torch.__version__) < packaging.version.parse("1.8.0"): + result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) + else: + result = torch.zeros(len(all_tokens), context_length, dtype=torch.int) + + special_tokens_mask = torch.zeros(len(all_tokens), context_length, dtype=torch.bool) + + for i, tokens in enumerate(all_tokens): + if len(tokens) > context_length: + if truncate: + tokens = tokens[:context_length] + tokens[-1] = eot_token + else: + raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}") + result[i, :len(tokens)] = torch.tensor(tokens) + special_tokens_mask[i, len(tokens):] = 1 + + if return_special_tokens_mask: + return result, special_tokens_mask + + return result diff --git a/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/model.py b/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/model.py new file mode 100644 index 0000000..0e98c73 --- /dev/null +++ b/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/model.py @@ -0,0 +1,329 @@ +from collections import OrderedDict +from typing import Tuple, Union + +import numpy as np +import torch +import torch.nn.functional as F +from torch import nn +from torch.utils.checkpoint import checkpoint_sequential + +from . import utils + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x: torch.Tensor): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None): + super().__init__() + + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential( + OrderedDict( + [ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)), + ] + ) + ) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + def attention(self, x: torch.Tensor): + self.attn_mask = ( + self.attn_mask.to(dtype=x.dtype, device=x.device) + if self.attn_mask is not None + else None + ) + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def forward(self, x: torch.Tensor): + x = x + self.attention(self.ln_1(x)) + x = x + self.mlp(self.ln_2(x)) + return x + + +class Transformer(nn.Module): + def __init__( + self, + width: int, + layers: int, + heads: int, + attn_mask: torch.Tensor = None, + use_checkpoint=False, + checkpoint_num=[0, 0], + ): + super().__init__() + self.width = width + self.layers = layers + self.resblocks = nn.Sequential( + *[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)] + ) + self.use_checkpoint = use_checkpoint + self.checkpoint_num = checkpoint_num + + def forward(self, x: torch.Tensor): + if self.use_checkpoint and self.checkpoint_num[1] > 0: + segments = min(len(self.resblocks), self.checkpoint_num[1]) + return checkpoint_sequential(self.resblocks, segments, x) + else: + return self.resblocks(x) + + +class VideoIntern(nn.Module): + def __init__( + self, + embed_dim: int, + # vision + vision_width: int, + # text + 
context_length: int, + vocab_size: int, + transformer_width: int, + transformer_heads: int, + transformer_layers: int, + # uni + n_layers=4, + n_dim=768, + n_head=12, + drop_path_rate=0.0, + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=8, + use_image_attnmap=True, + backbone='vit_2plus1d_dw_bias_b16', + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + use_checkpoint=False, + checkpoint_num=[0], + ): + super().__init__() + + self.vision_width = n_dim + + self.context_length = context_length + + self.visual = utils.__dict__[backbone]( + pretrained=False, + t_size=t_size, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + n_dim=n_dim, + n_head=n_head, + return_list=return_list, + drop_path_rate=drop_path_rate, + backbone_drop_path_rate=drop_path_rate, + use_checkpoint=use_checkpoint, + checkpoint_num=checkpoint_num, + ) + + self.visual_ln_post = nn.LayerNorm(n_dim) + scale = n_dim**-0.5 + self.visual_proj = nn.Parameter(scale * torch.randn(n_dim, embed_dim)) + self.return_qk = use_image_attnmap + self.return_num = n_layers + + self.transformer = Transformer( + width=transformer_width, + layers=transformer_layers, + heads=transformer_heads, + attn_mask=self.build_attention_mask(), + use_checkpoint=use_checkpoint, + checkpoint_num=checkpoint_num, + ) + + self.vocab_size = vocab_size + self.token_embedding = nn.Embedding(vocab_size, transformer_width) + self.positional_embedding = nn.Parameter( + torch.empty(self.context_length, transformer_width) + ) + self.ln_final = LayerNorm(transformer_width) + + self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim)) + self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) + + self.embed_dim = embed_dim + + # We seperate the mask embedding to load pretrained model + self.text_mask_embedding = nn.Parameter(torch.empty(1, 1, transformer_width)) + + # # To keep the num_embeddings unchanged, we add this to embedded text + # self.eot_token_embedding = nn.Parameter(torch.empty(1, transformer_width)) + + self.initialize_parameters() + + def initialize_parameters(self): + nn.init.normal_(self.token_embedding.weight, std=0.02) + nn.init.normal_(self.positional_embedding, std=0.01) + nn.init.normal_(self.text_mask_embedding, std=0.02) + # nn.init.constant_(self.eot_token_embedding, 0.0) + + proj_std = (self.transformer.width**-0.5) * ( + (2 * self.transformer.layers) ** -0.5 + ) + attn_std = self.transformer.width**-0.5 + fc_std = (2 * self.transformer.width) ** -0.5 + for block in self.transformer.resblocks: + nn.init.normal_(block.attn.in_proj_weight, std=attn_std) + nn.init.normal_(block.attn.out_proj.weight, std=proj_std) + nn.init.normal_(block.mlp.c_fc.weight, std=fc_std) + nn.init.normal_(block.mlp.c_proj.weight, std=proj_std) + + if self.text_projection is not None: + nn.init.normal_(self.text_projection, std=self.transformer.width**-0.5) + + nn.init.constant_(self.visual_ln_post.weight, 1.0) + nn.init.constant_(self.visual_ln_post.bias, 0.0) + + def build_attention_mask(self): + # lazily create causal attention mask, with full attention between the vision tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.empty(self.context_length, self.context_length) + mask.fill_(float("-inf")) + mask.triu_(1) # zero out the lower diagonal + return mask + + @property + def dtype(self): + return self.visual.conv1.weight.dtype + + def encode_video( + self, video, return_all_feats=False, masked_indices=None, mode="video" + ): + # video: [N, C, T, H, W] + feats = self.visual(video, 
return_all_feats=return_all_feats, mode=mode) + if return_all_feats: + x, feats = feats + else: + x = feats + x = self.visual_ln_post(x) + if self.visual_proj is not None: + x = x @ self.visual_proj + + if return_all_feats: + return x, feats # [N, C], [L, N, T, C] + + return x + + def encode_text(self, text, masked_indices=None, return_all_feats=False): + # assert (text.max(dim=-1)[0] + 1 == self.token_embedding.num_embeddings).all(), \ + # "The last token of each sentence should be eot_token, check the input" + + x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model] + # x[torch.arange(x.shape[0]), text.argmax(dim=-1)] += self.eot_token_embedding + + if masked_indices is not None: + x[masked_indices] = self.text_mask_embedding + + x = x + self.positional_embedding.type(self.dtype) + x = x.permute(1, 0, 2) # NLD -> LND + x = self.transformer(x) + x = x.permute(1, 0, 2) # LND -> NLD + x = self.ln_final(x).type(self.dtype) + + # x.shape = [batch_size, n_ctx, transformer.width] + # take features from the eot embedding (eot_token is the highest number in each sequence) + feats = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] + if self.text_projection is not None: + feats = feats @ self.text_projection + + if return_all_feats: + return feats, x + + return feats + + +def build_model( + state_dict: dict, + n_layers=4, + n_dim=768, + n_head=12, + mlp_factor=4.0, + drop_path_rate=0.0, + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, + t_size=8, + spatial_size=14, + use_t_conv=True, + use_image_attnmap=True, + use_t_pos_embed=True, + no_pretrain=False, + init_zero=True, + use_checkpoint=False, + checkpoint_num=[0], +): + vision_width = state_dict["visual.conv1.weight"].shape[0] + vision_layers = len( + [ + k + for k in state_dict.keys() + if k.startswith("visual.") and k.endswith(".attn.in_proj_weight") + ] + ) + vision_patch_size = state_dict["visual.conv1.weight"].shape[-1] + grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5) + image_resolution = vision_patch_size * grid_size + + embed_dim = state_dict["text_projection"].shape[1] + context_length = state_dict["positional_embedding"].shape[0] + vocab_size = state_dict["token_embedding.weight"].shape[0] + transformer_width = state_dict["ln_final.weight"].shape[0] + transformer_heads = transformer_width // 64 + transformer_layers = len( + set( + k.split(".")[2] + for k in state_dict + if k.startswith(f"transformer.resblocks") + ) + ) + + vision_width = state_dict["visual_proj"].shape[0] + n_dim = vision_width + if vision_width == 768: + backbone = "vit_only_global_b16" + n_head = 12 + return_list = [8, 9, 10, 11] + elif vision_width == 1024: + backbone = "vit_only_global_l14" + n_head = 16 + return_list = [20, 21, 22, 23] + else: + raise NotImplementedError + + model = VideoIntern( + embed_dim, + vision_width, + context_length, + vocab_size, + transformer_width, + transformer_heads, + transformer_layers, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + t_size=t_size, + use_image_attnmap=use_image_attnmap, + backbone=backbone, + return_list=return_list, + use_checkpoint=use_checkpoint, + checkpoint_num=checkpoint_num, + ) + + model.load_state_dict(state_dict, strict=False) + + return model.eval() diff --git a/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/simple_tokenizer.py b/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/simple_tokenizer.py new file mode 100644 index 
0000000..0a66286
--- /dev/null
+++ b/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/simple_tokenizer.py
@@ -0,0 +1,132 @@
+import gzip
+import html
+import os
+from functools import lru_cache
+
+import ftfy
+import regex as re
+
+
+@lru_cache()
+def default_bpe():
+    return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
+
+
+@lru_cache()
+def bytes_to_unicode():
+    """
+    Returns a list of utf-8 bytes and a corresponding list of unicode strings.
+    The reversible bpe codes work on unicode strings.
+    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
+    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
+    This is a significant percentage of your normal, say, 32K bpe vocab.
+    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
+    And avoids mapping to whitespace/control characters the bpe code barfs on.
+    """
+    bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
+    cs = bs[:]
+    n = 0
+    for b in range(2**8):
+        if b not in bs:
+            bs.append(b)
+            cs.append(2**8+n)
+            n += 1
+    cs = [chr(n) for n in cs]
+    return dict(zip(bs, cs))
+
+
+def get_pairs(word):
+    """Return set of symbol pairs in a word.
+    Word is represented as tuple of symbols (symbols being variable-length strings).
+    """
+    pairs = set()
+    prev_char = word[0]
+    for char in word[1:]:
+        pairs.add((prev_char, char))
+        prev_char = char
+    return pairs
+
+
+def basic_clean(text):
+    text = ftfy.fix_text(text)
+    text = html.unescape(html.unescape(text))
+    return text.strip()
+
+
+def whitespace_clean(text):
+    text = re.sub(r'\s+', ' ', text)
+    text = text.strip()
+    return text
+
+
+class SimpleTokenizer(object):
+    def __init__(self, bpe_path: str = default_bpe()):
+        self.byte_encoder = bytes_to_unicode()
+        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
+        merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
+        merges = merges[1:49152-256-2+1]
+        merges = [tuple(merge.split()) for merge in merges]
+        vocab = list(bytes_to_unicode().values())
+        vocab = vocab + [v+'</w>' for v in vocab]
+        for merge in merges:
+            vocab.append(''.join(merge))
+        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
+        self.encoder = dict(zip(vocab, range(len(vocab))))
+        self.decoder = {v: k for k, v in self.encoder.items()}
+        self.bpe_ranks = dict(zip(merges, range(len(merges))))
+        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
+        self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
+
+    def bpe(self, token):
+        if token in self.cache:
+            return self.cache[token]
+        word = tuple(token[:-1]) + ( token[-1] + '</w>',)
+        pairs = get_pairs(word)
+
+        if not pairs:
+            return token+'</w>'
+
+        while True:
+            bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
+            if bigram not in self.bpe_ranks:
+                break
+            first, second = bigram
+            new_word = []
+            i = 0
+            while i < len(word):
+                try:
+                    j = word.index(first, i)
+                    new_word.extend(word[i:j])
+                    i = j
+                except:
+                    new_word.extend(word[i:])
+                    break
+
+                if word[i] == first and i < len(word)-1 and word[i+1] == second:
+                    new_word.append(first+second)
+                    i += 2
+                else:
+                    new_word.append(word[i])
+                    i += 1
+            new_word = tuple(new_word)
+            word = new_word
+            if len(word) == 1:
+                break
+            else:
+                pairs = get_pairs(word)
+        word = ' '.join(word)
+        self.cache[token] = word
+        return word
+
+    def encode(self, text):
+        bpe_tokens = []
+        text = whitespace_clean(basic_clean(text)).lower()
+        for token in re.findall(self.pat, text):
+            token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
+            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
+        return bpe_tokens
+
+    def decode(self, tokens):
+        text = ''.join([self.decoder[token] for token in tokens])
+        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
+        return text
diff --git a/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/utils/__init__.py b/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/utils/__init__.py
new file mode 100644
index 0000000..2dd97f0
--- /dev/null
+++ b/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/utils/__init__.py
@@ -0,0 +1,2 @@
+# from .evl_module import TransformerDecoder
+from .clip_vit_only_global import vit_only_global_b32, vit_only_global_b16, vit_only_global_l14, vit_only_global_l14_336
\ No newline at end of file
diff --git a/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/utils/attention.py b/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/utils/attention.py
new file mode 100644
index 0000000..28ce56d
--- /dev/null
+++ b/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/utils/attention.py
@@ -0,0 +1,203 @@
+#!/usr/bin/env python
+
+import warnings
+from typing import Tuple, Optional
+
+import torch
+from torch import Tensor
+from torch.nn.modules.linear import Linear
+from torch.nn.init import xavier_uniform_
+from torch.nn.init import constant_
+from torch.nn.init import xavier_normal_
+from torch.nn.parameter import Parameter
+from torch.nn.modules.module import Module
+
+from .attention_module import multi_head_attention_forward
+
+
+class _LinearWithBias(Linear):
+    bias: Tensor
+
+    def __init__(self, in_features: int, out_features: int) -> None:
+        super().__init__(in_features, out_features, bias=True)
+
+
+class MultiheadAttention(Module):
+    r"""Allows the model to jointly attend to information
+    from different representation subspaces.
+    See reference: Attention Is All You Need
+
+    .. math::
+        \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
+        \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)
+
+    Args:
+        embed_dim: total dimension of the model.
+        num_heads: parallel attention heads.
+        dropout: a Dropout layer on attn_output_weights. Default: 0.0.
+        bias: add bias as module parameter. Default: True.
+        add_bias_kv: add bias to the key and value sequences at dim=0.
+        add_zero_attn: add a new batch of zeros to the key and
+                       value sequences at dim=1.
+        kdim: total number of features in key. Default: None.
+        vdim: total number of features in value. Default: None.
+
+        Note: if kdim and vdim are None, they will be set to embed_dim such that
+        query, key, and value have the same number of features.
+ + Examples:: + + >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads) + >>> attn_output, attn_output_weights = multihead_attn(query, key, value) + """ + bias_k: Optional[torch.Tensor] + bias_v: Optional[torch.Tensor] + + def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None): + super(MultiheadAttention, self).__init__() + self.embed_dim = embed_dim + self.kdim = kdim if kdim is not None else embed_dim + self.vdim = vdim if vdim is not None else embed_dim + self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim + + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" + + if self._qkv_same_embed_dim is False: + self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim)) + self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim)) + self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim)) + self.register_parameter('in_proj_weight', None) + else: + self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim)) + self.register_parameter('q_proj_weight', None) + self.register_parameter('k_proj_weight', None) + self.register_parameter('v_proj_weight', None) + + if bias: + self.in_proj_bias = Parameter(torch.empty(3 * embed_dim)) + else: + self.register_parameter('in_proj_bias', None) + self.out_proj = _LinearWithBias(embed_dim, embed_dim) + + if add_bias_kv: + self.bias_k = Parameter(torch.empty(1, 1, embed_dim)) + self.bias_v = Parameter(torch.empty(1, 1, embed_dim)) + else: + self.bias_k = self.bias_v = None + + self.add_zero_attn = add_zero_attn + + self._reset_parameters() + + def _reset_parameters(self): + if self._qkv_same_embed_dim: + xavier_uniform_(self.in_proj_weight) + else: + xavier_uniform_(self.q_proj_weight) + xavier_uniform_(self.k_proj_weight) + xavier_uniform_(self.v_proj_weight) + + if self.in_proj_bias is not None: + constant_(self.in_proj_bias, 0.) + constant_(self.out_proj.bias, 0.) + if self.bias_k is not None: + xavier_normal_(self.bias_k) + if self.bias_v is not None: + xavier_normal_(self.bias_v) + + def __setstate__(self, state): + # Support loading old MultiheadAttention checkpoints generated by v1.1.0 + if '_qkv_same_embed_dim' not in state: + state['_qkv_same_embed_dim'] = True + + super(MultiheadAttention, self).__setstate__(state) + + def forward(self, query, key, value, key_padding_mask=None, + need_weights=True, attn_mask=None, return_qk=False): + # type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor], bool) -> Tuple[Tensor, Optional[Tensor]] + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. When given a binary mask and a value is True, + the corresponding value on the attention layer will be ignored. When given + a byte mask and a value is non-zero, the corresponding value on the attention + layer will be ignored + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. 
+ + Shape: + - Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the position + with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensure that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` + is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + - return_qk: whether return Q and K. + + - Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. + """ + if return_qk: + if not self._qkv_same_embed_dim: + q, k, attn_output, attn_output_weights = multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, use_separate_proj_weight=True, + q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight, + v_proj_weight=self.v_proj_weight, return_qk=True) + else: + q, k, attn_output, attn_output_weights = multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, return_qk=True) + return q, k, attn_output, attn_output_weights + else: + if not self._qkv_same_embed_dim: + return multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, use_separate_proj_weight=True, + q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight, + v_proj_weight=self.v_proj_weight) + else: + return multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, 
self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask) diff --git a/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/utils/attention_module.py b/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/utils/attention_module.py new file mode 100644 index 0000000..277fd32 --- /dev/null +++ b/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/utils/attention_module.py @@ -0,0 +1,315 @@ +r"""Functional interface""" +import warnings +import math + +import torch +from torch import _VF +from torch._jit_internal import Optional, Tuple +from torch.overrides import has_torch_function, handle_torch_function + +from torch.nn.functional import pad, linear, softmax, dropout + +Tensor = torch.Tensor + + +def multi_head_attention_forward(query: Tensor, + key: Tensor, + value: Tensor, + embed_dim_to_check: int, + num_heads: int, + in_proj_weight: Tensor, + in_proj_bias: Tensor, + bias_k: Optional[Tensor], + bias_v: Optional[Tensor], + add_zero_attn: bool, + dropout_p: float, + out_proj_weight: Tensor, + out_proj_bias: Tensor, + training: bool = True, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + use_separate_proj_weight: bool = False, + q_proj_weight: Optional[Tensor] = None, + k_proj_weight: Optional[Tensor] = None, + v_proj_weight: Optional[Tensor] = None, + static_k: Optional[Tensor] = None, + static_v: Optional[Tensor] = None, + return_qk: bool = False + ) -> Tuple[Tensor, Optional[Tensor]]: + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + embed_dim_to_check: total dimension of the model. + num_heads: parallel attention heads. + in_proj_weight, in_proj_bias: input projection weight and bias. + bias_k, bias_v: bias of the key and value sequences to be added at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + dropout_p: probability of an element to be zeroed. + out_proj_weight, out_proj_bias: the output projection weight and bias. + training: apply dropout if is ``True``. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. This is an binary mask. When the value is True, + the corresponding value on the attention layer will be filled with -inf. + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. + use_separate_proj_weight: the function accept the proj. weights for query, key, + and value in different forms. If false, in_proj_weight will be used, which is + a combination of q_proj_weight, k_proj_weight, v_proj_weight. + q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias. + static_k, static_v: static key and value used for attention operators. + + + Shape: + Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. 
+ - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions + will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` + are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + - return_qk: whether return Q and K. + + Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. + """ + if not torch.jit.is_scripting(): + tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, + out_proj_weight, out_proj_bias) + if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops): + return handle_torch_function( + multi_head_attention_forward, tens_ops, query, key, value, + embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias, + bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight, + out_proj_bias, training=training, key_padding_mask=key_padding_mask, + need_weights=need_weights, attn_mask=attn_mask, + use_separate_proj_weight=use_separate_proj_weight, + q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight, + v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v) + tgt_len, bsz, embed_dim = query.size() + assert embed_dim == embed_dim_to_check + # allow MHA to have different sizes for the feature dimension + assert key.size(0) == value.size(0) and key.size(1) == value.size(1) + + head_dim = embed_dim // num_heads + assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads" + scaling = float(head_dim) ** -0.5 + + if not use_separate_proj_weight: + if torch.equal(query, key) and torch.equal(key, value): + # self-attention + q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1) + + elif torch.equal(key, value): + # encoder-decoder attention + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = linear(query, _w, _b) + + if key is None: + assert value is None + k = None + v = None + else: + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = None + _w = in_proj_weight[_start:, :] + if 
_b is not None: + _b = _b[_start:] + k, v = linear(key, _w, _b).chunk(2, dim=-1) + + else: + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = linear(query, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = embed_dim * 2 + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + k = linear(key, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim * 2 + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + v = linear(value, _w, _b) + else: + q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight) + len1, len2 = q_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == query.size(-1) + + k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight) + len1, len2 = k_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == key.size(-1) + + v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight) + len1, len2 = v_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == value.size(-1) + + if in_proj_bias is not None: + q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim]) + k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)]) + v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):]) + else: + q = linear(query, q_proj_weight_non_opt, in_proj_bias) + k = linear(key, k_proj_weight_non_opt, in_proj_bias) + v = linear(value, v_proj_weight_non_opt, in_proj_bias) + q = q * scaling + + if attn_mask is not None: + assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \ + attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \ + 'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype) + if attn_mask.dtype == torch.uint8: + warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.") + attn_mask = attn_mask.to(torch.bool) + + if attn_mask.dim() == 2: + attn_mask = attn_mask.unsqueeze(0) + if list(attn_mask.size()) != [1, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 2D attn_mask is not correct.') + elif attn_mask.dim() == 3: + if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 3D attn_mask is not correct.') + else: + raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim())) + # attn_mask's dim is 3 now. + + # convert ByteTensor key_padding_mask to bool + if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8: + warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.") + key_padding_mask = key_padding_mask.to(torch.bool) + + if bias_k is not None and bias_v is not None: + if static_k is None and static_v is None: + k = torch.cat([k, bias_k.repeat(1, bsz, 1)]) + v = torch.cat([v, bias_v.repeat(1, bsz, 1)]) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + else: + assert static_k is None, "bias cannot be added to static key." + assert static_v is None, "bias cannot be added to static value." 
+ else: + assert bias_k is None + assert bias_v is None + + # L, N, E + if return_qk: + return_q = q.clone() / scaling + return_k = k.clone() + + q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1) + if k is not None: + k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + if v is not None: + v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + + if static_k is not None: + assert static_k.size(0) == bsz * num_heads + assert static_k.size(2) == head_dim + k = static_k + + if static_v is not None: + assert static_v.size(0) == bsz * num_heads + assert static_v.size(2) == head_dim + v = static_v + + src_len = k.size(1) + + if key_padding_mask is not None: + assert key_padding_mask.size(0) == bsz + assert key_padding_mask.size(1) == src_len + + if add_zero_attn: + src_len += 1 + k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1) + v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + + attn_output_weights = torch.bmm(q, k.transpose(1, 2)) + assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len] + + if attn_mask is not None: + if attn_mask.dtype == torch.bool: + attn_output_weights.masked_fill_(attn_mask, float('-inf')) + else: + attn_output_weights += attn_mask + + + if key_padding_mask is not None: + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + attn_output_weights = attn_output_weights.masked_fill( + key_padding_mask.unsqueeze(1).unsqueeze(2), + float('-inf'), + ) + attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len) + + attn_output_weights = softmax( + attn_output_weights, dim=-1) + attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training) + + attn_output = torch.bmm(attn_output_weights, v) + assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim] + attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) + attn_output = linear(attn_output, out_proj_weight, out_proj_bias) + + if return_qk: + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return return_q, return_k, attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return return_q, return_k, attn_output, None + else: + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return attn_output, None diff --git a/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/utils/attention_module_bias.py b/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/utils/attention_module_bias.py new file mode 100644 index 0000000..969eedd --- /dev/null +++ b/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/utils/attention_module_bias.py @@ -0,0 +1,318 @@ +r"""Functional interface""" +import warnings +import math + +import torch +from torch import _VF +from torch._jit_internal import Optional, Tuple +from torch.overrides import has_torch_function, handle_torch_function + +from torch.nn.functional import pad, linear, softmax, dropout + +Tensor = torch.Tensor + + +def multi_head_attention_forward(query: 
Tensor, + key: Tensor, + value: Tensor, + embed_dim_to_check: int, + num_heads: int, + in_proj_weight: Tensor, + in_proj_bias: Tensor, + bias_k: Optional[Tensor], + bias_v: Optional[Tensor], + add_zero_attn: bool, + dropout_p: float, + out_proj_weight: Tensor, + out_proj_bias: Tensor, + training: bool = True, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + use_separate_proj_weight: bool = False, + q_proj_weight: Optional[Tensor] = None, + k_proj_weight: Optional[Tensor] = None, + v_proj_weight: Optional[Tensor] = None, + static_k: Optional[Tensor] = None, + static_v: Optional[Tensor] = None, + return_qk: bool = False, + rpb: Tensor = None, + ) -> Tuple[Tensor, Optional[Tensor]]: + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + embed_dim_to_check: total dimension of the model. + num_heads: parallel attention heads. + in_proj_weight, in_proj_bias: input projection weight and bias. + bias_k, bias_v: bias of the key and value sequences to be added at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + dropout_p: probability of an element to be zeroed. + out_proj_weight, out_proj_bias: the output projection weight and bias. + training: apply dropout if is ``True``. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. This is an binary mask. When the value is True, + the corresponding value on the attention layer will be filled with -inf. + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. + use_separate_proj_weight: the function accept the proj. weights for query, key, + and value in different forms. If false, in_proj_weight will be used, which is + a combination of q_proj_weight, k_proj_weight, v_proj_weight. + q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias. + static_k, static_v: static key and value used for attention operators. + + + Shape: + Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions + will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. 
If a BoolTensor is provided, positions with ``True`` + are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, + N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. + - return_qk: whether return Q and K. + - rpb: relative postion bias + + Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. + """ + if not torch.jit.is_scripting(): + tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, + out_proj_weight, out_proj_bias) + if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops): + return handle_torch_function( + multi_head_attention_forward, tens_ops, query, key, value, + embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias, + bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight, + out_proj_bias, training=training, key_padding_mask=key_padding_mask, + need_weights=need_weights, attn_mask=attn_mask, + use_separate_proj_weight=use_separate_proj_weight, + q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight, + v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v) + tgt_len, bsz, embed_dim = query.size() + assert embed_dim == embed_dim_to_check + # allow MHA to have different sizes for the feature dimension + assert key.size(0) == value.size(0) and key.size(1) == value.size(1) + + head_dim = embed_dim // num_heads + assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads" + scaling = float(head_dim) ** -0.5 + + if not use_separate_proj_weight: + if torch.equal(query, key) and torch.equal(key, value): + # self-attention + q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1) + + elif torch.equal(key, value): + # encoder-decoder attention + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = linear(query, _w, _b) + + if key is None: + assert value is None + k = None + v = None + else: + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + k, v = linear(key, _w, _b).chunk(2, dim=-1) + + else: + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = linear(query, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = embed_dim * 2 + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + k = linear(key, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim * 2 + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + 
v = linear(value, _w, _b) + else: + q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight) + len1, len2 = q_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == query.size(-1) + + k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight) + len1, len2 = k_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == key.size(-1) + + v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight) + len1, len2 = v_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == value.size(-1) + + if in_proj_bias is not None: + q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim]) + k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)]) + v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):]) + else: + q = linear(query, q_proj_weight_non_opt, in_proj_bias) + k = linear(key, k_proj_weight_non_opt, in_proj_bias) + v = linear(value, v_proj_weight_non_opt, in_proj_bias) + q = q * scaling + + if attn_mask is not None: + assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \ + attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \ + 'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype) + if attn_mask.dtype == torch.uint8: + warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.") + attn_mask = attn_mask.to(torch.bool) + + if attn_mask.dim() == 2: + attn_mask = attn_mask.unsqueeze(0) + if list(attn_mask.size()) != [1, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 2D attn_mask is not correct.') + elif attn_mask.dim() == 3: + if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 3D attn_mask is not correct.') + else: + raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim())) + # attn_mask's dim is 3 now. + + # convert ByteTensor key_padding_mask to bool + if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8: + warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.") + key_padding_mask = key_padding_mask.to(torch.bool) + + if bias_k is not None and bias_v is not None: + if static_k is None and static_v is None: + k = torch.cat([k, bias_k.repeat(1, bsz, 1)]) + v = torch.cat([v, bias_v.repeat(1, bsz, 1)]) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + else: + assert static_k is None, "bias cannot be added to static key." + assert static_v is None, "bias cannot be added to static value." 
+ else: + assert bias_k is None + assert bias_v is None + + # L, N, E + if return_qk: + return_q = q.clone() / scaling + return_k = k.clone() + + q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1) + if k is not None: + k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + if v is not None: + v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + + if static_k is not None: + assert static_k.size(0) == bsz * num_heads + assert static_k.size(2) == head_dim + k = static_k + + if static_v is not None: + assert static_v.size(0) == bsz * num_heads + assert static_v.size(2) == head_dim + v = static_v + + src_len = k.size(1) + + if key_padding_mask is not None: + assert key_padding_mask.size(0) == bsz + assert key_padding_mask.size(1) == src_len + + if add_zero_attn: + src_len += 1 + k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1) + v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + + attn_output_weights = torch.bmm(q, k.transpose(1, 2)) + assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len] + + if attn_mask is not None: + if attn_mask.dtype == torch.bool: + attn_output_weights.masked_fill_(attn_mask, float('-inf')) + else: + attn_output_weights += attn_mask + + + if key_padding_mask is not None: + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + attn_output_weights = attn_output_weights.masked_fill( + key_padding_mask.unsqueeze(1).unsqueeze(2), + float('-inf'), + ) + attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len) + + if rpb is not None: + attn_output_weights = attn_output_weights + rpb + attn_output_weights = softmax(attn_output_weights, dim=-1) + attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training) + + attn_output = torch.bmm(attn_output_weights, v) + assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim] + attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) + attn_output = linear(attn_output, out_proj_weight, out_proj_bias) + + if return_qk: + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return return_q, return_k, attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return return_q, return_k, attn_output, None + else: + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return attn_output, None diff --git a/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/utils/clip_vit_only_global.py b/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/utils/clip_vit_only_global.py new file mode 100644 index 0000000..6ff3414 --- /dev/null +++ b/Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/utils/clip_vit_only_global.py @@ -0,0 +1,483 @@ +#!/usr/bin/env python +import os +from collections import OrderedDict + +from timm.models.layers import DropPath +import torch +from torch import nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint + +from .attention import MultiheadAttention + +import 
logging +logger = logging.getLogger(__name__) + +MODEL_PATH = '/mnt/lustre/share_data/likunchang.vendor/model' +_MODELS = { + "ViT-B/32": os.path.join(MODEL_PATH, "vit_b32.pth"), + "ViT-B/16": os.path.join(MODEL_PATH, "vit_b16.pth"), + "ViT-L/14": os.path.join(MODEL_PATH, "vit_l14.pth"), + "ViT-L/14_336": os.path.join(MODEL_PATH, "vit_l14_336.pth"), +} + + +def conv_1x1x1(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (1, 1, 1), (1, 1, 1), (0, 0, 0), groups=groups) + +def conv_3x3x3(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (3, 3, 3), (1, 1, 1), (1, 1, 1), groups=groups) + +def conv_1x3x3(inp, oup, groups=1): + return nn.Conv3d(inp, oup, (1, 3, 3), (1, 1, 1), (0, 1, 1), groups=groups) + +def bn_3d(dim): + return nn.BatchNorm3d(dim) + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def __init__( + self, d_model, n_head, attn_mask=None, drop_path=0.0, + ): + super().__init__() + + self.n_head = n_head + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + logger.info(f'Drop path rate: {drop_path}') + + # spatial + self.attn = MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model * 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + def attention(self, x): + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def forward(self, x, T=8, use_checkpoint=False): + # x: 1+HW, NT, C + # MHSA + if use_checkpoint: + attn_out = checkpoint.checkpoint(self.attention, self.ln_1(x)) + x = x + self.drop_path(attn_out) + else: + x = x + self.drop_path(self.attention(self.ln_1(x))) + # FFN + if use_checkpoint: + mlp_out = checkpoint.checkpoint(self.mlp, self.ln_2(x)) + x = x + self.drop_path(mlp_out) + else: + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class Extractor(nn.Module): + def __init__( + self, d_model, n_head, attn_mask=None, + mlp_factor=4.0, dropout=0.0, drop_path=0.0, + ): + super().__init__() + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + logger.info(f'Drop path rate: {drop_path}') + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = nn.LayerNorm(d_model) + d_mlp = round(mlp_factor * d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_mlp)), + ("gelu", QuickGELU()), + ("dropout", nn.Dropout(dropout)), + ("c_proj", nn.Linear(d_mlp, d_model)) + ])) + self.ln_2 = nn.LayerNorm(d_model) + self.ln_3 = nn.LayerNorm(d_model) + self.attn_mask = attn_mask + + # zero init + nn.init.xavier_uniform_(self.attn.in_proj_weight) + nn.init.constant_(self.attn.out_proj.weight, 0.) + nn.init.constant_(self.attn.out_proj.bias, 0.) + nn.init.xavier_uniform_(self.mlp[0].weight) + nn.init.constant_(self.mlp[-1].weight, 0.) + nn.init.constant_(self.mlp[-1].bias, 0.) 
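The zero initialization just above (the attention `out_proj` and the MLP `c_proj` start with zero weight and bias) makes a freshly constructed `Extractor` an identity map on its query tokens: both residual branches add exactly zero until training updates them, so these cross-attention blocks can sit on top of a pretrained CLIP ViT without disturbing its features at step 0. A minimal sketch of that property; the import path is an assumption for illustration, not part of the patch:

```python
import torch
from clip_vit_only_global import Extractor  # assumed import path for this file

ext = Extractor(d_model=768, n_head=12)   # ViT-B/16-sized defaults, drop_path=0.0
x = torch.randn(1, 2, 768)                # query tokens      (L_q,  N, C)
y = torch.randn(197, 2, 768)              # key/value tokens  (L_kv, N, C)
with torch.no_grad():
    out = ext(x, y)
print(torch.allclose(out, x))             # True: identity at initialization
```

This is the usual way of attaching new modules to a pretrained backbone so that its behaviour is preserved at the start of training.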
+ + def attention(self, x: torch.Tensor, y: torch.Tensor): + #self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + # return self.attn(x, y, y, need_weights=False, attn_mask=self.attn_mask)[0] + assert self.attn_mask is None # not implemented + # manual forward to add position information + d_model = self.ln_1.weight.size(0) + q = (x @ self.attn.in_proj_weight[:d_model].T) + self.attn.in_proj_bias[:d_model] + + k = (y @ self.attn.in_proj_weight[d_model:-d_model].T) + self.attn.in_proj_bias[d_model:-d_model] + v = (y @ self.attn.in_proj_weight[-d_model:].T) + self.attn.in_proj_bias[-d_model:] + Tx, Ty, N = q.size(0), k.size(0), q.size(1) + q = q.view(Tx, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + k = k.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + v = v.view(Ty, N, self.attn.num_heads, self.attn.head_dim).permute(1, 2, 0, 3) + aff = (q @ k.transpose(-2, -1) / (self.attn.head_dim ** 0.5)) + + aff = aff.softmax(dim=-1) + out = aff @ v + out = out.permute(2, 0, 1, 3).flatten(2) + out = self.attn.out_proj(out) + return out + + def forward(self, x: torch.Tensor, y: torch.Tensor): + x = x + self.drop_path(self.attention(self.ln_1(x), self.ln_3(y))) + x = x + self.drop_path(self.mlp(self.ln_2(x))) + return x + + +class Transformer(nn.Module): + def __init__( + self, width, layers, heads, attn_mask=None, backbone_drop_path_rate=0., + use_checkpoint=False, checkpoint_num=[0], t_size=8, + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, + ): + super().__init__() + self.T = t_size + self.return_list = return_list + # Backbone + b_dpr = [x.item() for x in torch.linspace(0, backbone_drop_path_rate, layers)] + self.resblocks = nn.ModuleList([ + ResidualAttentionBlock( + width, heads, attn_mask, + drop_path=b_dpr[i], + ) for i in range(layers) + ]) + # checkpoint + self.use_checkpoint = use_checkpoint + self.checkpoint_num = checkpoint_num + logger.info(f'Use checkpoint: {self.use_checkpoint}') + logger.info(f'Checkpoint number: {self.checkpoint_num}') + + # Extractor + assert n_layers == len(return_list) + self.temporal_cls_token = nn.Parameter(torch.zeros(1, 1, n_dim)) + self.dpe = nn.ModuleList([ + nn.Conv3d(n_dim, n_dim, kernel_size=3, stride=1, padding=1, bias=True, groups=n_dim) + for i in range(n_layers) + ]) + for m in self.dpe: + nn.init.constant_(m.bias, 0.) 
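# Note: each entry of self.dpe is a depthwise 3x3x3 Conv3d (groups=n_dim) acting as a local
# spatiotemporal position encoding. In forward() it is applied to the patch tokens only (the
# class token at index 0 is skipped), after reshaping them to (N, C, T, H, W), and its output
# is added back to those tokens before the temporal cls token cross-attends to them through
# the matching Extractor in self.dec constructed below.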
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, n_layers)] + self.dec = nn.ModuleList([ + Extractor( + n_dim, n_head, mlp_factor=mlp_factor, + dropout=mlp_dropout[i], drop_path=dpr[i], + ) for i in range(n_layers) + ]) + # # projection + # self.proj = nn.Sequential( + # nn.LayerNorm(n_dim), + # nn.Dropout(cls_dropout), + # nn.Linear(n_dim, num_classes), + # ) + self.balance = nn.Parameter(torch.zeros((n_dim))) + self.sigmoid = nn.Sigmoid() + + def forward(self, x, mode='video', return_all_feats=False): + if mode == 'video': + T_down = self.T + else: + T_down = 1 + L, NT, C = x.shape + N = NT // T_down + H = W = int((L - 1) ** 0.5) + cls_token = self.temporal_cls_token.repeat(1, N, 1) + + j = -1 + for i, resblock in enumerate(self.resblocks): + if self.use_checkpoint and i < self.checkpoint_num[0]: + x = resblock(x, T_down, use_checkpoint=True) + else: + x = resblock(x, T_down) + if i in self.return_list: + j += 1 + tmp_x = x.clone() + tmp_x = tmp_x.view(L, N, T_down, C) + # dpe + _, tmp_feats = tmp_x[:1], tmp_x[1:] + tmp_feats = tmp_feats.permute(1, 3, 2, 0).reshape(N, C, T_down, H, W) + tmp_feats = self.dpe[j](tmp_feats).view(N, C, T_down, L - 1).permute(3, 0, 2, 1) + tmp_x[1:] = tmp_x[1:] + tmp_feats + # enhancer + tmp_x = tmp_x.permute(2, 0, 1, 3).flatten(0, 1) # T * L, N, C + cls_token = self.dec[j](cls_token, tmp_x) + + weight = self.sigmoid(self.balance) + residual = x.view(L, N, T_down, C)[0].mean(1) # L, N, T, C + # return self.proj((1 - weight) * cls_token[0, :, :] + weight * residual) + feats = (1 - weight) * cls_token[0, :, :] + weight * residual + if return_all_feats: + return feats, x.view(L, N, T_down, C) + + return feats + + +class VisionTransformer(nn.Module): + def __init__( + self, + # backbone + input_resolution, patch_size, width, layers, heads, output_dim, backbone_drop_path_rate=0., + use_checkpoint=False, checkpoint_num=[0], t_size=8, + # extractor + return_list=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], + n_layers=12, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, + + ): + super().__init__() + self.input_resolution = input_resolution + self.output_dim = output_dim + self.conv1 = nn.Conv3d(3, width, (1, patch_size, patch_size), (1, patch_size, patch_size), (0, 0, 0), bias=False) + + scale = width ** -0.5 + self.class_embedding = nn.Parameter(scale * torch.randn(width)) + self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)) + self.ln_pre = LayerNorm(width) + + self.transformer = Transformer( + width, layers, heads, + backbone_drop_path_rate=backbone_drop_path_rate, + use_checkpoint=use_checkpoint, checkpoint_num=checkpoint_num, t_size=t_size, + return_list=return_list, n_layers=n_layers, n_dim=n_dim, n_head=n_head, + mlp_factor=mlp_factor, drop_path_rate=drop_path_rate, mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, num_classes=num_classes, + ) + + def forward(self, x, mode='video', return_all_feats=False): + x = self.conv1(x) # shape = [*, width, grid, grid] + N, C, T, H, W = x.shape + x = x.permute(0, 2, 3, 4, 1).reshape(N * T, H * W, C) + + x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] + x = x + self.positional_embedding.to(x.dtype) + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) # NLD -> LND + out = self.transformer(x, mode=mode, 
return_all_feats=return_all_feats) + return out + + +def inflate_weight(weight_2d, time_dim, center=True): + if center: + weight_3d = torch.zeros(*weight_2d.shape) + weight_3d = weight_3d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1) + middle_idx = time_dim // 2 + weight_3d[:, :, middle_idx, :, :] = weight_2d + else: + weight_3d = weight_2d.unsqueeze(2).repeat(1, 1, time_dim, 1, 1) + weight_3d = weight_3d / time_dim + return weight_3d + + +def load_state_dict(model, state_dict): + state_dict_3d = model.state_dict() + for k in state_dict.keys(): + if state_dict[k].shape != state_dict_3d[k].shape: + if len(state_dict_3d[k].shape) <= 2: + logger.info(f'Ignore: {k}') + continue + logger.info(f'Inflate: {k}, {state_dict[k].shape} => {state_dict_3d[k].shape}') + time_dim = state_dict_3d[k].shape[2] + state_dict[k] = inflate_weight(state_dict[k], time_dim) + model.load_state_dict(state_dict, strict=False) + + +def vit_only_global_b32( + pretrained=True, use_checkpoint=False, checkpoint_num=[0], + t_size=16, backbone_drop_path_rate=0., + return_list=[8, 9, 10, 11], + n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, + ): + model = VisionTransformer( + input_resolution=224, + patch_size=32, + width=768, + layers=12, + heads=12, + output_dim=512, + use_checkpoint=use_checkpoint, + checkpoint_num=checkpoint_num, + t_size=t_size, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + if pretrained: + logger.info('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/32"], map_location='cpu') + load_state_dict(model, state_dict) + return model.eval() + + +def vit_only_global_b16( + pretrained=True, use_checkpoint=False, checkpoint_num=[0], + t_size=16, backbone_drop_path_rate=0., + return_list=[8, 9, 10, 11], + n_layers=4, n_dim=768, n_head=12, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, +): + model = VisionTransformer( + input_resolution=224, + patch_size=16, + width=768, + layers=12, + heads=12, + output_dim=512, + use_checkpoint=use_checkpoint, + checkpoint_num=checkpoint_num, + t_size=t_size, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + if pretrained: + logger.info('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-B/16"], map_location='cpu') + load_state_dict(model, state_dict) + return model.eval() + + +def vit_only_global_l14( + pretrained=True, use_checkpoint=False, checkpoint_num=[0], + t_size=16, backbone_drop_path_rate=0., + return_list=[20, 21, 22, 23], + n_layers=4, n_dim=1024, n_head=16, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, +): + model = VisionTransformer( + input_resolution=224, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + use_checkpoint=use_checkpoint, + checkpoint_num=checkpoint_num, + t_size=t_size, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + 
drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + if pretrained: + logger.info('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14"], map_location='cpu') + load_state_dict(model, state_dict) + return model.eval() + + +def vit_only_global_l14_336( + pretrained=True, use_checkpoint=False, checkpoint_num=[0], + t_size=16, backbone_drop_path_rate=0., + return_list=[20, 21, 22, 23], + n_layers=4, n_dim=1024, n_head=16, mlp_factor=4.0, drop_path_rate=0., + mlp_dropout=[0.5, 0.5, 0.5, 0.5], + cls_dropout=0.5, num_classes=400, +): + model = VisionTransformer( + input_resolution=336, + patch_size=14, + width=1024, + layers=24, + heads=16, + output_dim=768, + use_checkpoint=use_checkpoint, + checkpoint_num=checkpoint_num, + t_size=t_size, + backbone_drop_path_rate=backbone_drop_path_rate, + return_list=return_list, + n_layers=n_layers, + n_dim=n_dim, + n_head=n_head, + mlp_factor=mlp_factor, + drop_path_rate=drop_path_rate, + mlp_dropout=mlp_dropout, + cls_dropout=cls_dropout, + num_classes=num_classes, + ) + + if pretrained: + logger.info('load pretrained weights') + state_dict = torch.load(_MODELS["ViT-L/14_336"], map_location='cpu') + load_state_dict(model, state_dict) + return model.eval() + + +if __name__ == '__main__': + import time + from fvcore.nn import FlopCountAnalysis + from fvcore.nn import flop_count_table + import numpy as np + + seed = 4217 + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + num_frames = 8 + + model = vit_only_global_l14( + pretrained=False, + t_size=num_frames, backbone_drop_path_rate=0.2, drop_path_rate=0.4, + use_checkpoint=True, checkpoint_num=[0], + ) + + flops = FlopCountAnalysis(model, torch.rand(1, 3, num_frames, 224, 224)) + s = time.time() + logger.info(flop_count_table(flops, max_depth=1)) + logger.info(time.time()-s) \ No newline at end of file diff --git a/Pretrain/Multi-Modalities-Pretraining/InternVideo/internvideo.py b/Pretrain/Multi-Modalities-Pretraining/InternVideo/internvideo.py new file mode 100644 index 0000000..85c76c3 --- /dev/null +++ b/Pretrain/Multi-Modalities-Pretraining/InternVideo/internvideo.py @@ -0,0 +1,100 @@ +import torch +import numpy as np +import decord + +from typing import Any, OrderedDict, Union, List +from pkg_resources import packaging +from torchvision import transforms + +from . 
import video_transform +from .simple_tokenizer import SimpleTokenizer as _Tokenizer +from .clip_utils.model import build_model + + +__all__ = ["load_model", "load_video", "tokenize"] +_tokenizer = _Tokenizer() + + +def load_model(path): + state = torch.load(path, map_location="cpu")["state_dict"] + state = {k[len("clip.") :]: v for k, v in state.items() if k.startswith("clip.")} + model = build_model(state_dict=state) + return model + + +def load_video(path): + video_reader = decord.VideoReader(path, num_threads=1, ctx=decord.cpu(0)) + decord.bridge.set_bridge('torch') + video_len = len(video_reader) + video = video_reader.get_batch(np.linspace(0, video_len - 1, 8).astype(np.intc)).byte() + video = video.permute(3, 0, 1, 2) + + input_mean = [0.48145466, 0.4578275, 0.40821073] + input_std = [0.26862954, 0.26130258, 0.27577711] + crop_size, scale_size = 224, 256 + trans = transforms.Compose([ + video_transform.TensorToNumpy(), + video_transform.Resize(scale_size), + video_transform.CenterCrop(crop_size), + video_transform.ClipToTensor(channel_nb=3), + video_transform.Normalize(mean=input_mean, std=input_std) + ]) + + video = trans(video) + return video + + +def tokenize( + texts: Union[str, List[str]], + context_length: int = 77, + truncate: bool = False, + return_special_tokens_mask: bool = False, +) -> Union[torch.IntTensor, torch.LongTensor, torch.BoolTensor]: + """ + Returns the tokenized representation of given input string(s) + + Parameters + ---------- + texts : Union[str, List[str]] + An input string or a list of input strings to tokenize + + context_length : int + The context length to use; all CLIP models use 77 as the context length + + truncate: bool + Whether to truncate the text in case its encoding is longer than the context length + + Returns + ------- + A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]. + We return LongTensor when torch version is <1.8.0, since older index_select requires indices to be long. 
+ """ + if isinstance(texts, str): + texts = [texts] + + sot_token = _tokenizer.encoder["<|startoftext|>"] + eot_token = _tokenizer.encoder["<|endoftext|>"] + all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts] + if packaging.version.parse(torch.__version__) < packaging.version.parse("1.8.0"): + result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) + else: + result = torch.zeros(len(all_tokens), context_length, dtype=torch.int) + + special_tokens_mask = torch.zeros(len(all_tokens), context_length, dtype=torch.bool) + + for i, tokens in enumerate(all_tokens): + if len(tokens) > context_length: + if truncate: + tokens = tokens[:context_length] + tokens[-1] = eot_token + else: + raise RuntimeError( + f"Input {texts[i]} is too long for context length {context_length}" + ) + result[i, : len(tokens)] = torch.tensor(tokens) + special_tokens_mask[i, len(tokens) :] = 1 + + if return_special_tokens_mask: + return result, special_tokens_mask + + return result diff --git a/Pretrain/Multi-Modalities-Pretraining/InternVideo/simple_tokenizer.py b/Pretrain/Multi-Modalities-Pretraining/InternVideo/simple_tokenizer.py new file mode 100644 index 0000000..0a66286 --- /dev/null +++ b/Pretrain/Multi-Modalities-Pretraining/InternVideo/simple_tokenizer.py @@ -0,0 +1,132 @@ +import gzip +import html +import os +from functools import lru_cache + +import ftfy +import regex as re + + +@lru_cache() +def default_bpe(): + return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz") + + +@lru_cache() +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a corresponding list of unicode strings. + The reversible bpe codes work on unicode strings. + This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. + When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. + This is a signficant percentage of your normal, say, 32K bpe vocab. + To avoid that, we want lookup tables between utf-8 bytes and unicode strings. + And avoids mapping to whitespace/control characters the bpe code barfs on. + """ + bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8+n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def get_pairs(word): + """Return set of symbol pairs in a word. + Word is represented as tuple of symbols (symbols being variable-length strings). 
+    """
+    pairs = set()
+    prev_char = word[0]
+    for char in word[1:]:
+        pairs.add((prev_char, char))
+        prev_char = char
+    return pairs
+
+
+def basic_clean(text):
+    text = ftfy.fix_text(text)
+    text = html.unescape(html.unescape(text))
+    return text.strip()
+
+
+def whitespace_clean(text):
+    text = re.sub(r'\s+', ' ', text)
+    text = text.strip()
+    return text
+
+
+class SimpleTokenizer(object):
+    def __init__(self, bpe_path: str = default_bpe()):
+        self.byte_encoder = bytes_to_unicode()
+        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
+        merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
+        merges = merges[1:49152-256-2+1]
+        merges = [tuple(merge.split()) for merge in merges]
+        vocab = list(bytes_to_unicode().values())
+        # '</w>' marks the end of a word; the word-final variant of every byte gets its own entry
+        vocab = vocab + [v+'</w>' for v in vocab]
+        for merge in merges:
+            vocab.append(''.join(merge))
+        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
+        self.encoder = dict(zip(vocab, range(len(vocab))))
+        self.decoder = {v: k for k, v in self.encoder.items()}
+        self.bpe_ranks = dict(zip(merges, range(len(merges))))
+        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
+        self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
+
+    def bpe(self, token):
+        if token in self.cache:
+            return self.cache[token]
+        word = tuple(token[:-1]) + ( token[-1] + '</w>',)
+        pairs = get_pairs(word)
+
+        if not pairs:
+            return token+'</w>'
+
+        while True:
+            bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
+            if bigram not in self.bpe_ranks:
+                break
+            first, second = bigram
+            new_word = []
+            i = 0
+            while i < len(word):
+                try:
+                    j = word.index(first, i)
+                    new_word.extend(word[i:j])
+                    i = j
+                except:
+                    new_word.extend(word[i:])
+                    break
+
+                if word[i] == first and i < len(word)-1 and word[i+1] == second:
+                    new_word.append(first+second)
+                    i += 2
+                else:
+                    new_word.append(word[i])
+                    i += 1
+            new_word = tuple(new_word)
+            word = new_word
+            if len(word) == 1:
+                break
+            else:
+                pairs = get_pairs(word)
+        word = ' '.join(word)
+        self.cache[token] = word
+        return word
+
+    def encode(self, text):
+        bpe_tokens = []
+        text = whitespace_clean(basic_clean(text)).lower()
+        for token in re.findall(self.pat, text):
+            token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
+            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
+        return bpe_tokens
+
+    def decode(self, tokens):
+        text = ''.join([self.decoder[token] for token in tokens])
+        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
+        return text
diff --git a/Pretrain/Multi-Modalities-Pretraining/InternVideo/video_transform.py b/Pretrain/Multi-Modalities-Pretraining/InternVideo/video_transform.py
new file mode 100644
index 0000000..78a501d
--- /dev/null
+++ b/Pretrain/Multi-Modalities-Pretraining/InternVideo/video_transform.py
@@ -0,0 +1,752 @@
+import numbers
+import random
+import numpy as np
+import PIL
+import skimage
+import skimage.transform
+import torchvision
+import torch
+from torchvision import transforms
+from PIL import Image
+import torch
+import cv2
+
+
+def _is_tensor_clip(clip):
+    return torch.is_tensor(clip) and clip.ndimension() == 4
+
+
+def crop_clip(clip, min_h, min_w, h, w):
+    if isinstance(clip[0], np.ndarray):
+        cropped = [img[min_h:min_h + h, min_w:min_w + w, :] for img in clip]
+
+    elif isinstance(clip[0], PIL.Image.Image):
+        cropped = [
+            img.crop((min_w, min_h, min_w + w, min_h
+ h)) for img in clip + ] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + return cropped + + +def resize_clip(clip, size, interpolation='bilinear'): + if isinstance(clip[0], np.ndarray): + if isinstance(size, numbers.Number): + im_h, im_w, im_c = clip[0].shape + # Min spatial dim already matches minimal size + if (im_w <= im_h and im_w == size) or (im_h <= im_w + and im_h == size): + return clip + new_h, new_w = get_resize_sizes(im_h, im_w, size) + size = (new_w, new_h) + else: + size = size[1], size[0] + if interpolation == 'bilinear': + np_inter = cv2.INTER_LINEAR + else: + np_inter = cv2.INTER_NEAREST + scaled = [ + cv2.resize(img, size, interpolation=np_inter) for img in clip + ] + elif isinstance(clip[0], PIL.Image.Image): + if isinstance(size, numbers.Number): + im_w, im_h = clip[0].size + # Min spatial dim already matches minimal size + if (im_w <= im_h and im_w == size) or (im_h <= im_w + and im_h == size): + return clip + new_h, new_w = get_resize_sizes(im_h, im_w, size) + size = (new_w, new_h) + else: + size = size[1], size[0] + if interpolation == 'bilinear': + pil_inter = PIL.Image.NEAREST + else: + pil_inter = PIL.Image.BILINEAR + scaled = [img.resize(size, pil_inter) for img in clip] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + return scaled + + +def get_resize_sizes(im_h, im_w, size): + if im_w < im_h: + ow = size + oh = int(size * im_h / im_w) + else: + oh = size + ow = int(size * im_w / im_h) + return oh, ow + + +def normalize(clip, mean, std, inplace=False): + if not _is_tensor_clip(clip): + raise TypeError('tensor is not a torch clip_test.') + + if not inplace: + clip = clip.clone() + + dtype = clip.dtype + dim = len(mean) + mean = torch.as_tensor(mean, dtype=dtype, device=clip.device) + std = torch.as_tensor(std, dtype=dtype, device=clip.device) + # print(clip_test.size()) + # if dim == 3: + clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None]) + # else: + # clip_test.sub_(mean[:, None, None]).div_(std[:, None, None]) + return clip + + +def convert_img(img): + """Converts (H, W, C) numpy.ndarray to (C, W, H) format + """ + if len(img.shape) == 3: + img = img.transpose(2, 0, 1) + if len(img.shape) == 2: + img = np.expand_dims(img, 0) + return img + + +class ClipToTensor(object): + """Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255] + to a torch.FloatTensor of shape (C x m x H x W) in the range [0, 1.0] + """ + + def __init__(self, channel_nb=3, div_255=True, numpy=False): + self.channel_nb = channel_nb + self.div_255 = div_255 + self.numpy = numpy + + def __call__(self, clip): + """ + Args: clip_test (list of numpy.ndarray): clip_test (list of images) + to be converted to tensor. 
+ """ + # Retrieve shape + if isinstance(clip[0], np.ndarray): + h, w, ch = clip[0].shape + assert ch == self.channel_nb, 'Got {0} instead of 3 channels'.format( + ch) + elif isinstance(clip[0], Image.Image): + w, h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image\ + but got list of {0}'.format(type(clip[0]))) + + np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)]) + + # Convert + for img_idx, img in enumerate(clip): + if isinstance(img, np.ndarray): + pass + elif isinstance(img, Image.Image): + img = np.array(img, copy=False) + else: + raise TypeError('Expected numpy.ndarray or PIL.Image\ + but got list of {0}'.format(type(clip[0]))) + img = convert_img(img) + np_clip[:, img_idx, :, :] = img + if self.numpy: + if self.div_255: + np_clip = np_clip / 255 + return np_clip + + else: + tensor_clip = torch.from_numpy(np_clip) + + if not isinstance(tensor_clip, torch.FloatTensor): + tensor_clip = tensor_clip.float() + if self.div_255: + tensor_clip = tensor_clip.div(255) + return tensor_clip + + +class ToTensor(object): + """Converts numpy array to tensor + """ + + def __call__(self, array): + tensor = torch.from_numpy(array) + return tensor + +class ColorDistortion(object): + def __init__(self, s=1.0): + self.s = s + self.color_jitter = transforms.ColorJitter(0.8*s, 0.8*s, 0.8*s, 0.2*s) + self.rnd_color_jitter = transforms.RandomApply([self.color_jitter], p=0.8) + self.rnd_gray = transforms.RandomGrayscale(p=0.2) + + def __call__(self, video): + color_distort = transforms.Compose([self.rnd_color_jitter, self.rnd_gray]) + return color_distort(video) + + +class Compose(object): + """Composes several transforms + Args: + transforms (list of ``Transform`` objects): list of transforms + to compose + """ + + def __init__(self, transforms): + self.transforms = transforms + + def __call__(self, clip): + for t in self.transforms: + clip = t(clip) + return clip + + +class RandomHorizontalFlip(object): + """Horizontally flip the list of given images randomly + with a probability 0.5 + """ + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Randomly flipped clip_test + """ + if random.random() < 0.5: + if isinstance(clip[0], np.ndarray): + return [np.fliplr(img) for img in clip] + elif isinstance(clip[0], PIL.Image.Image): + return [ + img.transpose(PIL.Image.FLIP_LEFT_RIGHT) for img in clip + ] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + ' but got list of {0}'.format(type(clip[0]))) + return clip + + +class RandomResize(object): + """Resizes a list of (H x W x C) numpy.ndarray to the final size + The larger the original image is, the more times it takes to + interpolate + Args: + interpolation (str): Can be one of 'nearest', 'bilinear' + defaults to nearest + size (tuple): (widht, height) + """ + + def __init__(self, ratio=(3. / 4., 4. 
/ 3.), interpolation='nearest'): + self.ratio = ratio + self.interpolation = interpolation + + def __call__(self, clip): + scaling_factor = random.uniform(self.ratio[0], self.ratio[1]) + + if isinstance(clip[0], np.ndarray): + im_h, im_w, im_c = clip[0].shape + elif isinstance(clip[0], PIL.Image.Image): + im_w, im_h = clip[0].size + + new_w = int(im_w * scaling_factor) + new_h = int(im_h * scaling_factor) + new_size = (new_w, new_h) + resized = resize_clip( + clip, new_size, interpolation=self.interpolation) + return resized + + +class Resize(object): + """Resizes a list of (H x W x C) numpy.ndarray to the final size + The larger the original image is, the more times it takes to + interpolate + Args: + interpolation (str): Can be one of 'nearest', 'bilinear' + defaults to nearest + size (tuple): (widht, height) + """ + + def __init__(self, size, interpolation='nearest'): + self.size = size + self.interpolation = interpolation + + def __call__(self, clip): + resized = resize_clip( + clip, self.size, interpolation=self.interpolation) + return resized + + +class RandomCrop(object): + """Extract random crop at the same location for a list of images + Args: + size (sequence or int): Desired output size for the + crop in format (h, w) + """ + + def __init__(self, size): + if isinstance(size, numbers.Number): + size = (size, size) + + self.size = size + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + h, w = self.size + if isinstance(clip[0], np.ndarray): + im_h, im_w, im_c = clip[0].shape + elif isinstance(clip[0], PIL.Image.Image): + im_w, im_h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + if w > im_w or h > im_h: + error_msg = ( + 'Initial image size should be larger then ' + 'cropped size but got cropped sizes : ({w}, {h}) while ' + 'initial image is ({im_w}, {im_h})'.format( + im_w=im_w, im_h=im_h, w=w, h=h)) + raise ValueError(error_msg) + + x1 = random.randint(0, im_w - w) + y1 = random.randint(0, im_h - h) + cropped = crop_clip(clip, y1, x1, h, w) + + return cropped + + +class CornerCrop(object): + + def __init__(self, size, crop_position=None): + self.size = size + if crop_position is None: + self.randomize = True + else: + self.randomize = False + self.crop_position = crop_position + self.crop_positions = ['c', 'tl', 'tr', 'bl', 'br'] + + def __call__(self, imgs): + t, h, w, c = imgs.shape + corner_imgs = list() + for n in self.crop_positions: + #print(n) + if n == 'c': + th, tw = (self.size, self.size) + x1 = int(round((w- tw) / 2.)) + y1 = int(round((h - th) / 2.)) + x2 = x1 + tw + y2 = y1 + th + elif n == 'tl': + x1 = 0 + y1 = 0 + x2 = self.size + y2 = self.size + elif n == 'tr': + x1 = w - self.size + y1 = 0 + x2 = w + y2 = self.size + elif n == 'bl': + x1 = 0 + y1 = h - self.size + x2 = self.size + y2 = h + elif n == 'br': + x1 = w - self.size + y1 = h - self.size + x2 = w + y2 = h + corner_imgs.append(imgs[:, y1:y2, x1:x2, :]) + return corner_imgs + + def randomize_parameters(self): + if self.randomize: + self.crop_position = self.crop_positions[random.randint( + 0, + len(self.crop_positions) - 1)] + + +class RandomRotation(object): + """Rotate entire clip_test randomly by a random angle within + given bounds + Args: + degrees (sequence or int): Range of degrees to select from + If degrees is a number instead of sequence like (min, 
max), + the range of degrees, will be (-degrees, +degrees). + """ + + def __init__(self, degrees): + if isinstance(degrees, numbers.Number): + if degrees < 0: + raise ValueError('If degrees is a single number,' + 'must be positive') + degrees = (-degrees, degrees) + else: + if len(degrees) != 2: + raise ValueError('If degrees is a sequence,' + 'it must be of len 2.') + + self.degrees = degrees + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + angle = random.uniform(self.degrees[0], self.degrees[1]) + if isinstance(clip[0], np.ndarray): + rotated = [skimage.transform.rotate(img, angle) for img in clip] + elif isinstance(clip[0], PIL.Image.Image): + rotated = [img.rotate(angle) for img in clip] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + + return rotated + + +class STA_RandomRotation(object): + """Rotate entire clip_test randomly by a random angle within + given bounds + Args: + degrees (sequence or int): Range of degrees to select from + If degrees is a number instead of sequence like (min, max), + the range of degrees, will be (-degrees, +degrees). + """ + + def __init__(self, degrees): + if isinstance(degrees, numbers.Number): + if degrees < 0: + raise ValueError('If degrees is a single number,' + 'must be positive') + degrees = (-degrees, degrees) + else: + if len(degrees) != 2: + raise ValueError('If degrees is a sequence,' + 'it must be of len 2.') + + self.degrees = degrees + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + bsz = len(clip) + angle = random.uniform(self.degrees[0], self.degrees[1]) + angles = [(i+1)/(bsz+1) * angle for i in range(bsz)] + if isinstance(clip[0], np.ndarray): + rotated = [skimage.transform.rotate(img, angles[i]) for i, img in enumerate(clip)] + elif isinstance(clip[0], PIL.Image.Image): + rotated = [img.rotate(angles[i]) for i, img in enumerate(clip)] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + + return rotated + + +class Each_RandomRotation(object): + """Rotate entire clip_test randomly by a random angle within + given bounds + Args: + degrees (sequence or int): Range of degrees to select from + If degrees is a number instead of sequence like (min, max), + the range of degrees, will be (-degrees, +degrees). 
+ """ + + def __init__(self, degrees): + if isinstance(degrees, numbers.Number): + if degrees < 0: + raise ValueError('If degrees is a single number,' + 'must be positive') + degrees = (-degrees, degrees) + else: + if len(degrees) != 2: + raise ValueError('If degrees is a sequence,' + 'it must be of len 2.') + + self.degrees = degrees + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + bsz = len(clip) + angles = [random.uniform(self.degrees[0], self.degrees[1]) for i in range(bsz)] + # print(angles) + if isinstance(clip[0], np.ndarray): + rotated = [skimage.transform.rotate(img, angles[i]) for i, img in enumerate(clip)] + elif isinstance(clip[0], PIL.Image.Image): + rotated = [img.rotate(angles[i]) for i, img in enumerate(clip)] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + + return rotated + + +class CenterCrop(object): + """Extract center crop at the same location for a list of images + Args: + size (sequence or int): Desired output size for the + crop in format (h, w) + """ + + def __init__(self, size): + if isinstance(size, numbers.Number): + size = (size, size) + + self.size = size + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + h, w = self.size + if isinstance(clip[0], np.ndarray): + im_h, im_w, im_c = clip[0].shape + elif isinstance(clip[0], PIL.Image.Image): + im_w, im_h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + if w > im_w or h > im_h: + error_msg = ( + 'Initial image size should be larger then ' + 'cropped size but got cropped sizes : ({w}, {h}) while ' + 'initial image is ({im_w}, {im_h})'.format( + im_w=im_w, im_h=im_h, w=w, h=h)) + raise ValueError(error_msg) + + x1 = int(round((im_w - w) / 2.)) + y1 = int(round((im_h - h) / 2.)) + cropped = crop_clip(clip, y1, x1, h, w) + + return cropped + + +class ColorJitter(object): + """Randomly change the brightness, contrast and saturation and hue of the clip_test + Args: + brightness (float): How much to jitter brightness. brightness_factor + is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]. + contrast (float): How much to jitter contrast. contrast_factor + is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]. + saturation (float): How much to jitter saturation. saturation_factor + is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]. + hue(float): How much to jitter hue. hue_factor is chosen uniformly from + [-hue, hue]. Should be >=0 and <= 0.5. 
+ """ + + def __init__(self, brightness=0, contrast=0, saturation=0, hue=0): + self.brightness = brightness + self.contrast = contrast + self.saturation = saturation + self.hue = hue + + def get_params(self, brightness, contrast, saturation, hue): + if brightness > 0: + brightness_factor = random.uniform( + max(0, 1 - brightness), 1 + brightness) + else: + brightness_factor = None + + if contrast > 0: + contrast_factor = random.uniform( + max(0, 1 - contrast), 1 + contrast) + else: + contrast_factor = None + + if saturation > 0: + saturation_factor = random.uniform( + max(0, 1 - saturation), 1 + saturation) + else: + saturation_factor = None + + if hue > 0: + hue_factor = random.uniform(-hue, hue) + else: + hue_factor = None + return brightness_factor, contrast_factor, saturation_factor, hue_factor + + def __call__(self, clip): + """ + Args: + clip_test (list): list of PIL.Image + Returns: + list PIL.Image : list of transformed PIL.Image + """ + if isinstance(clip[0], np.ndarray): + raise TypeError( + 'Color jitter not yet implemented for numpy arrays') + elif isinstance(clip[0], PIL.Image.Image): + brightness, contrast, saturation, hue = self.get_params( + self.brightness, self.contrast, self.saturation, self.hue) + + # Create img sync_dir function sequence + img_transforms = [] + if brightness is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness)) + if saturation is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation)) + if hue is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue)) + if contrast is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast)) + random.shuffle(img_transforms) + + # Apply to all images + jittered_clip = [] + for img in clip: + for func in img_transforms: + jittered_img = func(img) + jittered_clip.append(jittered_img) + + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + return jittered_clip + + +class EachColorJitter(object): + """Randomly change the brightness, contrast and saturation and hue of the clip_test + Args: + brightness (float): How much to jitter brightness. brightness_factor + is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]. + contrast (float): How much to jitter contrast. contrast_factor + is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]. + saturation (float): How much to jitter saturation. saturation_factor + is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]. + hue(float): How much to jitter hue. hue_factor is chosen uniformly from + [-hue, hue]. Should be >=0 and <= 0.5. 
+ """ + + def __init__(self, brightness=0, contrast=0, saturation=0, hue=0): + self.brightness = brightness + self.contrast = contrast + self.saturation = saturation + self.hue = hue + + def get_params(self, brightness, contrast, saturation, hue): + if brightness > 0: + brightness_factor = random.uniform( + max(0, 1 - brightness), 1 + brightness) + else: + brightness_factor = None + + if contrast > 0: + contrast_factor = random.uniform( + max(0, 1 - contrast), 1 + contrast) + else: + contrast_factor = None + + if saturation > 0: + saturation_factor = random.uniform( + max(0, 1 - saturation), 1 + saturation) + else: + saturation_factor = None + + if hue > 0: + hue_factor = random.uniform(-hue, hue) + else: + hue_factor = None + return brightness_factor, contrast_factor, saturation_factor, hue_factor + + def __call__(self, clip): + """ + Args: + clip_test (list): list of PIL.Image + Returns: + list PIL.Image : list of transformed PIL.Image + """ + if isinstance(clip[0], np.ndarray): + raise TypeError( + 'Color jitter not yet implemented for numpy arrays') + elif isinstance(clip[0], PIL.Image.Image): + brightness, contrast, saturation, hue = self.get_params( + self.brightness, self.contrast, self.saturation, self.hue) + + # Create img sync_dir function sequence + img_transforms = [] + if brightness is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness)) + if saturation is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation)) + if hue is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue)) + if contrast is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast)) + random.shuffle(img_transforms) + + # Apply to all images + jittered_clip = [] + for img in clip: + for func in img_transforms: + jittered_img = func(img) + jittered_clip.append(jittered_img) + + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + return jittered_clip + + +class Normalize(object): + """Normalize a clip_test with mean and standard deviation. + Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this sync_dir + will normalize each channel of the input ``torch.*Tensor`` i.e. + ``input[channel] = (input[channel] - mean[channel]) / std[channel]`` + .. note:: + This sync_dir acts out of place, i.e., it does not mutates the input tensor. + Args: + mean (sequence): Sequence of means for each channel. + std (sequence): Sequence of standard deviations for each channel. + """ + + def __init__(self, mean, std): + self.mean = mean + self.std = std + + def __call__(self, clip): + """ + Args: + clip (Tensor): Tensor clip_test of size (T, C, H, W) to be normalized. + Returns: + Tensor: Normalized Tensor clip_test. 
+        """
+        return normalize(clip, self.mean, self.std)
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
+
+
+class TensorToNumpy(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, clip):
+        np_clip = clip.permute(1, 2, 3, 0).cpu().detach().numpy()
+        pil_clip = [Image.fromarray(np.uint8(numpy_image)).convert('RGB') for numpy_image in np_clip]
+        return pil_clip
diff --git a/Pretrain/Multi-Modalities-Pretraining/README.md b/Pretrain/Multi-Modalities-Pretraining/README.md
new file mode 100644
index 0000000..3a44bcf
--- /dev/null
+++ b/Pretrain/Multi-Modalities-Pretraining/README.md
@@ -0,0 +1,35 @@
+# Multi-Modalities-Pretraining
+
+This is an official implementation of the multi-modalities pre-training model in
+[InternVideo](https://arxiv.org/abs/2212.03191), which is responsible for multi-modality tasks including zero-shot action recognition, zero-shot multiple choice, zero-shot retrieval, video question answering, and video-text retrieval, and is also one of the components of the final InternVideo model.
+
+## Usage
+
+### Model preparation
+
+We currently provide the [B/16](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/pretrain/InternVideo-MM-B-16.ckpt) model; please download it from Aliyun and place it under the `models` folder. The model uses [UniFormerV2](https://github.com/OpenGVLab/UniFormerV2) as its backbone and was trained for 12 days on 128 NVIDIA A100 GPUs.
+
+### Demo
+
+To classify the demo video of an airplane taking off, run `python demo.py`; you should see results like the following (L/14 model):
+
+```
+Label probs:
+an airplane is taking off : 0.9562
+an airplane is flying : 0.0438
+a dog is chasing a ball : 0.0000
+```
+
+### For downstream tasks
+
+This folder provides a minimal inference implementation for easy usage. For training and fine-tuning on downstream tasks, please refer to the other task-specific folders.
+
+If you intend to use InternVideo for your own video-language tasks, use only the video encoder and text encoder for alignment tasks such as retrieval, and additionally use the features from the cross-modality decoder if your task involves modality fusion, such as video question answering. A minimal retrieval-style sketch is given at the end of this README.
+
+## TODO
+
+The training code and the L/14 model are on their way.
+
+## Acknowledgement
+
+This repo is built based on [CLIP](https://github.com/openai/CLIP) and [open_clip](https://github.com/mlfoundations/open_clip).
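+
+## Minimal retrieval-style usage (sketch)
+
+The snippet below is a minimal, non-authoritative sketch of the alignment-only usage described above: it ranks a set of candidate captions against a single video using the video and text encoders, in the same way `demo.py` does for zero-shot classification. The checkpoint filename and video path are assumptions; point them at whichever files you actually downloaded into `models/` and `data/`.
+
+```python
+import torch
+import InternVideo
+
+# Hypothetical paths; adjust to your local demo video and downloaded checkpoint.
+video = InternVideo.load_video("./data/demo.mp4").cuda()
+model = InternVideo.load_model("./models/InternVideo-MM-B-16.ckpt").cuda()
+
+captions = ["an airplane is taking off", "a dog is chasing a ball"]
+text = InternVideo.tokenize(captions).cuda()
+
+with torch.no_grad():
+    # L2-normalized embeddings, so the dot product below is a cosine similarity.
+    v = torch.nn.functional.normalize(model.encode_video(video.unsqueeze(0)), dim=1)
+    t = torch.nn.functional.normalize(model.encode_text(text), dim=1)
+    scores = (v @ t.T).squeeze(0).cpu().tolist()
+
+# Higher score = better match; these scores can be used directly for text-video retrieval.
+for caption, score in sorted(zip(captions, scores), key=lambda x: -x[1]):
+    print(f"{caption:30s}: {score:.4f}")
+```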
diff --git a/Pretrain/Multi-Modalities-Pretraining/data/demo.mp4 b/Pretrain/Multi-Modalities-Pretraining/data/demo.mp4 new file mode 100644 index 0000000..b03e11e Binary files /dev/null and b/Pretrain/Multi-Modalities-Pretraining/data/demo.mp4 differ diff --git a/Pretrain/Multi-Modalities-Pretraining/demo.py b/Pretrain/Multi-Modalities-Pretraining/demo.py new file mode 100644 index 0000000..edb1e18 --- /dev/null +++ b/Pretrain/Multi-Modalities-Pretraining/demo.py @@ -0,0 +1,23 @@ +import torch +import InternVideo + +text_cand = ["an airplane is taking off", "an airplane is flying", "a dog is chasing a ball"] +video = InternVideo.load_video("./data/demo.mp4").cuda() + +model = InternVideo.load_model("./models/InternVideo-MM-L-14.ckpt").cuda() +text = InternVideo.tokenize( + text_cand +).cuda() + +with torch.no_grad(): + text_features = model.encode_text(text) + video_features = model.encode_video(video.unsqueeze(0)) + + video_features = torch.nn.functional.normalize(video_features, dim=1) + text_features = torch.nn.functional.normalize(text_features, dim=1) + t = model.logit_scale.exp() + probs = (video_features @ text_features.T * t).softmax(dim=-1).cpu().numpy() + +print("Label probs: ") # [[9.5619422e-01 4.3805469e-02 2.0393253e-07]] +for t, p in zip(text_cand, probs[0]): + print("{:30s}: {:.4f}".format(t, p)) diff --git a/Pretrain/UniFormerV2 b/Pretrain/UniFormerV2 new file mode 160000 index 0000000..d390105 --- /dev/null +++ b/Pretrain/UniFormerV2 @@ -0,0 +1 @@ +Subproject commit d390105e588665af5029bfcceed5b9975d4b13bb diff --git a/Pretrain/VideoMAE/.gitignore b/Pretrain/VideoMAE/.gitignore new file mode 100644 index 0000000..b5f89e0 --- /dev/null +++ b/Pretrain/VideoMAE/.gitignore @@ -0,0 +1,147 @@ +# Docker file from Python is inspired from here : +# https://github.com/github/gitignore/blob/master/Python.gitignore + +# custom +.vscode + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +tests/report/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+
+# Cython debug symbols
+cython_debug/
+
+
+# others
+work_dir/
+pfs_work_dir/
diff --git a/Pretrain/VideoMAE/README.md b/Pretrain/VideoMAE/README.md
new file mode 100644
index 0000000..484cc30
--- /dev/null
+++ b/Pretrain/VideoMAE/README.md
@@ -0,0 +1,29 @@
+# VideoMAE
+The code is modified from [VideoMAE](https://github.com/MCG-NJU/VideoMAE), with the following features added:
+
+- supports adjusting the input resolution and the number of frames when fine-tuning (the original official codebase only supports adjusting the number of frames)
+- supports applying repeated augmentation when pre-training
+
+## Installation
+- Python 3.6 or higher
+- PyTorch 1.8 or higher
+- timm==0.4.8/0.4.12
+- deepspeed==0.5.8
+- TensorboardX
+- decord
+- einops
+- opencv-python
+- (optional) Petrel SDK (for reading data on Ceph)
+
+## ModelZoo
+
+| Backbone | Pretrain Data | Finetune Data | Epoch | \#Frame | Pre-train | Fine-tune | Top-1 | Top-5 |
+| :------: | :-----: | :-----:| :---: | :-------: | :----------------------: | :--------------------: | :---: | :---: |
+| ViT-B | UnlabeledHybrid | Kinetics-400 | 800 | 16 x 5 x 3 | [vit_b_hybrid_pt_800e.pth](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/pretrain/videomae/vit_b_hybrid_pt_800e.pth) | [vit_b_hybrid_pt_800e_k400_ft.pth](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/pretrain/videomae/vit_b_hybrid_pt_800e_k400_ft.pth) | 81.52 | 94.88 |
+| ViT-B | UnlabeledHybrid | K710* | 800 | 16 x 5 x 3 | same as above | [vit_b_hybrid_pt_800e_k710_ft.pth](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/pretrain/videomae/vit_b_hybrid_pt_800e_k710_ft.pth) | 79.33 | 94.03 |
+| ViT-B | UnlabeledHybrid | Something-Something V2 | 800 | 16 x 2 x 3 | same as above | [vit_b_hybrid_pt_800e_ssv2_ft.pth](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/pretrain/videomae/vit_b_hybrid_pt_800e_ssv2_ft.pth) | 71.22 | 93.31 |
+
+Note: K710 is the union of different versions of the Kinetics datasets (K400, K600, K700), where the label semantics are aligned and videos duplicated in the validation sets are removed. K710 contains 658k training videos and 67k validation videos.
+
+## Others
+Please refer to [VideoMAE](https://github.com/MCG-NJU/VideoMAE) for the Data, Pretrain and Finetune sections. A minimal checkpoint-inspection sketch is given below.
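+
+## Inspecting a released checkpoint (sketch)
+
+The snippet below is a minimal sketch, not part of the original codebase, for inspecting one of the ModelZoo files after downloading it. The top-level key layout (`model` / `module` / raw state dict) is an assumption and may differ between files, so the code probes for it rather than relying on a fixed format.
+
+```python
+import torch
+
+# Hypothetical local path to a downloaded ModelZoo file.
+ckpt_path = "vit_b_hybrid_pt_800e_k400_ft.pth"
+ckpt = torch.load(ckpt_path, map_location="cpu")
+
+# Checkpoints are commonly wrapped under a 'model' or 'module' key; fall back to the raw dict.
+state_dict = ckpt
+if isinstance(ckpt, dict):
+    for key in ("model", "module"):
+        if key in ckpt and isinstance(ckpt[key], dict):
+            state_dict = ckpt[key]
+            break
+
+print(f"{len(state_dict)} entries")
+for name, tensor in list(state_dict.items())[:5]:
+    print(name, tuple(getattr(tensor, "shape", ())))
+```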
diff --git a/Pretrain/VideoMAE/anet.py b/Pretrain/VideoMAE/anet.py new file mode 100644 index 0000000..cbaa32c --- /dev/null +++ b/Pretrain/VideoMAE/anet.py @@ -0,0 +1,493 @@ +# pylint: disable=line-too-long,too-many-lines,missing-docstring +import io +import os +import random +import warnings + +import cv2 +import decord +import numpy as np +import torch +import torch.distributed as dist +from decord import VideoReader, cpu +from numpy.lib.function_base import disp +from petrel_client.client import Client +from PIL import Image +from torch.utils.data import Dataset +from torchvision import transforms + +import video_transforms as video_transforms +import volume_transforms as volume_transforms + +# from video_transforms import create_random_augment +from random_erasing import RandomErasing + + +class ANetClsDataset(Dataset): + """Load your own video classification dataset.""" + + def __init__(self, + anno_path, + data_path, + mode='train', + clip_len=16, + frame_sample_rate=4, + crop_size=224, + short_side_size=256, + new_height=256, + new_width=340, + keep_aspect_ratio=True, + num_segment=1, + num_crop=1, + test_num_segment=1, + test_num_crop=1, + sparse_sampling=True, + args=None): + self.anno_path = anno_path + self.data_path = data_path + self.mode = mode + self.clip_len = clip_len + self.frame_sample_rate = frame_sample_rate + self.crop_size = crop_size + self.short_side_size = short_side_size + self.new_height = new_height + self.new_width = new_width + self.keep_aspect_ratio = keep_aspect_ratio + self.num_segment = num_segment + self.test_num_segment = test_num_segment + self.num_crop = num_crop + self.test_num_crop = test_num_crop + self.sparse_sampling = sparse_sampling + self.args = args + self.aug = False + self.rand_erase = False + + # self.use_temporal_gradient = False + # self.temporal_gradient_rate = 0.0 + if self.mode in ['train']: + self.aug = True + if self.args.reprob > 0: + self.rand_erase = True + if VideoReader is None: + raise ImportError( + "Unable to import `decord` which is required to read videos.") + + import pandas as pd + cleaned = pd.read_csv(self.anno_path, header=None, delimiter=' ') + self.dataset_samples = list(cleaned.values[:, 0]) + self.total_times = list(cleaned.values[:, 1]) + self.start_times = list(cleaned.values[:, 2]) + self.end_times = list(cleaned.values[:, 3]) + self.label_array = list(cleaned.values[:, 4]) + + self.client = Client() + + if (mode == 'train'): + pass + + elif (mode == 'validation'): + self.data_transform = video_transforms.Compose([ + video_transforms.Resize(self.short_side_size, + interpolation='bilinear'), + video_transforms.CenterCrop(size=(self.crop_size, + self.crop_size)), + volume_transforms.ClipToTensor(), + video_transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + ]) + elif mode == 'test': + self.data_resize = video_transforms.Compose([ + video_transforms.Resize(size=(short_side_size), + interpolation='bilinear') + ]) + self.data_transform = video_transforms.Compose([ + volume_transforms.ClipToTensor(), + video_transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + ]) + self.test_seg = [] + self.test_dataset = [] + self.test_total_times = [] + self.test_start_times = [] + self.test_end_times = [] + self.test_label_array = [] + for ck in range(self.test_num_segment): + for cp in range(self.test_num_crop): + for idx in range(len(self.label_array)): + sample_label = self.label_array[idx] + self.test_label_array.append(sample_label) + 
self.test_total_times.append(self.total_times[idx]) + self.test_start_times.append(self.start_times[idx]) + self.test_end_times.append(self.end_times[idx]) + self.test_dataset.append(self.dataset_samples[idx]) + self.test_seg.append((ck, cp)) + + def __getitem__(self, index): + if self.mode == 'train': + args = self.args + scale_t = 1 + # data_transform_func = self.data_transform + + sample = self.dataset_samples[index] + total_time = self.total_times[index] + start_time = self.start_times[index] + end_time = self.end_times[index] + buffer = self.loadvideo_decord(sample, + total_time, + start_time, + end_time, + sample_rate_scale=scale_t) + if len(buffer) == 0: + while len(buffer) == 0: + warnings.warn( + "video {} not correctly loaded during training".format( + sample)) + index = np.random.randint(self.__len__()) + sample = self.dataset_samples[index] + total_time = self.total_times[index] + start_time = self.start_times[index] + end_time = self.end_times[index] + buffer = self.loadvideo_decord(sample, + total_time, + start_time, + end_time, + sample_rate_scale=scale_t) + + # buffer = data_transform_func(buffer) # T H W C + if args.num_sample > 1: + frame_list = [] + label_list = [] + index_list = [] + for _ in range(args.num_sample): + new_frames = self._aug_frame(buffer, args) + label = self.label_array[index] + #new_frames = [new_frames] + frame_list.append(new_frames) + label_list.append(label) + index_list.append(index) + return frame_list, label_list, index_list, {} + else: + buffer = self._aug_frame(buffer, args) + + return buffer, self.label_array[index], index, {} + + elif self.mode == 'validation': + sample = self.dataset_samples[index] + total_time = self.total_times[index] + start_time = self.start_times[index] + end_time = self.end_times[index] + buffer = self.loadvideo_decord(sample, total_time, start_time, + end_time) + if len(buffer) == 0: + while len(buffer) == 0: + warnings.warn( + "video {} not correctly loaded during validation". 
+ format(sample)) + index = np.random.randint(self.__len__()) + sample = self.dataset_samples[index] + total_time = self.total_times[index] + start_time = self.start_times[index] + end_time = self.end_times[index] + buffer = self.loadvideo_decord(sample, total_time, + start_time, end_time) + buffer = self.data_transform(buffer) + return buffer, self.label_array[index], sample.split( + "/")[-1].split(".")[0] + + elif self.mode == 'test': + sample = self.test_dataset[index] + total_time = self.total_times[index] + start_time = self.start_times[index] + end_time = self.end_times[index] + chunk_nb, split_nb = self.test_seg[index] + buffer = self.loadvideo_decord(sample, total_time, start_time, + end_time) + + while len(buffer) == 0: + warnings.warn("video {}, temporal {}, spatial {} not found during testing".format(\ + str(self.test_dataset[index]), chunk_nb, split_nb)) + index = np.random.randint(self.__len__()) + sample = self.test_dataset[index] + total_time = self.total_times[index] + start_time = self.start_times[index] + end_time = self.end_times[index] + chunk_nb, split_nb = self.test_seg[index] + buffer = self.loadvideo_decord(sample, total_time, start_time, + end_time) + + buffer = self.data_resize(buffer) + if isinstance(buffer, list): + buffer = np.stack(buffer, 0) + + spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \ + / (self.test_num_crop - 1) + temporal_step = max(1.0 * (buffer.shape[0] - self.clip_len) \ + / (self.test_num_segment - 1), 0) + temporal_start = int(chunk_nb * temporal_step) + spatial_start = int(split_nb * spatial_step) + if buffer.shape[1] >= buffer.shape[2]: + buffer = buffer[temporal_start:temporal_start + self.clip_len, \ + spatial_start:spatial_start + self.short_side_size, :, :] + else: + buffer = buffer[temporal_start:temporal_start + self.clip_len, \ + :, spatial_start:spatial_start + self.short_side_size, :] + + buffer = self.data_transform(buffer) + return buffer, self.test_label_array[index], sample.split("/")[-1].split(".")[0], \ + chunk_nb, split_nb + else: + raise NameError('mode {} unkown'.format(self.mode)) + + def _aug_frame( + self, + buffer, + args, + ): + + aug_transform = video_transforms.create_random_augment( + input_size=(self.crop_size, self.crop_size), + auto_augment=args.aa, + interpolation=args.train_interpolation, + ) + + buffer = [transforms.ToPILImage()(frame) for frame in buffer] + + buffer = aug_transform(buffer) + + buffer = [transforms.ToTensor()(img) for img in buffer] + buffer = torch.stack(buffer) # T C H W + buffer = buffer.permute(0, 2, 3, 1) # T H W C + + # T H W C + buffer = tensor_normalize(buffer, [0.485, 0.456, 0.406], + [0.229, 0.224, 0.225]) + # T H W C -> C T H W. + buffer = buffer.permute(3, 0, 1, 2) + # Perform data augmentation. 
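+        # scl / asp are the area-scale and aspect-ratio ranges passed to the random resized
+        # crop inside spatial_sampling (Inception-style: 8%-100% of the area, 3/4 to 4/3 ratio).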
+ scl, asp = ( + [0.08, 1.0], + [0.75, 1.3333], + ) + + buffer = spatial_sampling( + buffer, + spatial_idx=-1, + min_scale=256, + max_scale=320, + crop_size=224, + random_horizontal_flip=False if args.data_set == 'SSV2' else True, + inverse_uniform_sampling=False, + aspect_ratio=asp, + scale=scl, + motion_shift=False) + + if self.rand_erase: + erase_transform = RandomErasing( + args.reprob, + mode=args.remode, + max_count=args.recount, + num_splits=args.recount, + device="cpu", + ) + buffer = buffer.permute(1, 0, 2, 3) # C T H W -> T C H W + buffer = erase_transform(buffer) + buffer = buffer.permute(1, 0, 2, 3) # T C H W -> C T H W + + # buffer = self.data_transform_after(buffer) + return buffer + + def loadvideo_decord(self, + sample, + total_time, + start_time, + end_time, + sample_rate_scale=1): + """Load video content using Decord""" + # pylint: disable=line-too-long, bare-except, unnecessary-comprehension + # fname = self.data_path + sample + fname = sample + + if self.keep_aspect_ratio: + if fname.startswith('s3'): + video_bytes = self.client.get(fname) + vr = VideoReader(io.BytesIO(video_bytes), + num_threads=1, + ctx=cpu(0)) + else: + vr = VideoReader(fname, num_threads=1, ctx=cpu(0)) + else: + if fname.startswith('s3:'): + video_bytes = self.client.get(fname) + vr = VideoReader(io.BytesIO(video_bytes), + width=self.new_width, + height=self.new_height, + num_threads=1, + ctx=cpu(0)) + else: + vr = VideoReader(fname, + width=self.new_width, + height=self.new_height, + num_threads=1, + ctx=cpu(0)) + + if self.mode == 'test': + all_index = [x for x in range(0, len(vr), self.frame_sample_rate)] + while len(all_index) < self.clip_len: + all_index.append(all_index[-1]) + vr.seek(0) + buffer = vr.get_batch(all_index).asnumpy() + return buffer + + # handle temporal segments + total_num_frames = len(vr) + trimmed_video_len = int(total_num_frames * (end_time - start_time) / + total_time) + + all_index = [] + if self.sparse_sampling: + average_duration = trimmed_video_len // self.clip_len + if average_duration > 0: + all_index += list( + np.multiply(list(range(self.clip_len)), average_duration) + + np.random.randint(average_duration, size=self.clip_len)) + elif trimmed_video_len > self.num_segment: + all_index += list( + np.sort( + np.random.randint(trimmed_video_len, + size=self.clip_len))) + else: + all_index += [0] * (self.clip_len - trimmed_video_len) + list( + range(trimmed_video_len)) + all_index = list(np.array(all_index)) + else: + seg_len = trimmed_video_len // self.num_segment + converted_len = int(self.clip_len * self.frame_sample_rate) + for i in range(self.num_segment): + if seg_len <= converted_len: + index = np.linspace(0, + seg_len, + num=seg_len // self.frame_sample_rate) + index = np.concatenate( + (index, + np.ones(self.clip_len - + seg_len // self.frame_sample_rate) * seg_len)) + index = np.clip(index, 0, seg_len - 1).astype(np.int64) + else: + end_idx = np.random.randint(converted_len, seg_len) + str_idx = end_idx - converted_len + index = np.linspace(str_idx, end_idx, num=self.clip_len) + index = np.clip(index, str_idx, + end_idx - 1).astype(np.int64) + index = index + i * seg_len + all_index.extend(list(index)) + + all_index = all_index[::int(sample_rate_scale)] + + init_off = int(total_num_frames * start_time / total_time) + all_index = [idx + init_off for idx in all_index] + all_index = [ + idx if idx < total_num_frames else total_num_frames - 1 + for idx in all_index + ] + + vr.seek(0) + buffer = vr.get_batch(all_index).asnumpy() + return buffer + + def 
__len__(self): + if self.mode != 'test': + return len(self.dataset_samples) + else: + return len(self.test_dataset) + + +def spatial_sampling( + frames, + spatial_idx=-1, + min_scale=256, + max_scale=320, + crop_size=224, + random_horizontal_flip=True, + inverse_uniform_sampling=False, + aspect_ratio=None, + scale=None, + motion_shift=False, +): + """ + Perform spatial sampling on the given video frames. If spatial_idx is + -1, perform random scale, random crop, and random flip on the given + frames. If spatial_idx is 0, 1, or 2, perform spatial uniform sampling + with the given spatial_idx. + Args: + frames (tensor): frames of images sampled from the video. The + dimension is `num frames` x `height` x `width` x `channel`. + spatial_idx (int): if -1, perform random spatial sampling. If 0, 1, + or 2, perform left, center, right crop if width is larger than + height, and perform top, center, buttom crop if height is larger + than width. + min_scale (int): the minimal size of scaling. + max_scale (int): the maximal size of scaling. + crop_size (int): the size of height and width used to crop the + frames. + inverse_uniform_sampling (bool): if True, sample uniformly in + [1 / max_scale, 1 / min_scale] and take a reciprocal to get the + scale. If False, take a uniform sample from [min_scale, + max_scale]. + aspect_ratio (list): Aspect ratio range for resizing. + scale (list): Scale range for resizing. + motion_shift (bool): Whether to apply motion shift for resizing. + Returns: + frames (tensor): spatially sampled frames. + """ + assert spatial_idx in [-1, 0, 1, 2] + if spatial_idx == -1: + if aspect_ratio is None and scale is None: + frames, _ = video_transforms.random_short_side_scale_jitter( + images=frames, + min_size=min_scale, + max_size=max_scale, + inverse_uniform_sampling=inverse_uniform_sampling, + ) + frames, _ = video_transforms.random_crop(frames, crop_size) + else: + transform_func = (video_transforms.random_resized_crop_with_shift + if motion_shift else + video_transforms.random_resized_crop) + frames = transform_func( + images=frames, + target_height=crop_size, + target_width=crop_size, + scale=scale, + ratio=aspect_ratio, + ) + if random_horizontal_flip: + frames, _ = video_transforms.horizontal_flip(0.5, frames) + else: + # The testing is deterministic and no jitter should be performed. + # min_scale, max_scale, and crop_size are expect to be the same. + assert len({min_scale, max_scale, crop_size}) == 1 + frames, _ = video_transforms.random_short_side_scale_jitter( + frames, min_scale, max_scale) + frames, _ = video_transforms.uniform_crop(frames, crop_size, + spatial_idx) + return frames + + +def tensor_normalize(tensor, mean, std): + """ + Normalize a given tensor by subtracting the mean and dividing the std. + Args: + tensor (tensor): tensor to normalize. + mean (tensor or list): mean value to subtract. + std (tensor or list): std to divide. 
+ """ + if tensor.dtype == torch.uint8: + tensor = tensor.float() + tensor = tensor / 255.0 + if type(mean) == list: + mean = torch.tensor(mean) + if type(std) == list: + std = torch.tensor(std) + tensor = tensor - mean + tensor = tensor / std + return tensor diff --git a/Pretrain/VideoMAE/datasets.py b/Pretrain/VideoMAE/datasets.py new file mode 100644 index 0000000..e569f9b --- /dev/null +++ b/Pretrain/VideoMAE/datasets.py @@ -0,0 +1,467 @@ +# -------------------------------------------------------- +# Based on BEiT, timm, DINO and DeiT code bases +# https://github.com/microsoft/unilm/tree/master/beit +# https://github.com/rwightman/pytorch-image-models/tree/master/timm +# https://github.com/facebookresearch/deit +# https://github.com/facebookresearch/dino +# --------------------------------------------------------' +import os + +import torch +from timm.data import create_transform +from timm.data.constants import ( + IMAGENET_DEFAULT_MEAN, + IMAGENET_DEFAULT_STD, + IMAGENET_INCEPTION_MEAN, + IMAGENET_INCEPTION_STD, +) +from torchvision import datasets, transforms + +from anet import ANetClsDataset +from kinetics import RawFrameDataset, VideoClsDataset +from mae import HybridVideoMAE, VideoMAE +from masking_generator import ( + CellRunningMaskingGenerator, + RandomMaskingGenerator, + TemporalCenteringProgressiveMaskingGenerator, + TemporalConsistencyMaskingGenerator, + TemporalProgressiveMaskingGenerator, +) +from ssv2 import SSRawFrameClsDataset +from transforms import * + + +class DataAugmentationForMAE(object): + + def __init__(self, args): + imagenet_default_mean_and_std = args.imagenet_default_mean_and_std + mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN + std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD + self.input_mean = [0.485, 0.456, 0.406] + self.input_std = [0.229, 0.224, 0.225] + div = True + roll = False + normalize = GroupNormalize(self.input_mean, self.input_std) + self.train_augmentation = GroupMultiScaleCrop(args.input_size, + [1, .875, .75, .66]) + self.transform = transforms.Compose([ + # GroupScale((240,320)), + self.train_augmentation, + Stack(roll=roll), + ToTorchFormatTensor(div=div), + normalize, + ]) + if args.mask_type == 'random': + self.masked_position_generator = RandomMaskingGenerator( + args.window_size, args.mask_ratio) + elif args.mask_type == 't_consist': + self.masked_position_generator = TemporalConsistencyMaskingGenerator( + args.window_size, args.mask_ratio) + elif args.mask_type == 't_progressive': + self.masked_position_generator = TemporalProgressiveMaskingGenerator( + args.window_size, args.mask_ratio) + elif args.mask_type == 't_center_prog': + self.masked_position_generator = TemporalCenteringProgressiveMaskingGenerator( + args.window_size, args.mask_ratio) + + def __call__(self, images): + process_data, _ = self.transform(images) + return process_data, self.masked_position_generator() + + def __repr__(self): + repr = "(DataAugmentationForBEiT,\n" + repr += " transform = %s,\n" % str(self.transform) + repr += " Masked position generator = %s,\n" % str( + self.masked_position_generator) + repr += ")" + return repr + + +def build_pretraining_dataset(args): + transform = DataAugmentationForMAE(args) + dataset = HybridVideoMAE( + root=args.data_root, + # dataset = VideoMAE(root=args.data_root, + setting=args.data_path, + train=True, + test_mode=False, + video_ext='mp4', + is_color=True, + modality='rgb', + num_segments=1, + num_crop=1, + 
new_length=args.num_frames, + new_step=args.sampling_rate, + transform=transform, + temporal_jitter=False, + video_loader=True, + use_decord=True, + lazy_init=False, + num_sample=args.num_sample) + print("Data Aug = %s" % str(transform)) + return dataset + + +def build_dataset(is_train, test_mode, args): + + if args.data_set == 'Kinetics-400': + mode = None + anno_path = None + if is_train == True: + mode = 'train' + anno_path = os.path.join(args.data_path, 'train.csv') + elif test_mode == True: + mode = 'test' + anno_path = os.path.join(args.data_path, 'val.csv') + else: + mode = 'validation' + anno_path = os.path.join(args.data_path, 'val.csv') + + if not args.sparse_sample: + dataset = VideoClsDataset( + anno_path=anno_path, + data_path='/', + mode=mode, + clip_len=args.num_frames, + frame_sample_rate=args.sampling_rate, + num_segment=1, + test_num_segment=args.test_num_segment, + test_num_crop=args.test_num_crop, + num_crop=1 if not test_mode else 3, + keep_aspect_ratio=True, + crop_size=args.input_size, + short_side_size=args.short_side_size, + new_height=256, + new_width=320, + sparse_sample=False, + v4d_segment=args.num_segment if is_train else 1, + args=args) + else: + dataset = VideoClsDataset( + anno_path=anno_path, + data_path='/', + mode=mode, + clip_len=1, + frame_sample_rate=1, + num_segment=args.num_frames, + test_num_segment=args.test_num_segment, + test_num_crop=args.test_num_crop, + num_crop=1 if not test_mode else 3, + keep_aspect_ratio=True, + crop_size=args.input_size, + short_side_size=args.short_side_size, + new_height=256, + new_width=320, + sparse_sample=True, + v4d_segment=args.num_segment if is_train else 1, + args=args) + nb_classes = 400 + + elif args.data_set == 'Kinetics-600': + mode = None + anno_path = None + if is_train == True: + mode = 'train' + anno_path = os.path.join(args.data_path, 'train.csv') + elif test_mode == True: + mode = 'test' + anno_path = os.path.join(args.data_path, 'val.csv') + else: + mode = 'validation' + anno_path = os.path.join(args.data_path, 'val.csv') + + dataset = VideoClsDataset(anno_path=anno_path, + data_path='/', + mode=mode, + clip_len=args.num_frames, + frame_sample_rate=args.sampling_rate, + num_segment=1, + test_num_segment=args.test_num_segment, + test_num_crop=args.test_num_crop, + num_crop=1 if not test_mode else 3, + keep_aspect_ratio=True, + crop_size=args.input_size, + short_side_size=args.short_side_size, + new_height=256, + new_width=320, + args=args) + nb_classes = 600 + + elif args.data_set == 'Kinetics-700': + mode = None + anno_path = None + if is_train == True: + mode = 'train' + anno_path = os.path.join(args.data_path, 'train.csv') + elif test_mode == True: + mode = 'test' + anno_path = os.path.join(args.data_path, 'val.csv') + else: + mode = 'validation' + anno_path = os.path.join(args.data_path, 'val.csv') + + dataset = VideoClsDataset(anno_path=anno_path, + data_path='/', + mode=mode, + clip_len=args.num_frames, + frame_sample_rate=args.sampling_rate, + num_segment=1, + test_num_segment=args.test_num_segment, + test_num_crop=args.test_num_crop, + num_crop=1 if not test_mode else 3, + keep_aspect_ratio=True, + crop_size=args.input_size, + short_side_size=args.short_side_size, + new_height=256, + new_width=320, + args=args) + nb_classes = 700 + + elif args.data_set == 'MixKinetics': + mode = None + anno_path = None + if is_train == True: + mode = 'train' + anno_path = os.path.join(args.data_path, 'train.csv') + elif test_mode == True: + mode = 'test' + anno_path = os.path.join(args.data_path, 'val.csv') + 
else: + mode = 'validation' + anno_path = os.path.join(args.data_path, 'val.csv') + + dataset = VideoClsDataset(anno_path=anno_path, + data_path='/', + mode=mode, + clip_len=args.num_frames, + frame_sample_rate=args.sampling_rate, + num_segment=1, + test_num_segment=args.test_num_segment, + test_num_crop=args.test_num_crop, + num_crop=1 if not test_mode else 3, + keep_aspect_ratio=True, + crop_size=args.input_size, + short_side_size=args.short_side_size, + new_height=256, + new_width=320, + args=args) + nb_classes = 710 + + elif args.data_set == 'SSV2': + mode = None + anno_path = None + if is_train == True: + mode = 'train' + anno_path = os.path.join(args.data_path, 'train.csv') + elif test_mode == True: + mode = 'test' + anno_path = os.path.join(args.data_path, 'val.csv') + else: + mode = 'validation' + anno_path = os.path.join(args.data_path, 'val.csv') + + dataset = SSRawFrameClsDataset(anno_path=anno_path, + data_path='/', + mode=mode, + clip_len=1, + num_segment=args.num_frames, + test_num_segment=args.test_num_segment, + test_num_crop=args.test_num_crop, + num_crop=1 if not test_mode else 3, + keep_aspect_ratio=True, + crop_size=args.input_size, + short_side_size=args.short_side_size, + new_height=256, + new_width=320, + filename_tmpl='img_{:05}.jpg', + args=args) + + # dataset = RawFrameDataset(anno_path=anno_path, + # data_path='/', + # mode=mode, + # clip_len=args.num_frames, + # frame_sample_rate=args.sampling_rate, + # num_segment=1, + # test_num_segment=args.test_num_segment, + # test_num_crop=args.test_num_crop, + # num_crop=1 if not test_mode else 3, + # keep_aspect_ratio=True, + # crop_size=args.input_size, + # short_side_size=args.short_side_size, + # new_height=256, + # new_width=320, + # filename_tmpl='img_{:05}.jpg', + # args=args) + nb_classes = 174 + + elif args.data_set == 'UCF101': + mode = None + anno_path = None + if is_train == True: + mode = 'train' + anno_path = os.path.join(args.data_path, 'train.csv') + elif test_mode == True: + mode = 'test' + anno_path = os.path.join(args.data_path, 'val.csv') + else: + mode = 'validation' + anno_path = os.path.join(args.data_path, 'val.csv') + + dataset = VideoClsDataset(anno_path=anno_path, + data_path='/', + mode=mode, + clip_len=args.num_frames, + frame_sample_rate=args.sampling_rate, + num_segment=1, + test_num_segment=args.test_num_segment, + test_num_crop=args.test_num_crop, + num_crop=1 if not test_mode else 3, + keep_aspect_ratio=True, + crop_size=args.input_size, + short_side_size=args.short_side_size, + new_height=256, + new_width=320, + args=args) + nb_classes = 101 + + elif args.data_set == 'HMDB51': + mode = None + anno_path = None + if is_train == True: + mode = 'train' + anno_path = os.path.join(args.data_path, 'train.csv') + elif test_mode == True: + mode = 'test' + anno_path = os.path.join(args.data_path, 'val.csv') + else: + mode = 'validation' + anno_path = os.path.join(args.data_path, 'val.csv') + + dataset = VideoClsDataset(anno_path=anno_path, + data_path='/', + mode=mode, + clip_len=args.num_frames, + frame_sample_rate=args.sampling_rate, + num_segment=1, + test_num_segment=args.test_num_segment, + test_num_crop=args.test_num_crop, + num_crop=1 if not test_mode else 3, + keep_aspect_ratio=True, + crop_size=args.input_size, + short_side_size=args.short_side_size, + new_height=256, + new_width=320, + args=args) + nb_classes = 51 + + elif args.data_set == 'Diving48': + mode = None + anno_path = None + if is_train == True: + mode = 'train' + anno_path = os.path.join(args.data_path, 'train.csv') + elif 
test_mode == True: + mode = 'test' + anno_path = os.path.join(args.data_path, 'val.csv') + else: + mode = 'validation' + anno_path = os.path.join(args.data_path, 'val.csv') + + dataset = VideoClsDataset(anno_path=anno_path, + data_path='/', + mode=mode, + clip_len=args.num_frames, + frame_sample_rate=args.sampling_rate, + num_segment=1, + test_num_segment=args.test_num_segment, + test_num_crop=args.test_num_crop, + num_crop=1 if not test_mode else 3, + keep_aspect_ratio=True, + crop_size=args.input_size, + short_side_size=args.short_side_size, + new_height=256, + new_width=320, + args=args) + nb_classes = 48 + elif args.data_set == 'ANet': + mode = None + anno_path = None + if is_train == True: + mode = 'train' + anno_path = os.path.join(args.data_path, 'train.csv') + elif test_mode == True: + mode = 'test' + anno_path = os.path.join(args.data_path, 'test.csv') + else: + mode = 'validation' + anno_path = os.path.join(args.data_path, 'val.csv') + + dataset = ANetClsDataset(anno_path=anno_path, + data_path='/', + mode=mode, + clip_len=args.num_frames, + frame_sample_rate=args.sampling_rate, + num_segment=1, + test_num_segment=args.test_num_segment, + test_num_crop=args.test_num_crop, + num_crop=1 if not test_mode else 3, + keep_aspect_ratio=True, + crop_size=args.input_size, + short_side_size=args.short_side_size, + new_height=256, + new_width=320, + args=args) + nb_classes = 200 + else: + raise NotImplementedError() + assert nb_classes == args.nb_classes + print("Number of the class = %d" % args.nb_classes) + + return dataset, nb_classes + + +def build_transform(is_train, test_mode, args): + resize_im = args.input_size > 32 + imagenet_default_mean_and_std = args.imagenet_default_mean_and_std + mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN + std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD + + if is_train: + # this should always dispatch to transforms_imagenet_train + transform = create_transform( + input_size=args.input_size, + is_training=True, + color_jitter=args.color_jitter, + auto_augment=args.aa, + interpolation=args.train_interpolation, + re_prob=args.reprob, + re_mode=args.remode, + re_count=args.recount, + mean=mean, + std=std, + ) + if not resize_im: + # replace RandomResizedCropAndInterpolation with + # RandomCrop + transform.transforms[0] = transforms.RandomCrop(args.input_size, + padding=4) + return transform + + t = [] + if resize_im: + if args.crop_pct is None: + if args.input_size < 384: + args.crop_pct = 224 / 256 + else: + args.crop_pct = 1.0 + size = int(args.input_size / args.crop_pct) + t.append( + transforms.Resize( + size, + interpolation=3), # to maintain same ratio w.r.t. 
224 images + ) + t.append(transforms.CenterCrop(args.input_size)) + + t.append(transforms.ToTensor()) + t.append(transforms.Normalize(mean, std)) + return transforms.Compose(t) diff --git a/Pretrain/VideoMAE/engine_for_finetuning.py b/Pretrain/VideoMAE/engine_for_finetuning.py new file mode 100644 index 0000000..d498e72 --- /dev/null +++ b/Pretrain/VideoMAE/engine_for_finetuning.py @@ -0,0 +1,345 @@ +# -------------------------------------------------------- +# Based on BEiT, timm, DINO and DeiT code bases +# https://github.com/microsoft/unilm/tree/master/beit +# https://github.com/rwightman/pytorch-image-models/tree/master/timm +# https://github.com/facebookresearch/deit +# https://github.com/facebookresearch/dino +# --------------------------------------------------------' +import math +import os +import sys +from typing import Iterable, Optional + +import numpy as np +import torch +from scipy.special import softmax +from timm.data import Mixup +from timm.utils import ModelEma, accuracy + +import utils + + +def train_class_batch(model, samples, target, criterion): + outputs = model(samples) + loss = criterion(outputs, target) + return loss, outputs + + +def get_loss_scale_for_deepspeed(model): + optimizer = model.optimizer + return optimizer.loss_scale if hasattr( + optimizer, "loss_scale") else optimizer.cur_scale + + +def train_one_epoch(model: torch.nn.Module, + criterion: torch.nn.Module, + data_loader: Iterable, + optimizer: torch.optim.Optimizer, + device: torch.device, + epoch: int, + loss_scaler, + args, + max_norm: float = 0, + model_ema: Optional[ModelEma] = None, + mixup_fn: Optional[Mixup] = None, + log_writer=None, + start_steps=None, + lr_schedule_values=None, + wd_schedule_values=None, + num_training_steps_per_epoch=None, + update_freq=None): + model.train(True) + metric_logger = utils.MetricLogger(delimiter=" ") + metric_logger.add_meter( + 'lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) + metric_logger.add_meter( + 'min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) + header = 'Epoch: [{}]'.format(epoch) + print_freq = 10 + + if loss_scaler is None: + model.zero_grad() + model.micro_steps = 0 + else: + optimizer.zero_grad() + + for data_iter_step, (samples, targets, _, _) in enumerate( + metric_logger.log_every(data_loader, print_freq, header)): + step = data_iter_step // update_freq + if step >= num_training_steps_per_epoch: + continue + it = start_steps + step # global training iteration + # Update LR & WD for the first acc + if lr_schedule_values is not None or wd_schedule_values is not None and data_iter_step % update_freq == 0: + for i, param_group in enumerate(optimizer.param_groups): + if lr_schedule_values is not None: + param_group["lr"] = lr_schedule_values[it] * param_group[ + "lr_scale"] + if wd_schedule_values is not None and param_group[ + "weight_decay"] > 0: + param_group["weight_decay"] = wd_schedule_values[it] + + samples = samples.to(device, non_blocking=True) + targets = targets.to(device, non_blocking=True) + + if args.num_segment > 1: + samples = samples.view((-1, ) + samples.size()[2:]) + + if mixup_fn is not None: + B, C, T, H, W = samples.shape + samples = samples.view(B, C * T, H, W) + samples, targets = mixup_fn(samples, targets) + samples = samples.view(B, C, T, H, W) + + if loss_scaler is None: + samples = samples.half() + loss, output = train_class_batch(model, samples, targets, + criterion) + else: + with torch.cuda.amp.autocast(): + loss, output = train_class_batch(model, samples, targets, + criterion) + + 
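+        # Two optimisation paths share this loop: when loss_scaler is None the
+        # model is assumed to be a DeepSpeed engine (fp16 inputs above,
+        # model.backward() / model.step() below, loss scale read via
+        # get_loss_scale_for_deepspeed); otherwise the native torch.cuda.amp
+        # autocast path is used with a timm-style loss scaler, accumulating
+        # gradients for update_freq micro-steps before each optimizer update.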
loss_value = loss.item() + + if not math.isfinite(loss_value): + print("Loss is {}, stopping training".format(loss_value)) + sys.exit(1) + + if loss_scaler is None: + loss /= update_freq + model.backward(loss) + grad_norm = model.get_global_grad_norm() + + # if utils.is_main_process(): + # import json + # all_grad = {} + # for name, param in model.module.named_parameters(): + # print(name) + # if hasattr(param, 'grad') and param.grad is not None: + # all_grad[name] = torch.norm(param.grad.detach(), 2).item() + # with open('pfs_work_dir/grad_test/temp.json', 'w') as f: + # json.dump(all_grad, f) + # torch.distributed.barrier() + # sys.exit(1) + + model.step() + + if (data_iter_step + 1) % update_freq == 0: + # model.zero_grad() + # Deepspeed will call step() & model.zero_grad() automatic + if model_ema is not None: + model_ema.update(model) + loss_scale_value = get_loss_scale_for_deepspeed(model) + else: + # this attribute is added by timm on one optimizer (adahessian) + is_second_order = hasattr( + optimizer, 'is_second_order') and optimizer.is_second_order + loss /= update_freq + grad_norm = loss_scaler(loss, + optimizer, + clip_grad=max_norm, + parameters=model.parameters(), + create_graph=is_second_order, + update_grad=(data_iter_step + 1) % + update_freq == 0) + if (data_iter_step + 1) % update_freq == 0: + optimizer.zero_grad() + if model_ema is not None: + model_ema.update(model) + loss_scale_value = loss_scaler.state_dict()["scale"] + + torch.cuda.synchronize() + + if mixup_fn is None: + class_acc = (output.max(-1)[-1] == targets).float().mean() + else: + class_acc = None + metric_logger.update(loss=loss_value) + metric_logger.update(class_acc=class_acc) + metric_logger.update(loss_scale=loss_scale_value) + min_lr = 10. + max_lr = 0. + for group in optimizer.param_groups: + min_lr = min(min_lr, group["lr"]) + max_lr = max(max_lr, group["lr"]) + + metric_logger.update(lr=max_lr) + metric_logger.update(min_lr=min_lr) + weight_decay_value = None + for group in optimizer.param_groups: + if group["weight_decay"] > 0: + weight_decay_value = group["weight_decay"] + metric_logger.update(weight_decay=weight_decay_value) + metric_logger.update(grad_norm=grad_norm) + + if log_writer is not None: + log_writer.update(loss=loss_value, head="loss") + log_writer.update(class_acc=class_acc, head="loss") + log_writer.update(loss_scale=loss_scale_value, head="opt") + log_writer.update(lr=max_lr, head="opt") + log_writer.update(min_lr=min_lr, head="opt") + log_writer.update(weight_decay=weight_decay_value, head="opt") + log_writer.update(grad_norm=grad_norm, head="opt") + + log_writer.set_step() + + # gather the stats from all processes + metric_logger.synchronize_between_processes() + print("Averaged stats:", metric_logger) + return {k: meter.global_avg for k, meter in metric_logger.meters.items()} + + +@torch.no_grad() +def validation_one_epoch(data_loader, model, device): + criterion = torch.nn.CrossEntropyLoss() + + metric_logger = utils.MetricLogger(delimiter=" ") + header = 'Val:' + + # switch to evaluation mode + model.eval() + + for batch in metric_logger.log_every(data_loader, 10, header): + images = batch[0] + target = batch[1] + images = images.to(device, non_blocking=True) + target = target.to(device, non_blocking=True) + + # compute output + with torch.cuda.amp.autocast(): + output = model(images) + loss = criterion(output, target) + + acc1, acc5 = accuracy(output, target, topk=(1, 5)) + + batch_size = images.shape[0] + metric_logger.update(loss=loss.item()) + 
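+        # acc1 / acc5 are top-1 / top-5 percentages for this batch (timm's
+        # accuracy()); updating the meters with n=batch_size weights each
+        # batch by its size, so after synchronize_between_processes() below
+        # the reported global averages are per-clip accuracies over the whole
+        # validation set.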
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size) + metric_logger.meters['acc5'].update(acc5.item(), n=batch_size) + # gather the stats from all processes + metric_logger.synchronize_between_processes() + print( + '* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}' + .format(top1=metric_logger.acc1, + top5=metric_logger.acc5, + losses=metric_logger.loss)) + + return {k: meter.global_avg for k, meter in metric_logger.meters.items()} + + +@torch.no_grad() +def final_test(data_loader, model, device, file): + criterion = torch.nn.CrossEntropyLoss() + + metric_logger = utils.MetricLogger(delimiter=" ") + header = 'Test:' + + # switch to evaluation mode + model.eval() + final_result = [] + + for batch in metric_logger.log_every(data_loader, 10, header): + images = batch[0] + target = batch[1] + ids = batch[2] + chunk_nb = batch[3] + split_nb = batch[4] + images = images.to(device, non_blocking=True) + target = target.to(device, non_blocking=True) + + # compute output + with torch.cuda.amp.autocast(): + output = model(images) + loss = criterion(output, target) + + for i in range(output.size(0)): + string = "{} {} {} {} {}\n".format(ids[i], \ + str(output.data[i].cpu().numpy().tolist()), \ + str(int(target[i].cpu().numpy())), \ + str(int(chunk_nb[i].cpu().numpy())), \ + str(int(split_nb[i].cpu().numpy()))) + final_result.append(string) + + acc1, acc5 = accuracy(output, target, topk=(1, 5)) + + batch_size = images.shape[0] + metric_logger.update(loss=loss.item()) + metric_logger.meters['acc1'].update(acc1.item(), n=batch_size) + metric_logger.meters['acc5'].update(acc5.item(), n=batch_size) + + if not os.path.exists(file): + os.mknod(file) + with open(file, 'w') as f: + f.write("{}, {}\n".format(acc1, acc5)) + for line in final_result: + f.write(line) + # gather the stats from all processes + metric_logger.synchronize_between_processes() + print( + '* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}' + .format(top1=metric_logger.acc1, + top5=metric_logger.acc5, + losses=metric_logger.loss)) + + return {k: meter.global_avg for k, meter in metric_logger.meters.items()} + + +def merge(eval_path, num_tasks, method='prob'): + assert method in ['prob', 'score'] + dict_feats = {} + dict_label = {} + dict_pos = {} + print("Reading individual output files") + + for x in range(num_tasks): + file = os.path.join(eval_path, str(x) + '.txt') + lines = open(file, 'r').readlines()[1:] + for line in lines: + line = line.strip() + name = line.split('[')[0] + label = line.split(']')[1].split(' ')[1] + chunk_nb = line.split(']')[1].split(' ')[2] + split_nb = line.split(']')[1].split(' ')[3] + data = np.fromstring(line.split('[')[1].split(']')[0], + dtype=np.float, + sep=',') + if not name in dict_feats: + dict_feats[name] = [] + dict_label[name] = 0 + dict_pos[name] = [] + if chunk_nb + split_nb in dict_pos[name]: + continue + if method == 'prob': + dict_feats[name].append(softmax(data)) + else: + dict_feats[name].append(data) + dict_pos[name].append(chunk_nb + split_nb) + dict_label[name] = label + print("Computing final results") + + input_lst = [] + print(len(dict_feats)) + for i, item in enumerate(dict_feats): + input_lst.append([i, item, dict_feats[item], dict_label[item]]) + from multiprocessing import Pool + p = Pool(64) + ans = p.map(compute_video, input_lst) + top1 = [x[1] for x in ans] + top5 = [x[2] for x in ans] + pred = [x[0] for x in ans] + label = [x[3] for x in ans] + final_top1, final_top5 = np.mean(top1), 
np.mean(top5) + + # print(final_top1*100 ,final_top5*100) + return final_top1 * 100, final_top5 * 100 + + +def compute_video(lst): + i, video_id, data, label = lst + feat = [x for x in data] + feat = np.mean(feat, axis=0) + pred = np.argmax(feat) + top1 = (int(pred) == int(label)) * 1.0 + top5 = (int(label) in np.argsort(-feat)[:5]) * 1.0 + return [pred, top1, top5, int(label)] diff --git a/Pretrain/VideoMAE/engine_for_pretraining.py b/Pretrain/VideoMAE/engine_for_pretraining.py new file mode 100644 index 0000000..7d4cd2d --- /dev/null +++ b/Pretrain/VideoMAE/engine_for_pretraining.py @@ -0,0 +1,171 @@ +# -------------------------------------------------------- +# Based on BEiT, timm, DINO and DeiT code bases +# https://github.com/microsoft/unilm/tree/master/beit +# https://github.com/rwightman/pytorch-image-models/tree/master/timm +# https://github.com/facebookresearch/deit +# https://github.com/facebookresearch/dino +# --------------------------------------------------------' +import math +import sys +from typing import Iterable + +import torch +import torch.nn as nn +from einops import rearrange +from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD + +import utils + + +def train_one_epoch(model: torch.nn.Module, + data_loader: Iterable, + optimizer: torch.optim.Optimizer, + device: torch.device, + epoch: int, + loss_scaler, + max_norm: float = 0, + patch_size: int = 16, + normlize_target: bool = True, + log_writer=None, + lr_scheduler=None, + start_steps=None, + lr_schedule_values=None, + wd_schedule_values=None): + model.train() + metric_logger = utils.MetricLogger(delimiter=" ") + metric_logger.add_meter( + 'lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) + metric_logger.add_meter( + 'min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) + header = 'Epoch: [{}]'.format(epoch) + print_freq = 10 + + loss_func = nn.MSELoss() + + data_loader.dataset.transform.masked_position_generator.update_state(epoch) + + for step, batch in enumerate( + metric_logger.log_every(data_loader, print_freq, header)): + # assign learning rate & weight decay for each step + it = start_steps + step # global training iteration + if lr_schedule_values is not None or wd_schedule_values is not None: + for i, param_group in enumerate(optimizer.param_groups): + if lr_schedule_values is not None: + param_group[ + "lr"] = lr_schedule_values[it] * param_group["lr_scale"] + if wd_schedule_values is not None and param_group[ + "weight_decay"] > 0: + param_group["weight_decay"] = wd_schedule_values[it] + + images, bool_masked_pos = batch + images = images.to(device, non_blocking=True) + bool_masked_pos = bool_masked_pos.to( + device, non_blocking=True).flatten(1).to(torch.bool) + # print(images.shape) + # import pdb; pdb.set_trace() + with torch.no_grad(): + # calculate the predict label + mean = torch.as_tensor(IMAGENET_DEFAULT_MEAN).to(device)[None, :, + None, None, + None] + std = torch.as_tensor(IMAGENET_DEFAULT_STD).to(device)[None, :, + None, None, + None] + unnorm_images = images * std + mean # in [0, 1] + + if normlize_target: + images_squeeze = rearrange( + unnorm_images, + 'b c (t p0) (h p1) (w p2) -> b (t h w) (p0 p1 p2) c', + p0=2, + p1=patch_size, + p2=patch_size) + images_norm = (images_squeeze - images_squeeze.mean( + dim=-2, keepdim=True)) / ( + images_squeeze.var(dim=-2, unbiased=True, + keepdim=True).sqrt() + 1e-6) + # we find that the mean is about 0.48 and standard deviation is about 0.08. 
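+                # Normalized-pixel reconstruction target (as in MAE): every
+                # tubelet of p0 * p1 * p2 pixels (2 frames x patch_size x
+                # patch_size) is standardized by its own per-channel mean and
+                # std before being flattened below, so with the default
+                # patch_size=16 each masked token regresses
+                # 2 * 16 * 16 * 3 = 1536 normalized values.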
+ images_patch = rearrange(images_norm, 'b n p c -> b n (p c)') + else: + images_patch = rearrange( + unnorm_images, + 'b c (t p0) (h p1) (w p2) -> b (t h w) (p0 p1 p2 c)', + p0=2, + p1=patch_size, + p2=patch_size) + + B, N, C = images_patch.shape + labels = images_patch[bool_masked_pos].reshape(B, -1, C) + + if loss_scaler is None: + outputs = model(images, bool_masked_pos) + loss = loss_func(input=outputs, target=labels) + else: + with torch.cuda.amp.autocast(): + outputs = model(images, bool_masked_pos) + loss = loss_func(input=outputs, target=labels) + + loss_value = loss.item() + + if not math.isfinite(loss_value): + print("Loss is {}, stopping training".format(loss_value)) + sys.exit(2) + + optimizer.zero_grad() + + if loss_scaler is None: + loss.backward() + if max_norm is None: + grad_norm = utils.get_grad_norm_(model.parameters()) + else: + grad_norm = torch.nn.utils.clip_grad_norm_( + model.parameters(), max_norm) + optimizer.step() + loss_scale_value = 0 + else: + # this attribute is added by timm on one optimizer (adahessian) + is_second_order = hasattr( + optimizer, 'is_second_order') and optimizer.is_second_order + grad_norm = loss_scaler( + loss, + optimizer, + clip_grad=max_norm, + parameters=model.parameters(), + create_graph=is_second_order) + loss_scale_value = loss_scaler.state_dict()["scale"] + + torch.cuda.synchronize() + + metric_logger.update(loss=loss_value) + metric_logger.update(loss_scale=loss_scale_value) + min_lr = 10. + max_lr = 0. + for group in optimizer.param_groups: + min_lr = min(min_lr, group["lr"]) + max_lr = max(max_lr, group["lr"]) + + metric_logger.update(lr=max_lr) + metric_logger.update(min_lr=min_lr) + weight_decay_value = None + for group in optimizer.param_groups: + if group["weight_decay"] > 0: + weight_decay_value = group["weight_decay"] + metric_logger.update(weight_decay=weight_decay_value) + metric_logger.update(grad_norm=grad_norm) + + if log_writer is not None: + log_writer.update(loss=loss_value, head="loss") + log_writer.update(loss_scale=loss_scale_value, head="opt") + log_writer.update(lr=max_lr, head="opt") + log_writer.update(min_lr=min_lr, head="opt") + log_writer.update(weight_decay=weight_decay_value, head="opt") + log_writer.update(grad_norm=grad_norm, head="opt") + + log_writer.set_step() + + if lr_scheduler is not None: + lr_scheduler.step_update(start_steps + step) + # gather the stats from all processes + metric_logger.synchronize_between_processes() + print("Averaged stats:", metric_logger) + return {k: meter.global_avg for k, meter in metric_logger.meters.items()} diff --git a/Pretrain/VideoMAE/ensemble.py b/Pretrain/VideoMAE/ensemble.py new file mode 100644 index 0000000..8bed50c --- /dev/null +++ b/Pretrain/VideoMAE/ensemble.py @@ -0,0 +1,86 @@ +import os + +import numpy as np +from matplotlib import use +from scipy.special import softmax + + +def merge(eval_paths, num_tasks, use_softmax=False): + dict_feats = {} + dict_label = {} + print("Reading individual output files") + + if not isinstance(eval_paths, list): + eval_paths = [eval_paths] + + for eval_path in eval_paths: + dict_pos = {} + for x in range(num_tasks): + file = os.path.join(eval_path, str(x) + '.txt') + lines = open(file, 'r').readlines()[1:] + for line in lines: + line = line.strip() + name = line.split('[')[0] + label = line.split(']')[1].split(' ')[1] + chunk_nb = line.split(']')[1].split(' ')[2] + split_nb = line.split(']')[1].split(' ')[3] + data = np.fromstring(line.split('[')[1].split(']')[0], + dtype=np.float64, + sep=',') + if not name in 
dict_feats: + dict_feats[name] = [] + dict_label[name] = 0 + if not name in dict_pos: + dict_pos[name] = [] + if chunk_nb + split_nb in dict_pos[name]: + continue + if use_softmax: + dict_feats[name].append(softmax(data)) + else: + dict_feats[name].append(data) + dict_pos[name].append(chunk_nb + split_nb) + dict_label[name] = label + print("Computing final results") + + input_lst = [] + print(len(dict_feats)) + for i, item in enumerate(dict_feats): + input_lst.append([i, item, dict_feats[item], dict_label[item]]) + from multiprocessing import Pool + p = Pool(64) + ans = p.map(compute_video, input_lst) + top1 = [x[1] for x in ans] + top5 = [x[2] for x in ans] + pred = [x[0] for x in ans] + label = [x[3] for x in ans] + final_top1, final_top5 = np.mean(top1), np.mean(top5) + + return final_top1 * 100, final_top5 * 100 + + +def compute_video(lst): + i, video_id, data, label = lst + feat = [x for x in data] + feat = np.mean(feat, axis=0) + # print(feat.shape) + try: + pred = np.argmax(feat) + top1 = (int(pred) == int(label)) * 1.0 + top5 = (int(label) in np.argsort(-feat)[:5]) * 1.0 + except: + pred = 0 + top1 = 1.0 + top5 = 1.0 + label = 0 + return [pred, top1, top5, int(label)] + + +if __name__ == '__main__': + eval_path1 = '/mnt/petrelfs/huangbingkun/VideoMAE-clean/work_dir/full_sta_web_k400_finetune' + eval_path2 = '/mnt/petrelfs/huangbingkun/VideoMAE-clean/work_dir/pred_txts' + num_tasks = 32 + eval_paths = [eval_path1, eval_path2] + final_top1, final_top5 = merge(eval_paths, num_tasks, use_softmax=True) + print( + f"Accuracy of the network on the test videos: Top-1: {final_top1:.2f}%, Top-5: {final_top5:.2f}%" + ) diff --git a/Pretrain/VideoMAE/functional.py b/Pretrain/VideoMAE/functional.py new file mode 100644 index 0000000..e6bf3ea --- /dev/null +++ b/Pretrain/VideoMAE/functional.py @@ -0,0 +1,90 @@ +import numbers + +import cv2 +import numpy as np +import PIL +import torch + + +def _is_tensor_clip(clip): + return torch.is_tensor(clip) and clip.ndimension() == 4 + + +def crop_clip(clip, min_h, min_w, h, w): + if isinstance(clip[0], np.ndarray): + cropped = [img[min_h:min_h + h, min_w:min_w + w, :] for img in clip] + + elif isinstance(clip[0], PIL.Image.Image): + cropped = [ + img.crop((min_w, min_h, min_w + w, min_h + h)) for img in clip + ] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + return cropped + + +def resize_clip(clip, size, interpolation='bilinear'): + if isinstance(clip[0], np.ndarray): + if isinstance(size, numbers.Number): + im_h, im_w, im_c = clip[0].shape + # Min spatial dim already matches minimal size + if (im_w <= im_h and im_w == size) or (im_h <= im_w + and im_h == size): + return clip + new_h, new_w = get_resize_sizes(im_h, im_w, size) + size = (new_w, new_h) + else: + size = size[0], size[1] + if interpolation == 'bilinear': + np_inter = cv2.INTER_LINEAR + else: + np_inter = cv2.INTER_NEAREST + scaled = [ + cv2.resize(img, size, interpolation=np_inter) for img in clip + ] + elif isinstance(clip[0], PIL.Image.Image): + if isinstance(size, numbers.Number): + im_w, im_h = clip[0].size + # Min spatial dim already matches minimal size + if (im_w <= im_h and im_w == size) or (im_h <= im_w + and im_h == size): + return clip + new_h, new_w = get_resize_sizes(im_h, im_w, size) + size = (new_w, new_h) + else: + size = size[1], size[0] + if interpolation == 'bilinear': + pil_inter = PIL.Image.BILINEAR + else: + pil_inter = PIL.Image.NEAREST + scaled = [img.resize(size, pil_inter) for img in clip] + else: + 
raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + return scaled + + +def get_resize_sizes(im_h, im_w, size): + if im_w < im_h: + ow = size + oh = int(size * im_h / im_w) + else: + oh = size + ow = int(size * im_w / im_h) + return oh, ow + + +def normalize(clip, mean, std, inplace=False): + if not _is_tensor_clip(clip): + raise TypeError('tensor is not a torch clip.') + + if not inplace: + clip = clip.clone() + + dtype = clip.dtype + mean = torch.as_tensor(mean, dtype=dtype, device=clip.device) + std = torch.as_tensor(std, dtype=dtype, device=clip.device) + clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None]) + + return clip diff --git a/Pretrain/VideoMAE/kinetics.py b/Pretrain/VideoMAE/kinetics.py new file mode 100644 index 0000000..34c47bd --- /dev/null +++ b/Pretrain/VideoMAE/kinetics.py @@ -0,0 +1,788 @@ +# pylint: disable=line-too-long,too-many-lines,missing-docstring +import io +import os +import random +import warnings + +import cv2 +import decord +import numpy as np +import torch +from decord import VideoReader, cpu +from petrel_client.client import Client +from PIL import Image +from torch.utils.data import Dataset +from torchvision import transforms + +import video_transforms as video_transforms +import volume_transforms as volume_transforms + +# from video_transforms import create_random_augment +from random_erasing import RandomErasing + + +class RawFrameDataset(Dataset): + """Load your own video classification dataset.""" + + def __init__(self, + anno_path, + data_path, + mode='train', + clip_len=8, + frame_sample_rate=2, + crop_size=224, + short_side_size=256, + new_height=256, + new_width=340, + keep_aspect_ratio=True, + num_segment=1, + num_crop=1, + test_num_segment=10, + test_num_crop=3, + fname_tmpl='img_{:05}.jpg', + args=None): + self.anno_path = anno_path + self.data_path = data_path + self.mode = mode + self.clip_len = clip_len + self.frame_sample_rate = frame_sample_rate + self.crop_size = crop_size + self.short_side_size = short_side_size + self.new_height = new_height + self.new_width = new_width + self.keep_aspect_ratio = keep_aspect_ratio + self.num_segment = num_segment + self.test_num_segment = test_num_segment + self.num_crop = num_crop + self.test_num_crop = test_num_crop + self.fname_tmpl = fname_tmpl + self.args = args + self.aug = False + self.rand_erase = False + + if self.mode in ['train']: + self.aug = True + if self.args.reprob > 0: + self.rand_erase = True + if VideoReader is None: + raise ImportError( + "Unable to import `decord` which is required to read videos.") + + import pandas as pd + cleaned = pd.read_csv(self.anno_path, header=None, delimiter=' ') + self.dataset_samples = list(cleaned.values[:, 0]) + self.total_frames = list(cleaned.values[:, 1]) + self.label_array = list(cleaned.values[:, -1]) + + self.client = Client() + + if (mode == 'train'): + pass + + elif (mode == 'validation'): + self.data_transform = video_transforms.Compose([ + video_transforms.Resize( + self.short_side_size, interpolation='bilinear'), + video_transforms.CenterCrop( + size=(self.crop_size, self.crop_size)), + volume_transforms.ClipToTensor(), + video_transforms.Normalize( + mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + ]) + elif mode == 'test': + self.data_resize = video_transforms.Compose([ + video_transforms.Resize( + size=(short_side_size), interpolation='bilinear') + ]) + self.data_transform = video_transforms.Compose([ + volume_transforms.ClipToTensor(), + 
video_transforms.Normalize( + mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + ]) + self.test_seg = [] + self.test_dataset = [] + self.test_total_frames = [] + self.test_label_array = [] + for ck in range(self.test_num_segment): + for cp in range(self.test_num_crop): + for idx in range(len(self.label_array)): + self.test_seg.append((ck, cp)) + self.test_dataset.append(self.dataset_samples[idx]) + self.test_total_frames.append(self.total_frames[idx]) + self.test_label_array.append(self.label_array[idx]) + + def __getitem__(self, index): + if self.mode == 'train': + args = self.args + scale_t = 1 + + sample = self.dataset_samples[index] + total_frame = self.total_frames[index] + buffer = self.load_frame( + sample, total_frame, sample_rate_scale=scale_t) # T H W C + if len(buffer) == 0: + while len(buffer) == 0: + warnings.warn( + "video {} not correctly loaded during training".format( + sample)) + index = np.random.randint(self.__len__()) + sample = self.dataset_samples[index] + total_frame = self.total_frames[index] + buffer = self.load_frame( + sample, total_frame, sample_rate_scale=scale_t) + + if args.num_sample > 1: + frame_list = [] + label_list = [] + index_list = [] + for _ in range(args.num_sample): + new_frames = self._aug_frame(buffer, args) + label = self.label_array[index] + frame_list.append(new_frames) + label_list.append(label) + index_list.append(index) + return frame_list, label_list, index_list, {} + else: + buffer = self._aug_frame(buffer, args) + + return buffer, self.label_array[index], index, {} + + elif self.mode == 'validation': + sample = self.dataset_samples[index] + total_frame = self.total_frames[index] + buffer = self.load_frame(sample, total_frame) + if len(buffer) == 0: + while len(buffer) == 0: + warnings.warn( + "video {} not correctly loaded during validation". 
+ format(sample)) + index = np.random.randint(self.__len__()) + sample = self.dataset_samples[index] + total_frame = self.total_frames[index] + buffer = self.load_frame(sample, total_frame) + buffer = self.data_transform(buffer) + return buffer, self.label_array[index], sample.split("/")[-1].split( + ".")[0] + + elif self.mode == 'test': + sample = self.test_dataset[index] + total_frame = self.test_total_frames[index] + chunk_nb, split_nb = self.test_seg[index] + buffer = self.load_frame(sample, total_frame) + + while len(buffer) == 0: + warnings.warn("video {}, temporal {}, spatial {} not found during testing".format(\ + str(self.test_dataset[index]), chunk_nb, split_nb)) + index = np.random.randint(self.__len__()) + sample = self.test_dataset[index] + total_frame = self.test_total_frames[index] + chunk_nb, split_nb = self.test_seg[index] + buffer = self.load_frame(sample, total_frame) + + buffer = self.data_resize(buffer) + if isinstance(buffer, list): + buffer = np.stack(buffer, 0) + + spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \ + / (self.test_num_crop - 1) + temporal_step = max(1.0 * (buffer.shape[0] - self.clip_len) \ + / (self.test_num_segment - 1), 0) + temporal_start = int(chunk_nb * temporal_step) + spatial_start = int(split_nb * spatial_step) + if buffer.shape[1] >= buffer.shape[2]: + buffer = buffer[temporal_start:temporal_start + self.clip_len, \ + spatial_start:spatial_start + self.short_side_size, :, :] + else: + buffer = buffer[temporal_start:temporal_start + self.clip_len, \ + :, spatial_start:spatial_start + self.short_side_size, :] + + buffer = self.data_transform(buffer) + return buffer, self.test_label_array[index], sample.split("/")[-1].split(".")[0], \ + chunk_nb, split_nb + else: + raise NameError('mode {} unkown'.format(self.mode)) + + def _aug_frame( + self, + buffer, + args, + ): + + aug_transform = video_transforms.create_random_augment( + input_size=(self.crop_size, self.crop_size), + auto_augment=args.aa, + interpolation=args.train_interpolation, + ) + + buffer = [transforms.ToPILImage()(frame) for frame in buffer] + + buffer = aug_transform(buffer) + + buffer = [transforms.ToTensor()(img) for img in buffer] + buffer = torch.stack(buffer) # T C H W + buffer = buffer.permute(0, 2, 3, 1) # T H W C + + # T H W C + buffer = tensor_normalize(buffer, [0.485, 0.456, 0.406], + [0.229, 0.224, 0.225]) + # T H W C -> C T H W. + buffer = buffer.permute(3, 0, 1, 2) + # Perform data augmentation. 
+ scl, asp = ( + [0.08, 1.0], + [0.75, 1.3333], + ) + + buffer = spatial_sampling( + buffer, + spatial_idx=-1, + min_scale=256, + max_scale=320, + crop_size=224, + random_horizontal_flip=False if args.data_set == 'SSV2' else True, + inverse_uniform_sampling=False, + aspect_ratio=asp, + scale=scl, + motion_shift=False) + + if self.rand_erase: + erase_transform = RandomErasing( + args.reprob, + mode=args.remode, + max_count=args.recount, + num_splits=args.recount, + device="cpu", + ) + buffer = buffer.permute(1, 0, 2, 3) # C T H W -> T C H W + buffer = erase_transform(buffer) + buffer = buffer.permute(1, 0, 2, 3) # T C H W -> C T H W + + # buffer = self.data_transform_after(buffer) + return buffer + + def load_frame(self, sample, num_frames, sample_rate_scale=1): + """Load video rawframe""" + # pylint: disable=line-too-long, bare-except, unnecessary-comprehension + # fname = self.data_path + sample + fname = sample + + if self.mode == 'test': + all_index = [ + x for x in range(0, num_frames, self.frame_sample_rate) + ] + while len(all_index) < self.clip_len: + all_index.append(all_index[-1]) + imgs = [] + for idx in all_index: + frame_fname = os.path.join(fname, + self.fname_tmpl.format(idx + 1)) + img_bytes = self.client.get(frame_fname) + img_np = np.frombuffer(img_bytes, np.uint8) + img = cv2.imdecode(img_np, cv2.IMREAD_COLOR) + cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) + imgs.append(img) + buffer = np.array(imgs) + return buffer + + # handle temporal segments + converted_len = int(self.clip_len * self.frame_sample_rate) + seg_len = num_frames // self.num_segment + + all_index = [] + for i in range(self.num_segment): + if seg_len <= converted_len: + index = np.linspace( + 0, seg_len, num=seg_len // self.frame_sample_rate) + index = np.concatenate(( + index, + np.ones(self.clip_len - seg_len // self.frame_sample_rate) * + seg_len)) + index = np.clip(index, 0, seg_len - 1).astype(np.int64) + else: + end_idx = np.random.randint(converted_len, seg_len) + str_idx = end_idx - converted_len + index = np.linspace(str_idx, end_idx, num=self.clip_len) + index = np.clip(index, str_idx, end_idx - 1).astype(np.int64) + index = index + i * seg_len + all_index.extend(list(index)) + + all_index = all_index[::int(sample_rate_scale)] + imgs = [] + for idx in all_index: + frame_fname = os.path.join(fname, self.fname_tmpl.format(idx + 1)) + img_bytes = self.client.get(frame_fname) + img_np = np.frombuffer(img_bytes, np.uint8) + img = cv2.imdecode(img_np, cv2.IMREAD_COLOR) + cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) + imgs.append(img) + buffer = np.array(imgs) + return buffer + + def __len__(self): + if self.mode != 'test': + return len(self.dataset_samples) + else: + return len(self.test_dataset) + + +class VideoClsDataset(Dataset): + """Load your own video classification dataset.""" + + def __init__(self, + anno_path, + data_path, + mode='train', + clip_len=8, + frame_sample_rate=2, + crop_size=224, + short_side_size=256, + new_height=256, + new_width=340, + keep_aspect_ratio=True, + num_segment=1, + num_crop=1, + test_num_segment=10, + test_num_crop=3, + sparse_sample=False, + v4d_segment=1, + args=None): + self.anno_path = anno_path + self.data_path = data_path + self.mode = mode + self.clip_len = clip_len + self.frame_sample_rate = frame_sample_rate + self.crop_size = crop_size + self.short_side_size = short_side_size + self.new_height = new_height + self.new_width = new_width + self.keep_aspect_ratio = keep_aspect_ratio + self.num_segment = num_segment + self.test_num_segment = test_num_segment + 
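+        # At test time every video is expanded into test_num_segment x
+        # test_num_crop (temporal chunk, spatial crop) views through the
+        # test_seg / test_dataset lists built further below; the per-view
+        # predictions are averaged offline (see merge() in
+        # engine_for_finetuning.py / ensemble.py).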
self.num_crop = num_crop + self.test_num_crop = test_num_crop + self.sparse_sample = sparse_sample + self.v4d_segment = v4d_segment + self.args = args + self.aug = False + self.rand_erase = False + # self.use_temporal_gradient = False + # self.temporal_gradient_rate = 0.0 + if self.mode in ['train']: + self.aug = True + if self.args.reprob > 0: + self.rand_erase = True + if VideoReader is None: + raise ImportError( + "Unable to import `decord` which is required to read videos.") + + import pandas as pd + cleaned = pd.read_csv(self.anno_path, header=None, delimiter=' ') + self.dataset_samples = list(cleaned.values[:, 0]) + self.label_array = list(cleaned.values[:, 1]) + + # conf_path = '/mnt/lustre/huangbingkun/petreloss.conf' + conf_path = '~/petreloss2.conf' + self.client = Client(conf_path) + + if (mode == 'train'): + pass + + elif (mode == 'validation'): + self.data_transform = video_transforms.Compose([ + video_transforms.Resize( + self.short_side_size, interpolation='bilinear'), + video_transforms.CenterCrop( + size=(self.crop_size, self.crop_size)), + volume_transforms.ClipToTensor(), + video_transforms.Normalize( + mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + ]) + elif mode == 'test': + self.data_resize = video_transforms.Compose([ + video_transforms.Resize( + size=(short_side_size), interpolation='bilinear') + ]) + self.data_transform = video_transforms.Compose([ + volume_transforms.ClipToTensor(), + video_transforms.Normalize( + mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + ]) + self.test_seg = [] + self.test_dataset = [] + self.test_label_array = [] + for ck in range(self.test_num_segment): + for cp in range(self.test_num_crop): + for idx in range(len(self.label_array)): + sample_label = self.label_array[idx] + self.test_label_array.append(sample_label) + self.test_dataset.append(self.dataset_samples[idx]) + self.test_seg.append((ck, cp)) + + def __getitem__(self, index): + if self.mode == 'train': + args = self.args + scale_t = 1 + # data_transform_func = self.data_transform + + sample = self.dataset_samples[index] + buffer = self.loadvideo_decord( + sample, sample_rate_scale=scale_t) # T H W C + if len(buffer) == 0: + while len(buffer) == 0: + warnings.warn( + "video {} not correctly loaded during training".format( + sample)) + index = np.random.randint(self.__len__()) + sample = self.dataset_samples[index] + buffer = self.loadvideo_decord( + sample, sample_rate_scale=scale_t) + + # buffer = data_transform_func(buffer) # T H W C + if args.num_sample > 1: + frame_list = [] + label_list = [] + index_list = [] + for _ in range(args.num_sample): + new_frames = self._aug_frame(buffer, args) + label = self.label_array[index] + #new_frames = [new_frames] + frame_list.append(new_frames) + label_list.append(label) + index_list.append(index) + return frame_list, label_list, index_list, {} + else: + buffer = self._aug_frame(buffer, args) + if self.v4d_segment > 1: + # C T H W + C, T, H, W = buffer.shape + buffer = buffer.view(C, -1, self.v4d_segment, H, W) + buffer = buffer.permute(2, 0, 1, 3, 4) + + return buffer, self.label_array[index], index, {} + + elif self.mode == 'validation': + sample = self.dataset_samples[index] + buffer = self.loadvideo_decord(sample) + if len(buffer) == 0: + while len(buffer) == 0: + warnings.warn( + "video {} not correctly loaded during validation". 
+ format(sample)) + index = np.random.randint(self.__len__()) + sample = self.dataset_samples[index] + buffer = self.loadvideo_decord(sample) + buffer = self.data_transform(buffer) + return buffer, self.label_array[index], sample.split("/")[-1].split( + ".")[0] + + elif self.mode == 'test': + sample = self.test_dataset[index] + chunk_nb, split_nb = self.test_seg[index] + buffer = self.loadvideo_decord(sample) + + while len(buffer) == 0: + warnings.warn("video {}, temporal {}, spatial {} not found during testing".format(\ + str(self.test_dataset[index]), chunk_nb, split_nb)) + index = np.random.randint(self.__len__()) + sample = self.test_dataset[index] + chunk_nb, split_nb = self.test_seg[index] + buffer = self.loadvideo_decord(sample) + + buffer = self.data_resize(buffer) + if isinstance(buffer, list): + buffer = np.stack(buffer, 0) + + if self.sparse_sample: + spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \ + / (self.test_num_crop - 1) + temporal_start = chunk_nb + spatial_start = int(split_nb * spatial_step) + if buffer.shape[1] >= buffer.shape[2]: + buffer = buffer[temporal_start::self.test_num_segment, \ + spatial_start:spatial_start + self.short_side_size, :, :] + else: + buffer = buffer[temporal_start::self.test_num_segment, \ + :, spatial_start:spatial_start + self.short_side_size, :] + else: + spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \ + / (self.test_num_crop - 1) + temporal_step = max(1.0 * (buffer.shape[0] - self.clip_len) \ + / (self.test_num_segment - 1), 0) + temporal_start = int(chunk_nb * temporal_step) + spatial_start = int(split_nb * spatial_step) + if buffer.shape[1] >= buffer.shape[2]: + buffer = buffer[temporal_start:temporal_start + self.clip_len, \ + spatial_start:spatial_start + self.short_side_size, :, :] + else: + buffer = buffer[temporal_start:temporal_start + self.clip_len, \ + :, spatial_start:spatial_start + self.short_side_size, :] + + buffer = self.data_transform(buffer) + return buffer, self.test_label_array[index], sample.split("/")[-1].split(".")[0], \ + chunk_nb, split_nb + else: + raise NameError('mode {} unkown'.format(self.mode)) + + def _aug_frame( + self, + buffer, + args, + ): + + aug_transform = video_transforms.create_random_augment( + input_size=(self.crop_size, self.crop_size), + auto_augment=args.aa, + interpolation=args.train_interpolation, + ) + + buffer = [transforms.ToPILImage()(frame) for frame in buffer] + + buffer = aug_transform(buffer) + + buffer = [transforms.ToTensor()(img) for img in buffer] + buffer = torch.stack(buffer) # T C H W + buffer = buffer.permute(0, 2, 3, 1) # T H W C + + # T H W C + buffer = tensor_normalize(buffer, [0.485, 0.456, 0.406], + [0.229, 0.224, 0.225]) + # T H W C -> C T H W. + buffer = buffer.permute(3, 0, 1, 2) + # Perform data augmentation. 
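+        # Same recipe as the other _aug_frame implementations, except that the
+        # random resized crop below uses crop_size=args.input_size instead of
+        # a hard-coded 224, so non-default fine-tuning resolutions are
+        # respected here.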
+ scl, asp = ( + [0.08, 1.0], + [0.75, 1.3333], + ) + + buffer = spatial_sampling( + buffer, + spatial_idx=-1, + min_scale=256, + max_scale=320, + # crop_size=224, + crop_size=args.input_size, + random_horizontal_flip=False if args.data_set == 'SSV2' else True, + inverse_uniform_sampling=False, + aspect_ratio=asp, + scale=scl, + motion_shift=False) + + if self.rand_erase: + erase_transform = RandomErasing( + args.reprob, + mode=args.remode, + max_count=args.recount, + num_splits=args.recount, + device="cpu", + ) + buffer = buffer.permute(1, 0, 2, 3) # C T H W -> T C H W + buffer = erase_transform(buffer) + buffer = buffer.permute(1, 0, 2, 3) # T C H W -> C T H W + + # buffer = self.data_transform_after(buffer) + return buffer + + def loadvideo_decord(self, sample, sample_rate_scale=1): + """Load video content using Decord""" + # pylint: disable=line-too-long, bare-except, unnecessary-comprehension + # fname = self.data_path + sample + fname = sample + + try: + if self.keep_aspect_ratio: + if fname.startswith('s3'): + video_bytes = self.client.get(fname) + vr = VideoReader( + io.BytesIO(video_bytes), num_threads=1, ctx=cpu(0)) + else: + vr = VideoReader(fname, num_threads=1, ctx=cpu(0)) + else: + if fname.startswith('s3:'): + video_bytes = self.client.get(fname) + vr = VideoReader( + io.BytesIO(video_bytes), + width=self.new_width, + height=self.new_height, + num_threads=1, + ctx=cpu(0)) + else: + vr = VideoReader( + fname, + width=self.new_width, + height=self.new_height, + num_threads=1, + ctx=cpu(0)) + except Exception as e: + print("Failed to load video from {} with error {}".format(fname, e)) + return [] + + segment_len = len(vr) // self.v4d_segment + if self.v4d_segment == 1: + buffer = self.loadvideo_one_v4d_segment(vr, 0, segment_len, + sample_rate_scale) + else: + buffer_list = [] + for i in range(self.v4d_segment): + # [T H W C] + buffer = self.loadvideo_one_v4d_segment(vr, i * segment_len, + segment_len, + sample_rate_scale) + buffer_list.append(buffer) + buffer = np.concatenate(buffer_list, axis=0) + return buffer + + def loadvideo_one_v4d_segment(self, + vr, + start_idx, + length, + sample_rate_scale=1): + if self.mode == 'test': + if self.sparse_sample: + tick = length / float(self.num_segment) + all_index = [] + for t_seg in range(self.test_num_segment): + tmp_index = [ + int(t_seg * tick / self.test_num_segment + tick * x) + for x in range(self.num_segment) + ] + all_index.extend(tmp_index) + all_index = list(np.sort(np.array(all_index))) + else: + all_index = [ + x for x in range(0, length, self.frame_sample_rate) + ] + while len(all_index) < self.clip_len: + all_index.append(all_index[-1]) + + vr.seek(0) + buffer = vr.get_batch(all_index).asnumpy() + return buffer + + # handle temporal segments + converted_len = int(self.clip_len * self.frame_sample_rate) + seg_len = length // self.num_segment + + all_index = [] + for i in range(self.num_segment): + if seg_len <= converted_len: + index = np.linspace( + 0, seg_len, num=seg_len // self.frame_sample_rate) + index = np.concatenate(( + index, + np.ones(self.clip_len - seg_len // self.frame_sample_rate) * + seg_len)) + index = np.clip(index, 0, seg_len - 1).astype(np.int64) + else: + if self.mode == 'validation': + end_idx = (converted_len + seg_len) // 2 + else: + end_idx = np.random.randint(converted_len, seg_len) + str_idx = end_idx - converted_len + index = np.linspace(str_idx, end_idx, num=self.clip_len) + index = np.clip(index, str_idx, end_idx - 1).astype(np.int64) + index = index + i * seg_len + 
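+            # Dense sampling: with e.g. clip_len=16 and frame_sample_rate=4,
+            # converted_len=64, so a random 64-frame window inside this
+            # segment (a centred window in 'validation' mode) provides 16
+            # roughly evenly spaced frames; segments shorter than
+            # converted_len fall back to the padded linspace branch above.
+            # The i * seg_len offset shifts the indices into segment i.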
all_index.extend(list(index)) + + all_index = all_index[::int(sample_rate_scale)] + all_index = [idx + start_idx for idx in all_index] + vr.seek(0) + buffer = vr.get_batch(all_index).asnumpy() + return buffer + + def __len__(self): + if self.mode != 'test': + return len(self.dataset_samples) + else: + return len(self.test_dataset) + + +def spatial_sampling( + frames, + spatial_idx=-1, + min_scale=256, + max_scale=320, + crop_size=224, + random_horizontal_flip=True, + inverse_uniform_sampling=False, + aspect_ratio=None, + scale=None, + motion_shift=False, +): + """ + Perform spatial sampling on the given video frames. If spatial_idx is + -1, perform random scale, random crop, and random flip on the given + frames. If spatial_idx is 0, 1, or 2, perform spatial uniform sampling + with the given spatial_idx. + Args: + frames (tensor): frames of images sampled from the video. The + dimension is `num frames` x `height` x `width` x `channel`. + spatial_idx (int): if -1, perform random spatial sampling. If 0, 1, + or 2, perform left, center, right crop if width is larger than + height, and perform top, center, buttom crop if height is larger + than width. + min_scale (int): the minimal size of scaling. + max_scale (int): the maximal size of scaling. + crop_size (int): the size of height and width used to crop the + frames. + inverse_uniform_sampling (bool): if True, sample uniformly in + [1 / max_scale, 1 / min_scale] and take a reciprocal to get the + scale. If False, take a uniform sample from [min_scale, + max_scale]. + aspect_ratio (list): Aspect ratio range for resizing. + scale (list): Scale range for resizing. + motion_shift (bool): Whether to apply motion shift for resizing. + Returns: + frames (tensor): spatially sampled frames. + """ + assert spatial_idx in [-1, 0, 1, 2] + if spatial_idx == -1: + if aspect_ratio is None and scale is None: + frames, _ = video_transforms.random_short_side_scale_jitter( + images=frames, + min_size=min_scale, + max_size=max_scale, + inverse_uniform_sampling=inverse_uniform_sampling, + ) + frames, _ = video_transforms.random_crop(frames, crop_size) + else: + transform_func = ( + video_transforms.random_resized_crop_with_shift + if motion_shift else video_transforms.random_resized_crop) + frames = transform_func( + images=frames, + target_height=crop_size, + target_width=crop_size, + scale=scale, + ratio=aspect_ratio, + ) + if random_horizontal_flip: + frames, _ = video_transforms.horizontal_flip(0.5, frames) + else: + # The testing is deterministic and no jitter should be performed. + # min_scale, max_scale, and crop_size are expect to be the same. + assert len({min_scale, max_scale, crop_size}) == 1 + frames, _ = video_transforms.random_short_side_scale_jitter( + frames, min_scale, max_scale) + frames, _ = video_transforms.uniform_crop(frames, crop_size, + spatial_idx) + return frames + + +def tensor_normalize(tensor, mean, std): + """ + Normalize a given tensor by subtracting the mean and dividing the std. + Args: + tensor (tensor): tensor to normalize. + mean (tensor or list): mean value to subtract. + std (tensor or list): std to divide. 
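+ Returns:
+ tensor (tensor): the normalized tensor; uint8 inputs are first cast to float and scaled to [0, 1].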
+ """ + if tensor.dtype == torch.uint8: + tensor = tensor.float() + tensor = tensor / 255.0 + if type(mean) == list: + mean = torch.tensor(mean) + if type(std) == list: + std = torch.tensor(std) + tensor = tensor - mean + tensor = tensor / std + return tensor diff --git a/Pretrain/VideoMAE/mae.py b/Pretrain/VideoMAE/mae.py new file mode 100644 index 0000000..af33e80 --- /dev/null +++ b/Pretrain/VideoMAE/mae.py @@ -0,0 +1,744 @@ +import io +import os +import random + +import cv2 +import decord +import numpy as np +import torch +from decord import VideoReader, cpu +from petrel_client.client import Client +from PIL import Image + + +class HybridVideoMAE(torch.utils.data.Dataset): + """Load your own video classification dataset. + Parameters + ---------- + root : str, required. + Path to the root folder storing the dataset. + setting : str, required. + A text file describing the dataset, each line per video sample. + There are three items in each line: (1) video path; (2) video length and (3) video label. + train : bool, default True. + Whether to load the training or validation set. + test_mode : bool, default False. + Whether to perform evaluation on the test set. + Usually there is three-crop or ten-crop evaluation strategy involved. + name_pattern : str, default None. + The naming pattern of the decoded video frames. + For example, img_00012.jpg. + video_ext : str, default 'mp4'. + If video_loader is set to True, please specify the video format accordinly. + is_color : bool, default True. + Whether the loaded image is color or grayscale. + modality : str, default 'rgb'. + Input modalities, we support only rgb video frames for now. + Will add support for rgb difference image and optical flow image later. + num_segments : int, default 1. + Number of segments to evenly divide the video into clips. + A useful technique to obtain global video-level information. + Limin Wang, etal, Temporal Segment Networks: Towards Good Practices for Deep Action Recognition, ECCV 2016. + num_crop : int, default 1. + Number of crops for each image. default is 1. + Common choices are three crops and ten crops during evaluation. + new_length : int, default 1. + The length of input video clip. Default is a single image, but it can be multiple video frames. + For example, new_length=16 means we will extract a video clip of consecutive 16 frames. + new_step : int, default 1. + Temporal sampling rate. For example, new_step=1 means we will extract a video clip of consecutive frames. + new_step=2 means we will extract a video clip of every other frame. + temporal_jitter : bool, default False. + Whether to temporally jitter if new_step > 1. + video_loader : bool, default False. + Whether to use video loader to load data. + use_decord : bool, default True. + Whether to use Decord video loader to load data. Otherwise use mmcv video loader. + transform : function, default None. + A function that takes data and label and transforms them. + data_aug : str, default 'v1'. + Different types of data augmentation auto. Supports v1, v2, v3 and v4. + lazy_init : bool, default False. + If set to True, build a dataset instance without loading any dataset. 
+ """ + + def __init__(self, + root, + setting, + train=True, + test_mode=False, + name_pattern='img_{:05}.jpg', + video_ext='mp4', + is_color=True, + modality='rgb', + num_segments=1, + num_crop=1, + new_length=1, + new_step=1, + transform=None, + temporal_jitter=False, + video_loader=False, + use_decord=False, + lazy_init=False, + num_sample=1): + + super(HybridVideoMAE, self).__init__() + self.root = root + self.setting = setting + self.train = train + self.test_mode = test_mode + self.is_color = is_color + self.modality = modality + self.num_segments = num_segments + self.num_crop = num_crop + self.new_length = new_length + self.new_step = new_step + self.skip_length = self.new_length * self.new_step + self.temporal_jitter = temporal_jitter + self.name_pattern = name_pattern + self.video_loader = video_loader + self.video_ext = video_ext + self.use_decord = use_decord + self.transform = transform + self.lazy_init = lazy_init + self.num_sample = num_sample + + # temp for hybrid train + self.ava_fname_tmpl = 'image_{:06}.jpg' + self.ssv2_fname_tmpl = 'img_{:05}.jpg' + + self.decord = True + + self.client = Client() + + if not self.lazy_init: + self.clips = self._make_dataset(root, setting) + if len(self.clips) == 0: + raise ( + RuntimeError("Found 0 video clips in subfolders of: " + + root + "\n" + "Check your data directory (opt.data-dir).")) + + def __getitem__(self, index): + while True: + try: + video_name, start_idx, total_frame, target = self.clips[index] + if total_frame < 0: + self.new_step = 4 + self.skip_length = self.new_length * self.new_step + video_bytes = self.client.get(video_name) + decord_vr = VideoReader(io.BytesIO(video_bytes), + num_threads=1, + ctx=cpu(0)) + duration = len(decord_vr) + + segment_indices, skip_offsets = self._sample_train_indices( + duration) + frame_id_list = self.get_frame_id_list( + duration, segment_indices, skip_offsets) + video_data = decord_vr.get_batch(frame_id_list).asnumpy() + images = [ + Image.fromarray( + video_data[vid, :, :, :]).convert('RGB') + for vid, _ in enumerate(frame_id_list) + ] + break + + else: + # ssv2 & ava + if 'SomethingV2' in video_name: + self.new_step = 2 + self.skip_length = self.new_length * self.new_step + fname_tmpl = self.ssv2_fname_tmpl + elif 'AVA' in video_name: + self.new_step = 4 + self.skip_length = self.new_length * self.new_step + fname_tmpl = self.ava_fname_tmpl + else: + self.new_step = 4 + self.skip_length = self.new_length * self.new_step + fname_tmpl = self.name_pattern + + segment_indices, skip_offsets = self._sample_train_indices( + total_frame) + frame_id_list = self.get_frame_id_list( + total_frame, segment_indices, skip_offsets) + + images = [] + for idx in frame_id_list: + frame_fname = os.path.join( + video_name, fname_tmpl.format(idx + start_idx)) + img_bytes = self.client.get(frame_fname) + img_np = np.frombuffer(img_bytes, np.uint8) + img = cv2.imdecode(img_np, cv2.IMREAD_COLOR) + cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) + images.append(Image.fromarray(img)) + break + break + + except Exception as e: + print("Failed to load video from {} with error {}".format( + video_name, e)) + index = random.randint(0, len(self.clips) - 1) + + if self.num_sample > 1: + process_data_list = [] + mask_list = [] + for _ in range(self.num_sample): + process_data, mask = self.transform((images, None)) + process_data = process_data.view( + (self.new_length, 3) + process_data.size()[-2:]).transpose( + 0, 1) + process_data_list.append(process_data) + mask_list.append(mask) + return process_data_list, mask_list + 
else: + process_data, mask = self.transform((images, None)) + # T*C,H,W -> T,C,H,W -> C,T,H,W + process_data = process_data.view( + (self.new_length, 3) + process_data.size()[-2:]).transpose( + 0, 1) + return process_data, mask + + def __len__(self): + return len(self.clips) + + def _make_dataset(self, root, setting): + if not os.path.exists(setting): + raise (RuntimeError( + "Setting file %s doesn't exist. Check opt.train-list and opt.val-list. " + % (setting))) + clips = [] + with open(setting) as split_f: + data = split_f.readlines() + for line in data: + line_info = line.split(' ') + # line format: video_path, video_duration, video_label + if len(line_info) < 2: + raise (RuntimeError( + 'Video input format is not correct, missing one or more element. %s' + % line)) + clip_path = os.path.join(root, line_info[0]) + start_idx = int(line_info[1]) + total_frame = int(line_info[2]) + target = int(line_info[-1]) #往往最后一列才是类别标签 + item = (clip_path, start_idx, total_frame, target) + clips.append(item) + return clips + + def _sample_train_indices(self, num_frames): + average_duration = (num_frames - self.skip_length + + 1) // self.num_segments + if average_duration > 0: + offsets = np.multiply(list(range(self.num_segments)), + average_duration) + offsets = offsets + np.random.randint(average_duration, + size=self.num_segments) + elif num_frames > max(self.num_segments, self.skip_length): + offsets = np.sort( + np.random.randint(num_frames - self.skip_length + 1, + size=self.num_segments)) + else: + offsets = np.zeros((self.num_segments, )) + + if self.temporal_jitter: + skip_offsets = np.random.randint(self.new_step, + size=self.skip_length // + self.new_step) + else: + skip_offsets = np.zeros(self.skip_length // self.new_step, + dtype=int) + return offsets + 1, skip_offsets + + def get_frame_id_list(self, duration, indices, skip_offsets): + frame_id_list = [] + for seg_ind in indices: + offset = int(seg_ind) + for i, _ in enumerate(range(0, self.skip_length, self.new_step)): + if offset + skip_offsets[i] <= duration: + frame_id = offset + skip_offsets[i] - 1 + else: + frame_id = offset - 1 + frame_id_list.append(frame_id) + if offset + self.new_step < duration: + offset += self.new_step + return frame_id_list + + +class VideoMAE(torch.utils.data.Dataset): + """Load your own video classification dataset. + Parameters + ---------- + root : str, required. + Path to the root folder storing the dataset. + setting : str, required. + A text file describing the dataset, each line per video sample. + There are three items in each line: (1) video path; (2) video length and (3) video label. + train : bool, default True. + Whether to load the training or validation set. + test_mode : bool, default False. + Whether to perform evaluation on the test set. + Usually there is three-crop or ten-crop evaluation strategy involved. + name_pattern : str, default None. + The naming pattern of the decoded video frames. + For example, img_00012.jpg. + video_ext : str, default 'mp4'. + If video_loader is set to True, please specify the video format accordinly. + is_color : bool, default True. + Whether the loaded image is color or grayscale. + modality : str, default 'rgb'. + Input modalities, we support only rgb video frames for now. + Will add support for rgb difference image and optical flow image later. + num_segments : int, default 1. + Number of segments to evenly divide the video into clips. + A useful technique to obtain global video-level information. 
+ Limin Wang, etal, Temporal Segment Networks: Towards Good Practices for Deep Action Recognition, ECCV 2016. + num_crop : int, default 1. + Number of crops for each image. default is 1. + Common choices are three crops and ten crops during evaluation. + new_length : int, default 1. + The length of input video clip. Default is a single image, but it can be multiple video frames. + For example, new_length=16 means we will extract a video clip of consecutive 16 frames. + new_step : int, default 1. + Temporal sampling rate. For example, new_step=1 means we will extract a video clip of consecutive frames. + new_step=2 means we will extract a video clip of every other frame. + temporal_jitter : bool, default False. + Whether to temporally jitter if new_step > 1. + video_loader : bool, default False. + Whether to use video loader to load data. + use_decord : bool, default True. + Whether to use Decord video loader to load data. Otherwise use mmcv video loader. + transform : function, default None. + A function that takes data and label and transforms them. + data_aug : str, default 'v1'. + Different types of data augmentation auto. Supports v1, v2, v3 and v4. + lazy_init : bool, default False. + If set to True, build a dataset instance without loading any dataset. + """ + + def __init__(self, + root, + setting, + train=True, + test_mode=False, + name_pattern='img_{:05}.jpg', + video_ext='mp4', + is_color=True, + modality='rgb', + num_segments=1, + num_crop=1, + new_length=1, + new_step=1, + transform=None, + temporal_jitter=False, + video_loader=False, + use_decord=False, + lazy_init=False, + num_sample=1): + + super(VideoMAE, self).__init__() + self.root = root + self.setting = setting + self.train = train + self.test_mode = test_mode + self.is_color = is_color + self.modality = modality + self.num_segments = num_segments + self.num_crop = num_crop + self.new_length = new_length + self.new_step = new_step + self.skip_length = self.new_length * self.new_step + self.temporal_jitter = temporal_jitter + self.name_pattern = name_pattern + self.video_loader = video_loader + self.video_ext = video_ext + self.use_decord = use_decord + self.transform = transform + self.lazy_init = lazy_init + self.num_sample = num_sample + + self.decord = True + + self.client = Client() + + if not self.lazy_init: + self.clips = self._make_dataset(root, setting) + if len(self.clips) == 0: + raise ( + RuntimeError("Found 0 video clips in subfolders of: " + + root + "\n" + "Check your data directory (opt.data-dir).")) + + def __getitem__(self, index): + while True: + try: + video_name, start_idx, total_frame, target = self.clips[index] + if total_frame < 0: # load video + if video_name.startswith('s3:'): + video_bytes = self.client.get(video_name) + decord_vr = VideoReader(io.BytesIO(video_bytes), + num_threads=1, + ctx=cpu(0)) + else: + decord_vr = VideoReader(video_name, + num_threads=1, + ctx=cpu(0)) + duration = len(decord_vr) + + segment_indices, skip_offsets = self._sample_train_indices( + duration) + frame_id_list = self.get_frame_id_list( + duration, segment_indices, skip_offsets) + video_data = decord_vr.get_batch(frame_id_list).asnumpy() + images = [ + Image.fromarray( + video_data[vid, :, :, :]).convert('RGB') + for vid, _ in enumerate(frame_id_list) + ] + else: # load frames + segment_indices, skip_offsets = self._sample_train_indices( + total_frame) + frame_id_list = self.get_frame_id_list( + total_frame, segment_indices, skip_offsets) + + images = [] + for idx in frame_id_list: + frame_fname = os.path.join( + 
video_name, + self.name_pattern.format(idx + start_idx)) + if frame_fname.startswith('s3:'): + img_bytes = self.client.get(frame_fname) + img_np = np.frombuffer(img_bytes, np.uint8) + img = cv2.imdecode(img_np, cv2.IMREAD_COLOR) + cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) + img = Image.fromarray(img) + else: + with open(frame_fname, 'rb') as f: + img = Image.open(f) + img = img.convert('RGB') + images.append(img) + # walk through the if-else branch without error + # break out the while-true loop + break + + except Exception as e: + print("Failed to load video from {} with error {}".format( + video_name, e)) + index = random.randint(0, len(self.clips) - 1) + + if self.num_sample > 1: + process_data_list = [] + mask_list = [] + for _ in range(self.num_sample): + process_data, mask = self.transform((images, None)) + process_data = process_data.view( + (self.new_length, 3) + process_data.size()[-2:]).transpose( + 0, 1) + process_data_list.append(process_data) + mask_list.append(mask) + return process_data_list, mask_list + else: + process_data, mask = self.transform((images, None)) + # T*C,H,W -> T,C,H,W -> C,T,H,W + process_data = process_data.view( + (self.new_length, 3) + process_data.size()[-2:]).transpose( + 0, 1) + return process_data, mask + + def __len__(self): + return len(self.clips) + + def _make_dataset(self, root, setting): + if not os.path.exists(setting): + raise (RuntimeError( + "Setting file %s doesn't exist. Check opt.train-list and opt.val-list. " + % (setting))) + clips = [] + with open(setting) as split_f: + data = split_f.readlines() + for line in data: + line_info = line.split(' ') + # line format: video_path, video_duration, video_label + if len(line_info) < 2: + raise (RuntimeError( + 'Video input format is not correct, missing one or more element. %s' + % line)) + clip_path = os.path.join(root, line_info[0]) + start_idx = int(line_info[1]) + total_frame = int(line_info[2]) + target = int(line_info[-1]) + item = (clip_path, start_idx, total_frame, target) + clips.append(item) + return clips + + def _sample_train_indices(self, num_frames): + average_duration = (num_frames - self.skip_length + + 1) // self.num_segments + if average_duration > 0: + offsets = np.multiply(list(range(self.num_segments)), + average_duration) + offsets = offsets + np.random.randint(average_duration, + size=self.num_segments) + elif num_frames > max(self.num_segments, self.skip_length): + offsets = np.sort( + np.random.randint(num_frames - self.skip_length + 1, + size=self.num_segments)) + else: + offsets = np.zeros((self.num_segments, )) + + if self.temporal_jitter: + skip_offsets = np.random.randint(self.new_step, + size=self.skip_length // + self.new_step) + else: + skip_offsets = np.zeros(self.skip_length // self.new_step, + dtype=int) + return offsets + 1, skip_offsets + + def get_frame_id_list(self, duration, indices, skip_offsets): + frame_id_list = [] + for seg_ind in indices: + offset = int(seg_ind) + for i, _ in enumerate(range(0, self.skip_length, self.new_step)): + if offset + skip_offsets[i] <= duration: + frame_id = offset + skip_offsets[i] - 1 + else: + frame_id = offset - 1 + frame_id_list.append(frame_id) + if offset + self.new_step < duration: + offset += self.new_step + return frame_id_list + + +class OldVideoMAE(torch.utils.data.Dataset): + """Load your own video classification dataset. + Parameters + ---------- + root : str, required. + Path to the root folder storing the dataset. + setting : str, required. + A text file describing the dataset, each line per video sample. 
+ There are three items in each line: (1) video path; (2) video length and (3) video label.
+ train : bool, default True.
+ Whether to load the training or validation set.
+ test_mode : bool, default False.
+ Whether to perform evaluation on the test set.
+ Usually a three-crop or ten-crop evaluation strategy is involved.
+ name_pattern : str, default None.
+ The naming pattern of the decoded video frames.
+ For example, img_00012.jpg.
+ video_ext : str, default 'mp4'.
+ If video_loader is set to True, please specify the video format accordingly.
+ is_color : bool, default True.
+ Whether the loaded image is color or grayscale.
+ modality : str, default 'rgb'.
+ Input modalities; we support only rgb video frames for now.
+ Will add support for rgb difference image and optical flow image later.
+ num_segments : int, default 1.
+ Number of segments to evenly divide the video into clips.
+ A useful technique to obtain global video-level information.
+ Limin Wang, et al., Temporal Segment Networks: Towards Good Practices for Deep Action Recognition, ECCV 2016.
+ num_crop : int, default 1.
+ Number of crops for each image. Default is 1.
+ Common choices are three crops and ten crops during evaluation.
+ new_length : int, default 1.
+ The length of the input video clip. Default is a single image, but it can be multiple video frames.
+ For example, new_length=16 means we will extract a video clip of 16 consecutive frames.
+ new_step : int, default 1.
+ Temporal sampling rate. For example, new_step=1 means we will extract a video clip of consecutive frames.
+ new_step=2 means we will extract a video clip of every other frame.
+ temporal_jitter : bool, default False.
+ Whether to temporally jitter if new_step > 1.
+ video_loader : bool, default False.
+ Whether to use a video loader to load data.
+ use_decord : bool, default True.
+ Whether to use the Decord video loader to load data. Otherwise use the mmcv video loader.
+ transform : function, default None.
+ A function that takes data and label and transforms them.
+ data_aug : str, default 'v1'.
+ Type of data augmentation. Supports v1, v2, v3 and v4.
+ lazy_init : bool, default False.
+ If set to True, build a dataset instance without loading any dataset.
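+ For illustration, a setting line could look like: my_videos/clip_001.mp4 300 2
+ (path, length, label); the values here are only illustrative.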
+ """ + + def __init__(self, + root, + setting, + train=True, + test_mode=False, + name_pattern='img_%05d.jpg', + video_ext='mp4', + is_color=True, + modality='rgb', + num_segments=1, + num_crop=1, + new_length=1, + new_step=1, + transform=None, + temporal_jitter=False, + video_loader=False, + use_decord=False, + lazy_init=False, + num_sample=1): + + super(OldVideoMAE, self).__init__() + self.root = root + self.setting = setting + self.train = train + self.test_mode = test_mode + self.is_color = is_color + self.modality = modality + self.num_segments = num_segments + self.num_crop = num_crop + self.new_length = new_length + self.new_step = new_step + self.skip_length = self.new_length * self.new_step + self.temporal_jitter = temporal_jitter + self.name_pattern = name_pattern + self.video_loader = video_loader + self.video_ext = video_ext + self.use_decord = use_decord + self.transform = transform + self.lazy_init = lazy_init + self.num_sample = num_sample + + self.decord = True + + conf_path = '~/petreloss.conf' + client = Client(conf_path) + self.client = client + if not self.lazy_init: + self.clips = self._make_dataset(root, setting) + if len(self.clips) == 0: + raise ( + RuntimeError("Found 0 video clips in subfolders of: " + + root + "\n" + "Check your data directory (opt.data-dir).")) + + def __getitem__(self, index): + while True: + try: + directory, target = self.clips[index] + if self.video_loader: + if '.' in directory.split('/')[-1]: + video_name = directory + else: + video_name = '{}.{}'.format(directory, self.video_ext) + if video_name.startswith('s3'): + video_bytes = self.client.get(video_name) + decord_vr = VideoReader(io.BytesIO(video_bytes), + num_threads=1, + ctx=cpu(0)) + else: + decord_vr = VideoReader(video_name, + num_threads=1, + ctx=cpu(0)) + duration = len(decord_vr) + if duration > 0: + break + + except Exception as e: + print("Failed to load video from {} with error {}".format( + video_name, e)) + index = random.randint(0, len(self.clips) - 1) + + if self.num_sample > 1: + process_data_list = [] + mask_list = [] + for _ in range(self.num_sample): + process_data, mask = self.get_one_sample( + decord_vr, duration, directory) + process_data_list.append(process_data) + mask_list.append(mask) + return process_data_list, mask_list + else: + return self.get_one_sample(decord_vr, duration, directory) + + def get_one_sample(self, decord_vr, duration, directory): + decord_vr.seek(0) + + segment_indices, skip_offsets = self._sample_train_indices(duration) + images = self._video_TSN_decord_batch_loader(directory, decord_vr, + duration, segment_indices, + skip_offsets) + + # T*C,H,W -> T,C,H,W -> C,T,H,W + process_data, mask = self.transform((images, None)) + process_data = process_data.view((self.new_length, 3) + + process_data.size()[-2:]).transpose( + 0, 1) + return process_data, mask + + def __len__(self): + return len(self.clips) + + def _make_dataset(self, directory, setting): + if not os.path.exists(setting): + raise (RuntimeError( + "Setting file %s doesn't exist. Check opt.train-list and opt.val-list. " + % (setting))) + clips = [] + with open(setting) as split_f: + data = split_f.readlines() + for line in data: + line_info = line.split(' ') + # line format: video_path, video_duration, video_label + if len(line_info) < 2: + raise (RuntimeError( + 'Video input format is not correct, missing one or more element. 
%s' + % line)) + clip_path = os.path.join(line_info[0]) + target = int(line_info[-1]) + item = (clip_path, target) + clips.append(item) + return clips + + def _sample_train_indices(self, num_frames): + average_duration = (num_frames - self.skip_length + + 1) // self.num_segments + if average_duration > 0: + offsets = np.multiply(list(range(self.num_segments)), + average_duration) + offsets = offsets + np.random.randint(average_duration, + size=self.num_segments) + elif num_frames > max(self.num_segments, self.skip_length): + offsets = np.sort( + np.random.randint(num_frames - self.skip_length + 1, + size=self.num_segments)) + else: + offsets = np.zeros((self.num_segments, )) + + if self.temporal_jitter: + skip_offsets = np.random.randint(self.new_step, + size=self.skip_length // + self.new_step) + else: + skip_offsets = np.zeros(self.skip_length // self.new_step, + dtype=int) + return offsets + 1, skip_offsets + + def _video_TSN_decord_batch_loader(self, directory, video_reader, duration, + indices, skip_offsets): + sampled_list = [] + frame_id_list = [] + for seg_ind in indices: + offset = int(seg_ind) + for i, _ in enumerate(range(0, self.skip_length, self.new_step)): + if offset + skip_offsets[i] <= duration: + frame_id = offset + skip_offsets[i] - 1 + else: + frame_id = offset - 1 + frame_id_list.append(frame_id) + if offset + self.new_step < duration: + offset += self.new_step + try: + video_data = video_reader.get_batch(frame_id_list).asnumpy() + sampled_list = [ + Image.fromarray(video_data[vid, :, :, :]).convert('RGB') + for vid, _ in enumerate(frame_id_list) + ] + except: + raise RuntimeError( + 'Error occured in reading frames {} from video {} of duration {}.' + .format(frame_id_list, directory, duration)) + return sampled_list diff --git a/Pretrain/VideoMAE/masking_generator.py b/Pretrain/VideoMAE/masking_generator.py new file mode 100644 index 0000000..71db2f9 --- /dev/null +++ b/Pretrain/VideoMAE/masking_generator.py @@ -0,0 +1,235 @@ +# -------------------------------------------------------- +# Based on BEiT, timm, DINO and DeiT code bases +# https://github.com/microsoft/unilm/tree/master/beit +# https://github.com/rwightman/pytorch-image-models/tree/master/timm +# https://github.com/facebookresearch/deit +# https://github.com/facebookresearch/dino +# --------------------------------------------------------' +import math +import random + +import numpy as np +import torch +from einops import rearrange + + +def topk(matrix, K, axis=1): + if axis == 0: + row_index = np.arange(matrix.shape[1 - axis]) + topk_index = np.argpartition(-matrix, K, axis=axis)[0:K, :] + topk_data = matrix[topk_index, row_index] + topk_index_sort = np.argsort(-topk_data, axis=axis) + topk_data_sort = topk_data[topk_index_sort, row_index] + topk_index_sort = topk_index[0:K, :][topk_index_sort, row_index] + else: + column_index = np.arange(matrix.shape[1 - axis])[:, None] + topk_index = np.argpartition(-matrix, K, axis=axis)[:, 0:K] + topk_data = matrix[column_index, topk_index] + topk_index_sort = np.argsort(-topk_data, axis=axis) + topk_data_sort = topk_data[column_index, topk_index_sort] + topk_index_sort = topk_index[:, 0:K][column_index, topk_index_sort] + return (topk_data_sort, topk_index_sort) + + +class MaskingGenerator: + + def update_state(self, epoch): + pass + + +class RandomMaskingGenerator(MaskingGenerator): + + def __init__(self, input_size, mask_ratio): + if not isinstance(input_size, tuple): + input_size = (input_size, ) * 3 + + self.frames, self.height, self.width = input_size + + 
self.num_patches = self.frames * self.height * self.width # 8x14x14 + self.num_mask = int(mask_ratio * self.num_patches) + + def __repr__(self): + repr_str = "Mask: total patches {}, mask patches {}".format( + self.num_patches, self.num_mask) + return repr_str + + def __call__(self): + mask = np.hstack([ + np.zeros(self.num_patches - self.num_mask), + np.ones(self.num_mask), + ]) + np.random.shuffle(mask) + return mask # [196*8] + + +class Cell(): + + def __init__(self, num_masks, num_patches): + self.num_masks = num_masks + self.num_patches = num_patches + self.size = num_masks + num_patches + self.queue = np.hstack([np.ones(num_masks), np.zeros(num_patches)]) + self.queue_ptr = 0 + + def set_ptr(self, pos=-1): + self.queue_ptr = np.random.randint(self.size) if pos < 0 else pos + + def get_cell(self): + cell_idx = (np.arange(self.size) + self.queue_ptr) % self.size + return self.queue[cell_idx] + + def run_cell(self): + self.queue_ptr += 1 + + +class CellRunningMaskingGenerator(MaskingGenerator): + + def __init__(self, input_size, mask_ratio=0.5, is_train=True): + self.frames, self.height, self.width = input_size + self.mask_ratio = mask_ratio + self.ptr_pos = -1 if is_train else 0 + + num_masks_per_cell = int(4 * self.mask_ratio) + assert 0 < num_masks_per_cell < 4 + num_patches_per_cell = 4 - num_masks_per_cell + + self.cell = Cell(num_masks_per_cell, num_patches_per_cell) + self.cell_size = self.cell.size + + mask_list = [] + for ptr_pos in range(self.cell_size): + self.cell.set_ptr(ptr_pos) + mask = [] + for _ in range(self.frames): + self.cell.run_cell() + mask_unit = self.cell.get_cell().reshape(2, 2) + mask_map = np.tile(mask_unit, + [self.height // 2, self.width // 2]) + mask.append(mask_map) + mask = np.stack(mask, axis=0).flatten() + mask_list.append(mask) + self.all_mask_maps = np.stack(mask_list, axis=0) + + def __repr__(self): + repr_str = f"Cell Running Mask with mask ratio {self.mask_ratio}" + return repr_str + + def __call__(self, batch_size): + mask_idx_list = np.random.randint(self.cell_size, size=(batch_size)) + return torch.as_tensor(self.all_mask_maps[mask_idx_list]) + + +class RandomDecodeMaskingGenerator(MaskingGenerator): + + def __init__(self, input_size, mask_ratio=0.5): + self.frame, self.height, self.width = input_size + self.mask_ratio = mask_ratio + + self.num_patches = self.frame * self.height * self.width # 8x14x14 + self.num_mask = int(mask_ratio * self.num_patches) + + def __repr__(self): + repr_str = "Mask: total patches {}, mask patches {}".format( + self.num_patches, self.num_mask) + return repr_str + + def __call__(self, batch_size): + rand = torch.as_tensor(np.random.randn(batch_size, self.num_patches)) + mask_idx = torch.topk(rand, self.num_mask, dim=-1, + sorted=False).indices + mask = torch.zeros(batch_size, + self.num_patches).scatter_(-1, mask_idx, 1) + return mask + + +class TemporalConsistencyMaskingGenerator(MaskingGenerator): + + def __init__(self, input_size, mask_ratio): + self.frames, self.height, self.width = input_size + self.num_patches_per_frame = self.height * self.width # 14x14 + self.total_patches = self.frames * self.num_patches_per_frame + self.num_masks_per_frame = int(mask_ratio * self.num_patches_per_frame) + self.total_masks = self.frames * self.num_masks_per_frame + + def __repr__(self): + repr_str = "Mask: total patches {}, mask patches {}".format( + self.total_patches, self.total_masks) + return repr_str + + def __call__(self): + mask_per_frame = np.hstack([ + np.zeros(self.num_patches_per_frame - 
self.num_masks_per_frame), + np.ones(self.num_masks_per_frame), + ]) + np.random.shuffle(mask_per_frame) + mask = np.tile(mask_per_frame, (self.frames, 1)).flatten() + return mask # [196*8] + + +class TemporalProgressiveMaskingGenerator(MaskingGenerator): + + def __init__(self, input_size, mask_ratio): + self.frames, self.height, self.width = input_size + self.num_patches_per_frame = self.height * self.width # 14x14 + self.total_patches = self.frames * self.num_patches_per_frame # 8x14x14 + max_keep_patch = int( + (1 - mask_ratio) * self.num_patches_per_frame) # 1 - 0.75 = 0.25 + min_keep_patch = int(0.05 * self.num_patches_per_frame) + self.keep_patches_list = np.linspace(max_keep_patch, min_keep_patch, + self.frames).astype(int) + self.total_masks = self.total_patches - self.keep_patches_list.sum() + + def __repr__(self): + repr_str = "Mask: total patches {}, mask patches {}".format( + self.total_patches, self.total_masks) + return repr_str + + def __call__(self): + + rand = np.random.randn(1, self.num_patches_per_frame) + mask = np.zeros((self.frames, self.num_patches_per_frame), + dtype=np.bool) + for i in range(self.frames): + top_k, _ = topk(rand, self.keep_patches_list[i]) + the_topk = top_k[0][-1] + mask[i] = rand <= the_topk + mask = mask.flatten().astype(int) + return mask # [196*8] + + +class TemporalCenteringProgressiveMaskingGenerator(MaskingGenerator): + + def __init__(self, input_size, mask_ratio): + self.num_frames, self.height, self.width = input_size + self.num_patches_per_frame = self.height * self.width # 14x14 + self.total_patches = self.num_frames * self.num_patches_per_frame # 8x14x14 + min_mask_ratio = mask_ratio # 0.9 -> keep 19 token + # 0.979 -> keep 4 token 0.95 -> keep 9 token + max_mask_ratio = 0.95 + max_keep_patch = int( + (1 - min_mask_ratio) * self.num_patches_per_frame) # 1 - 0.9 = 0.1 + min_keep_patch = int((1 - max_mask_ratio) * + self.num_patches_per_frame) # 1 - 0.95 = 0.05 + patches_list = np.linspace(max_keep_patch, min_keep_patch, + self.num_frames // 2).astype(int).tolist() + self.keep_patches_list = patches_list.copy() + patches_list.reverse() + self.keep_patches_list = patches_list + self.keep_patches_list + self.total_masks = self.total_patches - sum(self.keep_patches_list) + + def __repr__(self): + repr_str = "Mask: total patches {}, mask patches {}".format( + self.total_patches, self.total_masks) + return repr_str + + def __call__(self): + + rand = np.random.randn(1, self.num_patches_per_frame) + mask = np.zeros((self.num_frames, self.num_patches_per_frame), + dtype=np.bool) + for i in range(self.num_frames): + top_k, _ = topk(rand, self.keep_patches_list[i]) + the_topk = top_k[0][-1] + mask[i] = rand <= the_topk + mask = mask.flatten().astype(int) + return mask # [196*8] diff --git a/Pretrain/VideoMAE/modeling_finetune.py b/Pretrain/VideoMAE/modeling_finetune.py new file mode 100644 index 0000000..18ea9f2 --- /dev/null +++ b/Pretrain/VideoMAE/modeling_finetune.py @@ -0,0 +1,560 @@ +# -------------------------------------------------------- +# Based on BEiT, timm, DINO and DeiT code bases +# https://github.com/microsoft/unilm/tree/master/beit +# https://github.com/rwightman/pytorch-image-models/tree/master/timm +# https://github.com/facebookresearch/deit +# https://github.com/facebookresearch/dino +# --------------------------------------------------------' +import math +from functools import partial + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from 
timm.models.layers import drop_path, to_2tuple, trunc_normal_ +from timm.models.registry import register_model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 400, + 'input_size': (3, 224, 224), + 'pool_size': None, + 'crop_pct': .9, + 'interpolation': 'bicubic', + 'mean': (0.5, 0.5, 0.5), + 'std': (0.5, 0.5, 0.5), + **kwargs + } + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + """ + + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + def extra_repr(self) -> str: + return 'p={}'.format(self.drop_prob) + + +class Mlp(nn.Module): + + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + # x = self.drop(x) + # commit this for the orignal BERT implement + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + + def __init__(self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0., + proj_drop=0., + attn_head_dim=None): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + if attn_head_dim is not None: + head_dim = attn_head_dim + all_head_dim = head_dim * self.num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) + self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) + else: + self.q_bias = None + self.v_bias = None + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(all_head_dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv_bias = None + if self.q_bias is not None: + qkv_bias = torch.cat( + (self.q_bias, torch.zeros_like(self.v_bias, + requires_grad=False), + self.v_bias)) + # qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[ + 2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__(self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + init_values=None, + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + attn_head_dim=None): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention(dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + attn_head_dim=attn_head_dim) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath( + drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + + if init_values > 0: + self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), + requires_grad=True) + self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)), + requires_grad=True) + else: + self.gamma_1, self.gamma_2 = None, None + + def forward(self, x): + if self.gamma_1 is None: + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + else: + x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x))) + x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) + return x + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + + def __init__(self, + img_size=224, + patch_size=16, + in_chans=3, + embed_dim=768, + num_frames=16, + tubelet_size=2): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + self.tubelet_size = int(tubelet_size) + num_patches = (img_size[1] // patch_size[1]) * ( + img_size[0] // patch_size[0]) * (num_frames // self.tubelet_size) + # self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + self.proj = nn.Conv3d(in_channels=in_chans, + out_channels=embed_dim, + kernel_size=(self.tubelet_size, patch_size[0], + patch_size[1]), + stride=(self.tubelet_size, patch_size[0], + patch_size[1])) + + def forward(self, x, **kwargs): + B, C, T, H, W = x.shape + # FIXME look at relaxing size constraints + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." 
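+ # Descriptive note (added): Conv3d tokenization maps (B, C, T, H, W) to
+ # (B, D, T//tubelet_size, H//patch, W//patch); the spatiotemporal grid is then
+ # flattened into a token sequence of length num_patches.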
+ # b, c, l -> b, l, c + x = self.proj(x).flatten(2).transpose(1, 2) + return x + + +# sin-cos position encoding +# https://github.com/jadore801120/attention-is-all-you-need-pytorch/blob/master/transformer/Models.py#L31 +def get_sinusoid_encoding_table(n_position, d_hid): + ''' Sinusoid position encoding table ''' + + # TODO: make it with torch instead of numpy + def get_position_angle_vec(position): + return [ + position / np.power(10000, 2 * (hid_j // 2) / d_hid) + for hid_j in range(d_hid) + ] + + sinusoid_table = np.array( + [get_position_angle_vec(pos_i) for pos_i in range(n_position)]) + sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i + sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 + + # return torch.FloatTensor(sinusoid_table).unsqueeze(0) + return torch.tensor(sinusoid_table, dtype=torch.float, + requires_grad=False).unsqueeze(0) + + +class VisionTransformer(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__(self, + img_size=224, + patch_size=16, + in_chans=3, + num_classes=1000, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + head_drop_rate=0., + norm_layer=nn.LayerNorm, + init_values=0., + use_learnable_pos_emb=False, + init_scale=0., + all_frames=16, + tubelet_size=2, + use_mean_pooling=True, + with_cp=False, + num_segment=1): + super().__init__() + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + self.tubelet_size = tubelet_size + self.patch_embed = PatchEmbed(img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + num_frames=all_frames, + tubelet_size=self.tubelet_size) + num_patches = self.patch_embed.num_patches + self.with_cp = with_cp + self.num_segment = num_segment + + # self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + if use_learnable_pos_emb: + self.pos_embed = nn.Parameter( + torch.zeros(1, num_patches, embed_dim)) + else: + # sine-cosine positional embeddings is on the way + if img_size != 224 or all_frames != 16: + org_img_size = (224, 224) + org_num_frames = 16 + num_patches = (org_img_size[1] // patch_size) * (org_img_size[0] // patch_size) * ( + org_num_frames // self.tubelet_size) + self.pos_embed = get_sinusoid_encoding_table( + num_patches, embed_dim) + + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth) + ] # stochastic depth decay rule + self.blocks = nn.ModuleList([ + Block(dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + init_values=init_values) for i in range(depth) + ]) + self.norm = nn.Identity() if use_mean_pooling else norm_layer( + embed_dim) + self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None + self.head_dropout = nn.Dropout(head_drop_rate) + self.head = nn.Linear( + embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + if use_learnable_pos_emb: + trunc_normal_(self.pos_embed, std=.02) + + # trunc_normal_(self.cls_token, std=.02) + # trunc_normal_(self.head.weight, std=.02) + self.apply(self._init_weights) + + self.head.weight.data.mul_(init_scale) + self.head.bias.data.mul_(init_scale) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if 
isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def get_num_layers(self): + return len(self.blocks) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear( + self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + B, _, _ = x.size() + + # cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks + # x = torch.cat((cls_tokens, x), dim=1) + if self.pos_embed is not None: + x = x + self.pos_embed.expand(B, -1, -1).type_as(x).to( + x.device).clone().detach() + x = self.pos_drop(x) + + for blk in self.blocks: + if self.with_cp: + x = cp.checkpoint(blk, x) + else: + x = blk(x) + + x = self.norm(x) + if self.fc_norm is not None: + # return self.fc_norm(x[:, 1:].mean(1)) + return self.fc_norm(x.mean(1)) + else: + return x[:, 0] + + def forward_check_variance(self, x): + x = self.patch_embed(x) + B, _, _ = x.size() + + # cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks + # x = torch.cat((cls_tokens, x), dim=1) + if self.pos_embed is not None: + x = x + self.pos_embed.expand(B, -1, -1).type_as(x).to( + x.device).clone().detach() + x = self.pos_drop(x) + + # x [B, N, C] + avg_var_list = [] + for blk in self.blocks: + x = blk(x) + avg_var = torch.mean(torch.var(x, dim=-1)) + avg_var_list.append(avg_var) + + for i, avg_var in enumerate(avg_var_list): + print(f'avg variance of block {i}: {avg_var}', flush=True) + + x = self.norm(x) + if self.fc_norm is not None: + # return self.fc_norm(x[:, 1:].mean(1)) + return self.fc_norm(x.mean(1)) + else: + return x[:, 0] + + def forward(self, x): + # x = self.forward_check_variance(x) + x = self.forward_features(x) + x = self.head_dropout(x) + x = self.head(x) + if self.num_segment > 1 and self.training: + x = x.view((-1, self.num_segment) + x.size()[1:]) + x = x.mean(dim=1) + return x + + +@register_model +def vit_small_patch16_224(pretrained=False, **kwargs): + model = VisionTransformer(patch_size=16, + embed_dim=384, + depth=12, + num_heads=6, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_base_patch16_224(pretrained=False, **kwargs): + model = VisionTransformer(patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_base_patch16_384(pretrained=False, **kwargs): + model = VisionTransformer(img_size=384, + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_large_patch16_224(pretrained=False, **kwargs): + model = VisionTransformer(patch_size=16, + embed_dim=1024, + depth=24, + num_heads=16, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_large_patch16_384(pretrained=False, **kwargs): + model = 
VisionTransformer(img_size=384, + patch_size=16, + embed_dim=1024, + depth=24, + num_heads=16, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_large_patch16_512(pretrained=False, **kwargs): + model = VisionTransformer(img_size=512, + patch_size=16, + embed_dim=1024, + depth=24, + num_heads=16, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_huge_patch16_224(pretrained=False, **kwargs): + model = VisionTransformer(patch_size=16, + embed_dim=1280, + depth=32, + num_heads=16, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_giant_patch14_224(pretrained=False, **kwargs): + model = VisionTransformer(patch_size=14, + embed_dim=1408, + depth=40, + num_heads=16, + mlp_ratio=48 / 11, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_giant_patch16_224(pretrained=False, **kwargs): + model = VisionTransformer(patch_size=16, + embed_dim=1408, + depth=40, + num_heads=16, + mlp_ratio=48 / 11, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_gigantic_patch14_224(pretrained=False, **kwargs): + model = VisionTransformer(patch_size=14, + embed_dim=1664, + depth=48, + num_heads=16, + mlp_ratio=64 / 13, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs) + model.default_cfg = _cfg() + return model diff --git a/Pretrain/VideoMAE/modeling_pretrain.py b/Pretrain/VideoMAE/modeling_pretrain.py new file mode 100644 index 0000000..9757a48 --- /dev/null +++ b/Pretrain/VideoMAE/modeling_pretrain.py @@ -0,0 +1,548 @@ +# -------------------------------------------------------- +# Based on BEiT, timm, DINO and DeiT code bases +# https://github.com/microsoft/unilm/tree/master/beit +# https://github.com/rwightman/pytorch-image-models/tree/master/timm +# https://github.com/facebookresearch/deit +# https://github.com/facebookresearch/dino +# --------------------------------------------------------' +import math +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from timm.models.layers import trunc_normal_ as __call_trunc_normal_ +from timm.models.registry import register_model + +from modeling_finetune import Block, PatchEmbed, _cfg, get_sinusoid_encoding_table + + +def trunc_normal_(tensor, mean=0., std=1.): + __call_trunc_normal_(tensor, mean=mean, std=std, a=-std, b=std) + + +__all__ = [ + 'pretrain_mae_base_patch16_224', + 'pretrain_mae_large_patch16_224', + 'pretrain_mae_huge_patch16_224', + 'pretrain_mae_giant_patch14_224', + 'pretrain_mae_gigantic_patch14_224', +] + + +class PretrainVisionTransformerEncoder(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__(self, + img_size=224, + patch_size=16, + in_chans=3, + num_classes=0, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer=nn.LayerNorm, + init_values=None, + tubelet_size=2, + use_learnable_pos_emb=False, + with_cp=False): + super().__init__() + 
self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + self.patch_embed = PatchEmbed( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + tubelet_size=tubelet_size) + num_patches = self.patch_embed.num_patches + self.with_cp = with_cp + + # TODO: Add the cls token + # self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + if use_learnable_pos_emb: + self.pos_embed = nn.Parameter( + torch.zeros(1, num_patches + 1, embed_dim)) + else: + # sine-cosine positional embeddings + self.pos_embed = get_sinusoid_encoding_table(num_patches, embed_dim) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth) + ] # stochastic depth decay rule + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + init_values=init_values) for i in range(depth) + ]) + self.norm = norm_layer(embed_dim) + self.head = nn.Linear( + embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + if use_learnable_pos_emb: + trunc_normal_(self.pos_embed, std=.02) + + # trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def get_num_layers(self): + return len(self.blocks) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear( + self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x, mask): + _, _, T, _, _ = x.shape + x = self.patch_embed(x) + + # cls_tokens = self.cls_token.expand(batch_size, -1, -1) + # x = torch.cat((cls_tokens, x), dim=1) + x = x + self.pos_embed.type_as(x).to(x.device).clone().detach() + + B, _, C = x.shape + x_vis = x[~mask].reshape(B, -1, C) # ~mask means visible + + for blk in self.blocks: + if self.with_cp: + x_vis = cp.checkpoint(blk, x_vis) + else: + x_vis = blk(x_vis) + + x_vis = self.norm(x_vis) + return x_vis + + def forward_check_variance(self, x, mask): + _, _, T, _, _ = x.shape + x = self.patch_embed(x) + + # cls_tokens = self.cls_token.expand(batch_size, -1, -1) + # x = torch.cat((cls_tokens, x), dim=1) + x = x + self.pos_embed.type_as(x).to(x.device).clone().detach() + + B, _, C = x.shape + x_vis = x[~mask].reshape(B, -1, C) # ~mask means visible + + # x [B, N, C] + avg_var_list = [] + for blk in self.blocks: + x_vis = blk(x_vis) + avg_var = torch.mean(torch.var(x_vis, dim=-1)) + avg_var_list.append(avg_var) + + for i, avg_var in enumerate(avg_var_list): + print(f'avg variance of block {i}: {avg_var}', flush=True) + + x_vis = self.norm(x_vis) + return x_vis + + def forward(self, x, mask): + # x = self.forward_check_variance(x, mask) + x = self.forward_features(x, mask) + x = self.head(x) + return x + + +class PretrainVisionTransformerDecoder(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__(self, + patch_size=16, + num_classes=768, + embed_dim=768, + depth=12, + num_heads=12, + 
mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer=nn.LayerNorm, + init_values=None, + num_patches=196, + tubelet_size=2, + with_cp=False, + with_fp16=True): + super().__init__() + self.num_classes = num_classes + assert num_classes == 3 * tubelet_size * patch_size**2 + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + self.patch_size = patch_size + self.with_cp = with_cp + self.with_fp16 = with_fp16 + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth) + ] # stochastic depth decay rule + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + init_values=init_values) for i in range(depth) + ]) + self.norm = norm_layer(embed_dim) + self.head = nn.Linear( + embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def get_num_layers(self): + return len(self.blocks) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear( + self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward(self, x, return_token_num): + with torch.cuda.amp.autocast_mode.autocast(enabled=self.with_fp16): + for blk in self.blocks: + if self.with_cp: + x = cp.checkpoint(blk, x) + else: + x = blk(x) + + if return_token_num > 0: + # only return the mask tokens predict pixels + x = self.head(self.norm(x[:, -return_token_num:])) + else: + # [B, N, 3*16^2] + x = self.head(self.norm(x)) + return x + + +class PretrainVisionTransformer(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__( + self, + img_size=224, + patch_size=16, + encoder_in_chans=3, + encoder_num_classes=0, + encoder_embed_dim=768, + encoder_depth=12, + encoder_num_heads=12, + decoder_num_classes=1536, # decoder_num_classes=768, + decoder_embed_dim=512, + decoder_depth=8, + decoder_num_heads=8, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_layer=nn.LayerNorm, + init_values=0., + use_learnable_pos_emb=False, + tubelet_size=2, + num_classes=0, # avoid the error from create_fn in timm + in_chans=0, # avoid the error from create_fn in timm + with_cp=False, + ): + super().__init__() + self.encoder = PretrainVisionTransformerEncoder( + img_size=img_size, + patch_size=patch_size, + in_chans=encoder_in_chans, + num_classes=encoder_num_classes, + embed_dim=encoder_embed_dim, + depth=encoder_depth, + num_heads=encoder_num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=drop_path_rate, + norm_layer=norm_layer, + init_values=init_values, + tubelet_size=tubelet_size, + use_learnable_pos_emb=use_learnable_pos_emb, + with_cp=with_cp) + + self.decoder = PretrainVisionTransformerDecoder( + 
patch_size=patch_size, + num_patches=self.encoder.patch_embed.num_patches, + num_classes=decoder_num_classes, + embed_dim=decoder_embed_dim, + depth=decoder_depth, + num_heads=decoder_num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=drop_path_rate, + norm_layer=norm_layer, + init_values=init_values, + tubelet_size=tubelet_size, + with_cp=with_cp, + with_fp16=True) + + self.encoder_to_decoder = nn.Linear( + encoder_embed_dim, decoder_embed_dim, bias=False) + + self.mask_token = nn.Parameter(torch.zeros(1, 1, decoder_embed_dim)) + + self.pos_embed = get_sinusoid_encoding_table( + self.encoder.patch_embed.num_patches, decoder_embed_dim) + + trunc_normal_(self.mask_token, std=.02) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def get_num_layers(self): + return len(self.blocks) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token', 'mask_token'} + + def forward(self, x, mask): + _, _, T, _, _ = x.shape + x_vis = self.encoder(x, mask) # [B, N_vis, C_e] + x_vis = self.encoder_to_decoder(x_vis) # [B, N_vis, C_d] + B, N_vis, C = x_vis.shape + + # we don't unshuffle the correct visible token order, + # but shuffle the pos embedding accorddingly. + expand_pos_embed = self.pos_embed.expand(B, -1, -1).type_as(x).to( + x.device).clone().detach() + pos_emd_vis = expand_pos_embed[~mask].reshape(B, -1, C) + pos_emd_mask = expand_pos_embed[mask].reshape(B, -1, C) + + x_full = torch.cat( + [x_vis + pos_emd_vis, self.mask_token + pos_emd_mask], + dim=1) # [B, N, C_d] + # notice: if N_mask==0, the shape of x is [B, N_mask, 3 * 16 * 16] + x = self.decoder(x_full, + pos_emd_mask.shape[1]) # [B, N_mask, 3 * 16 * 16] + + return x + + +@register_model +def pretrain_mae_small_patch16_224(pretrained=False, **kwargs): + model = PretrainVisionTransformer( + img_size=224, + patch_size=16, + encoder_embed_dim=384, + encoder_depth=12, + encoder_num_heads=6, + encoder_num_classes=0, + decoder_num_classes=1536, # decoder_num_classes=768, + decoder_embed_dim=192, #decoder_depth=4, + decoder_num_heads=3, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs) + model.default_cfg = _cfg() + if pretrained: + checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu") + model.load_state_dict(checkpoint["model"]) + return model + + +@register_model +def pretrain_mae_base_patch16_224(pretrained=False, **kwargs): + model = PretrainVisionTransformer( + img_size=224, + patch_size=16, + encoder_embed_dim=768, + encoder_depth=12, + encoder_num_heads=12, + encoder_num_classes=0, + decoder_num_classes=1536, # decoder_num_classes=768, + decoder_embed_dim=384, #decoder_depth=4, + decoder_num_heads=6, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs) + model.default_cfg = _cfg() + if pretrained: + checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu") + model.load_state_dict(checkpoint["model"]) + return model + + +@register_model +def pretrain_mae_large_patch16_224(pretrained=False, **kwargs): + model = PretrainVisionTransformer( + img_size=224, + patch_size=16, + encoder_embed_dim=1024, + encoder_depth=24, + encoder_num_heads=16, + encoder_num_classes=0, + decoder_num_classes=1536, 
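        # 1536 = 3 (RGB) * tubelet_size (2) * patch_size**2 (16*16): the decoder
        # regresses the raw pixel values of each masked tubelet, matching the
        # assert in PretrainVisionTransformerDecoder.__init__.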
# decoder_num_classes=768, + decoder_embed_dim=512, #decoder_depth=8, + decoder_num_heads=8, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs) + model.default_cfg = _cfg() + if pretrained: + checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu") + model.load_state_dict(checkpoint["model"]) + return model + + +@register_model +def pretrain_mae_huge_patch16_224(pretrained=False, **kwargs): + model = PretrainVisionTransformer( + img_size=224, + patch_size=16, + encoder_embed_dim=1280, + encoder_depth=32, + encoder_num_heads=16, + encoder_num_classes=0, + decoder_num_classes=1536, # decoder_num_classes=768, + decoder_embed_dim=512, #decoder_depth=8, + decoder_num_heads=8, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs) + model.default_cfg = _cfg() + if pretrained: + checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu") + model.load_state_dict(checkpoint["model"]) + return model + + +@register_model +def pretrain_mae_giant_patch16_224(pretrained=False, **kwargs): + model = PretrainVisionTransformer( + img_size=224, + patch_size=16, + encoder_embed_dim=1408, + encoder_depth=40, + encoder_num_heads=16, + encoder_num_classes=0, + decoder_num_classes=1536, + decoder_embed_dim=512, + decoder_num_heads=8, + mlp_ratio=48 / 11, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs) + model.default_cfg = _cfg() + if pretrained: + checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu") + model.load_state_dict(checkpoint["model"]) + return model + + +@register_model +def pretrain_mae_giant_patch14_224(pretrained=False, **kwargs): + model = PretrainVisionTransformer( + img_size=224, + patch_size=14, + encoder_embed_dim=1408, + encoder_depth=40, + encoder_num_heads=16, + encoder_num_classes=0, + decoder_num_classes=1176, + decoder_embed_dim=512, + decoder_num_heads=8, + mlp_ratio=48 / 11, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs) + model.default_cfg = _cfg() + if pretrained: + checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu") + model.load_state_dict(checkpoint["model"]) + return model + + +@register_model +def pretrain_mae_gigantic_patch14_224(pretrained=False, **kwargs): + model = PretrainVisionTransformer( + img_size=224, + patch_size=14, + encoder_embed_dim=1664, + encoder_depth=48, + encoder_num_heads=16, + encoder_num_classes=0, + decoder_num_classes=1176, # decoder_num_classes=768, + decoder_embed_dim=512, #decoder_depth=8, + decoder_num_heads=8, + mlp_ratio=64 / 13, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs) + model.default_cfg = _cfg() + if pretrained: + checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu") + model.load_state_dict(checkpoint["model"]) + return model diff --git a/Pretrain/VideoMAE/optim_factory.py b/Pretrain/VideoMAE/optim_factory.py new file mode 100644 index 0000000..7d1352a --- /dev/null +++ b/Pretrain/VideoMAE/optim_factory.py @@ -0,0 +1,219 @@ +# -------------------------------------------------------- +# Based on BEiT, timm, DINO and DeiT code bases +# https://github.com/microsoft/unilm/tree/master/beit +# https://github.com/rwightman/pytorch-image-models/tree/master/timm +# https://github.com/facebookresearch/deit +# https://github.com/facebookresearch/dino +# --------------------------------------------------------' +import json + +import torch +from timm.optim.adafactor import Adafactor +from timm.optim.adahessian import Adahessian +from timm.optim.adamp 
import AdamP +from timm.optim.lookahead import Lookahead +from timm.optim.nadam import Nadam +from timm.optim.novograd import NovoGrad +from timm.optim.nvnovograd import NvNovoGrad +from timm.optim.radam import RAdam +from timm.optim.rmsprop_tf import RMSpropTF +from timm.optim.sgdp import SGDP +from torch import optim as optim + +try: + from apex.optimizers import FusedAdam, FusedLAMB, FusedNovoGrad, FusedSGD + has_apex = True +except ImportError: + has_apex = False + + +def get_num_layer_for_vit(var_name, num_max_layer): + if var_name in ("cls_token", "mask_token", "pos_embed"): + return 0 + elif var_name.startswith("patch_embed"): + return -1 + elif var_name.startswith("rel_pos_bias"): + return num_max_layer - 1 + elif var_name.startswith("blocks"): + layer_id = int(var_name.split('.')[1]) + return layer_id + 1 + else: + return num_max_layer - 1 + + +class LayerDecayValueAssigner(object): + + def __init__(self, values, extra_decay=1.0): + assert extra_decay >= 0. + self.values = values + self.extra_scale = extra_decay + + def get_scale(self, layer_id, extra_scale=1.0): + if layer_id < 0: + return extra_scale * self.values[0] + return self.values[layer_id] + + def get_layer_id(self, var_name): + return get_num_layer_for_vit(var_name, len(self.values)) + + +def get_parameter_groups(model, + weight_decay=1e-5, + skip_list=(), + get_num_layer=None, + get_layer_scale=None): + parameter_group_names = {} + parameter_group_vars = {} + + for name, param in model.named_parameters(): + if not param.requires_grad: + continue # frozen weights + if len(param.shape) == 1 or name.endswith( + ".bias") or name in skip_list: + group_name = "no_decay" + this_weight_decay = 0. + else: + group_name = "decay" + this_weight_decay = weight_decay + if get_num_layer is not None: + layer_id = get_num_layer(name) + group_name = "layer_%d_%s" % (layer_id, group_name) + else: + layer_id = None + + if group_name not in parameter_group_names: + if get_layer_scale is not None: + scale = get_layer_scale(layer_id) + else: + scale = 1. + + parameter_group_names[group_name] = { + "weight_decay": this_weight_decay, + "params": [], + "lr_scale": scale + } + parameter_group_vars[group_name] = { + "weight_decay": this_weight_decay, + "params": [], + "lr_scale": scale + } + + parameter_group_vars[group_name]["params"].append(param) + parameter_group_names[group_name]["params"].append(name) + print("Param groups = %s" % json.dumps(parameter_group_names, indent=2)) + return list(parameter_group_vars.values()) + + +def create_optimizer(args, + model, + get_num_layer=None, + get_layer_scale=None, + filter_bias_and_bn=True, + skip_list=None): + opt_lower = args.opt.lower() + weight_decay = args.weight_decay + if weight_decay and filter_bias_and_bn: + skip = {} + if skip_list is not None: + skip = skip_list + elif hasattr(model, 'no_weight_decay'): + skip = model.no_weight_decay() + parameters = get_parameter_groups(model, weight_decay, skip, + get_num_layer, get_layer_scale) + weight_decay = 0. 
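        # Each parameter group above already carries its own "weight_decay"
        # entry (0 for biases/norm params/skip list, the requested value
        # otherwise), so the global default handed to the optimizer is zeroed.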
+ else: + parameters = model.parameters() + + if 'fused' in opt_lower: + assert has_apex and torch.cuda.is_available( + ), 'APEX and CUDA required for fused optimizers' + + opt_args = dict(lr=args.lr, weight_decay=weight_decay) + if hasattr(args, 'opt_eps') and args.opt_eps is not None: + opt_args['eps'] = args.opt_eps + if hasattr(args, 'opt_betas') and args.opt_betas is not None: + opt_args['betas'] = args.opt_betas + + print("optimizer settings:", opt_args) + + opt_split = opt_lower.split('_') + opt_lower = opt_split[-1] + if opt_lower == 'sgd' or opt_lower == 'nesterov': + opt_args.pop('eps', None) + optimizer = optim.SGD(parameters, + momentum=args.momentum, + nesterov=True, + **opt_args) + elif opt_lower == 'momentum': + opt_args.pop('eps', None) + optimizer = optim.SGD(parameters, + momentum=args.momentum, + nesterov=False, + **opt_args) + elif opt_lower == 'adam': + optimizer = optim.Adam(parameters, **opt_args) + elif opt_lower == 'adamw': + optimizer = optim.AdamW(parameters, **opt_args) + elif opt_lower == 'nadam': + optimizer = Nadam(parameters, **opt_args) + elif opt_lower == 'radam': + optimizer = RAdam(parameters, **opt_args) + elif opt_lower == 'adamp': + optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args) + elif opt_lower == 'sgdp': + optimizer = SGDP(parameters, + momentum=args.momentum, + nesterov=True, + **opt_args) + elif opt_lower == 'adadelta': + optimizer = optim.Adadelta(parameters, **opt_args) + elif opt_lower == 'adafactor': + if not args.lr: + opt_args['lr'] = None + optimizer = Adafactor(parameters, **opt_args) + elif opt_lower == 'adahessian': + optimizer = Adahessian(parameters, **opt_args) + elif opt_lower == 'rmsprop': + optimizer = optim.RMSprop(parameters, + alpha=0.9, + momentum=args.momentum, + **opt_args) + elif opt_lower == 'rmsproptf': + optimizer = RMSpropTF(parameters, + alpha=0.9, + momentum=args.momentum, + **opt_args) + elif opt_lower == 'novograd': + optimizer = NovoGrad(parameters, **opt_args) + elif opt_lower == 'nvnovograd': + optimizer = NvNovoGrad(parameters, **opt_args) + elif opt_lower == 'fusedsgd': + opt_args.pop('eps', None) + optimizer = FusedSGD(parameters, + momentum=args.momentum, + nesterov=True, + **opt_args) + elif opt_lower == 'fusedmomentum': + opt_args.pop('eps', None) + optimizer = FusedSGD(parameters, + momentum=args.momentum, + nesterov=False, + **opt_args) + elif opt_lower == 'fusedadam': + optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args) + elif opt_lower == 'fusedadamw': + optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args) + elif opt_lower == 'fusedlamb': + optimizer = FusedLAMB(parameters, **opt_args) + elif opt_lower == 'fusednovograd': + opt_args.setdefault('betas', (0.95, 0.98)) + optimizer = FusedNovoGrad(parameters, **opt_args) + else: + assert False and "Invalid optimizer" + raise ValueError + + if len(opt_split) > 1: + if opt_split[0] == 'lookahead': + optimizer = Lookahead(optimizer) + + return optimizer diff --git a/Pretrain/VideoMAE/rand_augment.py b/Pretrain/VideoMAE/rand_augment.py new file mode 100644 index 0000000..fa0b0b2 --- /dev/null +++ b/Pretrain/VideoMAE/rand_augment.py @@ -0,0 +1,534 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +""" +This implementation is based on +https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/auto_augment.py +pulished under an Apache License 2.0. 
+ +COMMENT FROM ORIGINAL: +AutoAugment, RandAugment, and AugMix for PyTorch +This code implements the searched ImageNet policies with various tweaks and +improvements and does not include any of the search code. AA and RA +Implementation adapted from: + https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py +AugMix adapted from: + https://github.com/google-research/augmix +Papers: + AutoAugment: Learning Augmentation Policies from Data + https://arxiv.org/abs/1805.09501 + Learning Data Augmentation Strategies for Object Detection + https://arxiv.org/abs/1906.11172 + RandAugment: Practical automated data augmentation... + https://arxiv.org/abs/1909.13719 + AugMix: A Simple Data Processing Method to Improve Robustness and + Uncertainty https://arxiv.org/abs/1912.02781 + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import math +import random +import re + +import numpy as np +import PIL +from PIL import Image, ImageEnhance, ImageOps + +_PIL_VER = tuple([int(x) for x in PIL.__version__.split(".")[:2]]) + +_FILL = (128, 128, 128) + +# This signifies the max integer that the controller RNN could predict for the +# augmentation scheme. +_MAX_LEVEL = 10.0 + +_HPARAMS_DEFAULT = { + "translate_const": 250, + "img_mean": _FILL, +} + +_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC) + + +def _interpolation(kwargs): + interpolation = kwargs.pop("resample", Image.BILINEAR) + if isinstance(interpolation, (list, tuple)): + return random.choice(interpolation) + else: + return interpolation + + +def _check_args_tf(kwargs): + if "fillcolor" in kwargs and _PIL_VER < (5, 0): + kwargs.pop("fillcolor") + kwargs["resample"] = _interpolation(kwargs) + + +def shear_x(img, factor, **kwargs): + _check_args_tf(kwargs) + return img.transform( + img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs + ) + + +def shear_y(img, factor, **kwargs): + _check_args_tf(kwargs) + return img.transform( + img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs + ) + + +def translate_x_rel(img, pct, **kwargs): + pixels = pct * img.size[0] + _check_args_tf(kwargs) + return img.transform( + img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs + ) + + +def translate_y_rel(img, pct, **kwargs): + pixels = pct * img.size[1] + _check_args_tf(kwargs) + return img.transform( + img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs + ) + + +def translate_x_abs(img, pixels, **kwargs): + _check_args_tf(kwargs) + return img.transform( + img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs + ) + + +def translate_y_abs(img, pixels, **kwargs): + _check_args_tf(kwargs) + return img.transform( + img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs + ) + + +def rotate(img, degrees, **kwargs): + _check_args_tf(kwargs) + if _PIL_VER >= (5, 2): + return img.rotate(degrees, **kwargs) + elif _PIL_VER >= (5, 0): + w, h = img.size + post_trans = (0, 0) + rotn_center = (w / 2.0, h / 2.0) + angle = -math.radians(degrees) + matrix = [ + round(math.cos(angle), 15), + round(math.sin(angle), 15), + 0.0, + round(-math.sin(angle), 15), + round(math.cos(angle), 15), + 0.0, + ] + + def transform(x, y, matrix): + (a, b, c, d, e, f) = matrix + return a * x + b * y + c, d * x + e * y + f + + matrix[2], matrix[5] = transform( + -rotn_center[0] - post_trans[0], + -rotn_center[1] - post_trans[1], + matrix, + ) + matrix[2] += rotn_center[0] + matrix[5] += rotn_center[1] + return img.transform(img.size, Image.AFFINE, matrix, **kwargs) + else: + return img.rotate(degrees, resample=kwargs["resample"]) 
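# A minimal usage sketch (illustrative only, with made-up frames and
# magnitudes): each geometric op above takes a PIL image, a magnitude-derived
# argument, and the fill/resample kwargs that AugmentOp (defined further
# below) forwards. Applying the same op with the same argument to every frame
# keeps a video clip temporally consistent.
if __name__ == "__main__":
    demo_frames = [Image.new("RGB", (224, 224), _FILL) for _ in range(4)]
    demo_kwargs = {"fillcolor": _FILL, "resample": Image.BILINEAR}
    sheared = [shear_x(f, 0.2, **demo_kwargs) for f in demo_frames]
    rotated = [rotate(f, 15.0, **demo_kwargs) for f in demo_frames]
    print(sheared[0].size, rotated[0].size)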
+ + +def auto_contrast(img, **__): + return ImageOps.autocontrast(img) + + +def invert(img, **__): + return ImageOps.invert(img) + + +def equalize(img, **__): + return ImageOps.equalize(img) + + +def solarize(img, thresh, **__): + return ImageOps.solarize(img, thresh) + + +def solarize_add(img, add, thresh=128, **__): + lut = [] + for i in range(256): + if i < thresh: + lut.append(min(255, i + add)) + else: + lut.append(i) + if img.mode in ("L", "RGB"): + if img.mode == "RGB" and len(lut) == 256: + lut = lut + lut + lut + return img.point(lut) + else: + return img + + +def posterize(img, bits_to_keep, **__): + if bits_to_keep >= 8: + return img + return ImageOps.posterize(img, bits_to_keep) + + +def contrast(img, factor, **__): + return ImageEnhance.Contrast(img).enhance(factor) + + +def color(img, factor, **__): + return ImageEnhance.Color(img).enhance(factor) + + +def brightness(img, factor, **__): + return ImageEnhance.Brightness(img).enhance(factor) + + +def sharpness(img, factor, **__): + return ImageEnhance.Sharpness(img).enhance(factor) + + +def _randomly_negate(v): + """With 50% prob, negate the value""" + return -v if random.random() > 0.5 else v + + +def _rotate_level_to_arg(level, _hparams): + # range [-30, 30] + level = (level / _MAX_LEVEL) * 30.0 + level = _randomly_negate(level) + return (level,) + + +def _enhance_level_to_arg(level, _hparams): + # range [0.1, 1.9] + return ((level / _MAX_LEVEL) * 1.8 + 0.1,) + + +def _enhance_increasing_level_to_arg(level, _hparams): + # the 'no change' level is 1.0, moving away from that towards 0. or 2.0 increases the enhancement blend + # range [0.1, 1.9] + level = (level / _MAX_LEVEL) * 0.9 + level = 1.0 + _randomly_negate(level) + return (level,) + + +def _shear_level_to_arg(level, _hparams): + # range [-0.3, 0.3] + level = (level / _MAX_LEVEL) * 0.3 + level = _randomly_negate(level) + return (level,) + + +def _translate_abs_level_to_arg(level, hparams): + translate_const = hparams["translate_const"] + level = (level / _MAX_LEVEL) * float(translate_const) + level = _randomly_negate(level) + return (level,) + + +def _translate_rel_level_to_arg(level, hparams): + # default range [-0.45, 0.45] + translate_pct = hparams.get("translate_pct", 0.45) + level = (level / _MAX_LEVEL) * translate_pct + level = _randomly_negate(level) + return (level,) + + +def _posterize_level_to_arg(level, _hparams): + # As per Tensorflow TPU EfficientNet impl + # range [0, 4], 'keep 0 up to 4 MSB of original image' + # intensity/severity of augmentation decreases with level + return (int((level / _MAX_LEVEL) * 4),) + + +def _posterize_increasing_level_to_arg(level, hparams): + # As per Tensorflow models research and UDA impl + # range [4, 0], 'keep 4 down to 0 MSB of original image', + # intensity/severity of augmentation increases with level + return (4 - _posterize_level_to_arg(level, hparams)[0],) + + +def _posterize_original_level_to_arg(level, _hparams): + # As per original AutoAugment paper description + # range [4, 8], 'keep 4 up to 8 MSB of image' + # intensity/severity of augmentation decreases with level + return (int((level / _MAX_LEVEL) * 4) + 4,) + + +def _solarize_level_to_arg(level, _hparams): + # range [0, 256] + # intensity/severity of augmentation decreases with level + return (int((level / _MAX_LEVEL) * 256),) + + +def _solarize_increasing_level_to_arg(level, _hparams): + # range [0, 256] + # intensity/severity of augmentation increases with level + return (256 - _solarize_level_to_arg(level, _hparams)[0],) + + +def 
_solarize_add_level_to_arg(level, _hparams): + # range [0, 110] + return (int((level / _MAX_LEVEL) * 110),) + + +LEVEL_TO_ARG = { + "AutoContrast": None, + "Equalize": None, + "Invert": None, + "Rotate": _rotate_level_to_arg, + # There are several variations of the posterize level scaling in various Tensorflow/Google repositories/papers + "Posterize": _posterize_level_to_arg, + "PosterizeIncreasing": _posterize_increasing_level_to_arg, + "PosterizeOriginal": _posterize_original_level_to_arg, + "Solarize": _solarize_level_to_arg, + "SolarizeIncreasing": _solarize_increasing_level_to_arg, + "SolarizeAdd": _solarize_add_level_to_arg, + "Color": _enhance_level_to_arg, + "ColorIncreasing": _enhance_increasing_level_to_arg, + "Contrast": _enhance_level_to_arg, + "ContrastIncreasing": _enhance_increasing_level_to_arg, + "Brightness": _enhance_level_to_arg, + "BrightnessIncreasing": _enhance_increasing_level_to_arg, + "Sharpness": _enhance_level_to_arg, + "SharpnessIncreasing": _enhance_increasing_level_to_arg, + "ShearX": _shear_level_to_arg, + "ShearY": _shear_level_to_arg, + "TranslateX": _translate_abs_level_to_arg, + "TranslateY": _translate_abs_level_to_arg, + "TranslateXRel": _translate_rel_level_to_arg, + "TranslateYRel": _translate_rel_level_to_arg, +} + + +NAME_TO_OP = { + "AutoContrast": auto_contrast, + "Equalize": equalize, + "Invert": invert, + "Rotate": rotate, + "Posterize": posterize, + "PosterizeIncreasing": posterize, + "PosterizeOriginal": posterize, + "Solarize": solarize, + "SolarizeIncreasing": solarize, + "SolarizeAdd": solarize_add, + "Color": color, + "ColorIncreasing": color, + "Contrast": contrast, + "ContrastIncreasing": contrast, + "Brightness": brightness, + "BrightnessIncreasing": brightness, + "Sharpness": sharpness, + "SharpnessIncreasing": sharpness, + "ShearX": shear_x, + "ShearY": shear_y, + "TranslateX": translate_x_abs, + "TranslateY": translate_y_abs, + "TranslateXRel": translate_x_rel, + "TranslateYRel": translate_y_rel, +} + + +class AugmentOp: + """ + Apply for video. + """ + + def __init__(self, name, prob=0.5, magnitude=10, hparams=None): + hparams = hparams or _HPARAMS_DEFAULT + self.aug_fn = NAME_TO_OP[name] + self.level_fn = LEVEL_TO_ARG[name] + self.prob = prob + self.magnitude = magnitude + self.hparams = hparams.copy() + self.kwargs = { + "fillcolor": hparams["img_mean"] + if "img_mean" in hparams + else _FILL, + "resample": hparams["interpolation"] + if "interpolation" in hparams + else _RANDOM_INTERPOLATION, + } + + # If magnitude_std is > 0, we introduce some randomness + # in the usually fixed policy and sample magnitude from a normal distribution + # with mean `magnitude` and std-dev of `magnitude_std`. + # NOTE This is my own hack, being tested, not in papers or reference impls. 
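        # For example, hparams={"magnitude_std": 0.5} makes each __call__ draw
        # its magnitude from N(self.magnitude, 0.5), clipped back to
        # [0, _MAX_LEVEL] before the level function converts it to op args.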
+ self.magnitude_std = self.hparams.get("magnitude_std", 0) + + def __call__(self, img_list): + if self.prob < 1.0 and random.random() > self.prob: + return img_list + magnitude = self.magnitude + if self.magnitude_std and self.magnitude_std > 0: + magnitude = random.gauss(magnitude, self.magnitude_std) + magnitude = min(_MAX_LEVEL, max(0, magnitude)) # clip to valid range + level_args = ( + self.level_fn(magnitude, self.hparams) + if self.level_fn is not None + else () + ) + + if isinstance(img_list, list): + return [ + self.aug_fn(img, *level_args, **self.kwargs) for img in img_list + ] + else: + return self.aug_fn(img_list, *level_args, **self.kwargs) + + +_RAND_TRANSFORMS = [ + "AutoContrast", + "Equalize", + "Invert", + "Rotate", + "Posterize", + "Solarize", + "SolarizeAdd", + "Color", + "Contrast", + "Brightness", + "Sharpness", + "ShearX", + "ShearY", + "TranslateXRel", + "TranslateYRel", +] + + +_RAND_INCREASING_TRANSFORMS = [ + "AutoContrast", + "Equalize", + "Invert", + "Rotate", + "PosterizeIncreasing", + "SolarizeIncreasing", + "SolarizeAdd", + "ColorIncreasing", + "ContrastIncreasing", + "BrightnessIncreasing", + "SharpnessIncreasing", + "ShearX", + "ShearY", + "TranslateXRel", + "TranslateYRel", +] + + +# These experimental weights are based loosely on the relative improvements mentioned in paper. +# They may not result in increased performance, but could likely be tuned to so. +_RAND_CHOICE_WEIGHTS_0 = { + "Rotate": 0.3, + "ShearX": 0.2, + "ShearY": 0.2, + "TranslateXRel": 0.1, + "TranslateYRel": 0.1, + "Color": 0.025, + "Sharpness": 0.025, + "AutoContrast": 0.025, + "Solarize": 0.005, + "SolarizeAdd": 0.005, + "Contrast": 0.005, + "Brightness": 0.005, + "Equalize": 0.005, + "Posterize": 0, + "Invert": 0, +} + + +def _select_rand_weights(weight_idx=0, transforms=None): + transforms = transforms or _RAND_TRANSFORMS + assert weight_idx == 0 # only one set of weights currently + rand_weights = _RAND_CHOICE_WEIGHTS_0 + probs = [rand_weights[k] for k in transforms] + probs /= np.sum(probs) + return probs + + +def rand_augment_ops(magnitude=10, hparams=None, transforms=None): + hparams = hparams or _HPARAMS_DEFAULT + transforms = transforms or _RAND_TRANSFORMS + return [ + AugmentOp(name, prob=0.5, magnitude=magnitude, hparams=hparams) + for name in transforms + ] + + +class RandAugment: + def __init__(self, ops, num_layers=2, choice_weights=None): + self.ops = ops + self.num_layers = num_layers + self.choice_weights = choice_weights + + def __call__(self, img): + # no replacement when using weighted choice + ops = np.random.choice( + self.ops, + self.num_layers, + replace=self.choice_weights is None, + p=self.choice_weights, + ) + for op in ops: + img = op(img) + return img + + +def rand_augment_transform(config_str, hparams): + """ + RandAugment: Practical automated data augmentation... - https://arxiv.org/abs/1909.13719 + + Create a RandAugment transform + :param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by + dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). 
The remaining + sections, not order sepecific determine + 'm' - integer magnitude of rand augment + 'n' - integer num layers (number of transform ops selected per image) + 'w' - integer probabiliy weight index (index of a set of weights to influence choice of op) + 'mstd' - float std deviation of magnitude noise applied + 'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0) + Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5 + 'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default magnitude of 10 and num_layers 2 + :param hparams: Other hparams (kwargs) for the RandAugmentation scheme + :return: A PyTorch compatible Transform + """ + magnitude = _MAX_LEVEL # default to _MAX_LEVEL for magnitude (currently 10) + num_layers = 2 # default to 2 ops per image + weight_idx = None # default to no probability weights for op choice + transforms = _RAND_TRANSFORMS + config = config_str.split("-") + assert config[0] == "rand" + config = config[1:] + for c in config: + cs = re.split(r"(\d.*)", c) + if len(cs) < 2: + continue + key, val = cs[:2] + if key == "mstd": + # noise param injected via hparams for now + hparams.setdefault("magnitude_std", float(val)) + elif key == "inc": + if bool(val): + transforms = _RAND_INCREASING_TRANSFORMS + elif key == "m": + magnitude = int(val) + elif key == "n": + num_layers = int(val) + elif key == "w": + weight_idx = int(val) + else: + assert NotImplementedError + ra_ops = rand_augment_ops( + magnitude=magnitude, hparams=hparams, transforms=transforms + ) + choice_weights = ( + None if weight_idx is None else _select_rand_weights(weight_idx) + ) + return RandAugment(ra_ops, num_layers, choice_weights=choice_weights) diff --git a/Pretrain/VideoMAE/random_erasing.py b/Pretrain/VideoMAE/random_erasing.py new file mode 100644 index 0000000..eea47cd --- /dev/null +++ b/Pretrain/VideoMAE/random_erasing.py @@ -0,0 +1,181 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +""" +This implementation is based on +https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/random_erasing.py +pulished under an Apache License 2.0. + +COMMENT FROM ORIGINAL: +Originally inspired by impl at https://github.com/zhunzhong07/Random-Erasing, Apache 2.0 +Copyright Zhun Zhong & Liang Zheng +Hacked together by / Copyright 2020 Ross Wightman +""" +import math +import random + +import torch + + +def _get_pixels( + per_pixel, rand_color, patch_size, dtype=torch.float32, device="cuda" +): + # NOTE I've seen CUDA illegal memory access errors being caused by the normal_() + # paths, flip the order so normal is run on CPU if this becomes a problem + # Issue has been fixed in master https://github.com/pytorch/pytorch/issues/19508 + if per_pixel: + return torch.empty(patch_size, dtype=dtype, device=device).normal_() + elif rand_color: + return torch.empty( + (patch_size[0], 1, 1), dtype=dtype, device=device + ).normal_() + else: + return torch.zeros((patch_size[0], 1, 1), dtype=dtype, device=device) + + +class RandomErasing: + """Randomly selects a rectangle region in an image and erases its pixels. + 'Random Erasing Data Augmentation' by Zhong et al. + See https://arxiv.org/pdf/1708.04896.pdf + This variant of RandomErasing is intended to be applied to either a batch + or single image tensor after it has been normalized by dataset mean and std. + Args: + probability: Probability that the Random Erasing operation will be performed. 
+ min_area: Minimum percentage of erased area wrt input image area. + max_area: Maximum percentage of erased area wrt input image area. + min_aspect: Minimum aspect ratio of erased area. + mode: pixel color mode, one of 'const', 'rand', or 'pixel' + 'const' - erase block is constant color of 0 for all channels + 'rand' - erase block is same per-channel random (normal) color + 'pixel' - erase block is per-pixel random (normal) color + max_count: maximum number of erasing blocks per image, area per box is scaled by count. + per-image count is randomly chosen between 1 and this value. + """ + + def __init__( + self, + probability=0.5, + min_area=0.02, + max_area=1 / 3, + min_aspect=0.3, + max_aspect=None, + mode="const", + min_count=1, + max_count=None, + num_splits=0, + device="cuda", + cube=True, + ): + self.probability = probability + self.min_area = min_area + self.max_area = max_area + max_aspect = max_aspect or 1 / min_aspect + self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect)) + self.min_count = min_count + self.max_count = max_count or min_count + self.num_splits = num_splits + mode = mode.lower() + self.rand_color = False + self.per_pixel = False + self.cube = cube + if mode == "rand": + self.rand_color = True # per block random normal + elif mode == "pixel": + self.per_pixel = True # per pixel random normal + else: + assert not mode or mode == "const" + self.device = device + + def _erase(self, img, chan, img_h, img_w, dtype): + if random.random() > self.probability: + return + area = img_h * img_w + count = ( + self.min_count + if self.min_count == self.max_count + else random.randint(self.min_count, self.max_count) + ) + for _ in range(count): + for _ in range(10): + target_area = ( + random.uniform(self.min_area, self.max_area) * area / count + ) + aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio)) + h = int(round(math.sqrt(target_area * aspect_ratio))) + w = int(round(math.sqrt(target_area / aspect_ratio))) + if w < img_w and h < img_h: + top = random.randint(0, img_h - h) + left = random.randint(0, img_w - w) + img[:, top : top + h, left : left + w] = _get_pixels( + self.per_pixel, + self.rand_color, + (chan, h, w), + dtype=dtype, + device=self.device, + ) + break + + def _erase_cube( + self, + img, + batch_start, + batch_size, + chan, + img_h, + img_w, + dtype, + ): + if random.random() > self.probability: + return + area = img_h * img_w + count = ( + self.min_count + if self.min_count == self.max_count + else random.randint(self.min_count, self.max_count) + ) + for _ in range(count): + for _ in range(100): + target_area = ( + random.uniform(self.min_area, self.max_area) * area / count + ) + aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio)) + h = int(round(math.sqrt(target_area * aspect_ratio))) + w = int(round(math.sqrt(target_area / aspect_ratio))) + if w < img_w and h < img_h: + top = random.randint(0, img_h - h) + left = random.randint(0, img_w - w) + for i in range(batch_start, batch_size): + img_instance = img[i] + img_instance[ + :, top : top + h, left : left + w + ] = _get_pixels( + self.per_pixel, + self.rand_color, + (chan, h, w), + dtype=dtype, + device=self.device, + ) + break + + def __call__(self, input): + if len(input.size()) == 3: + self._erase(input, *input.size(), input.dtype) + else: + batch_size, chan, img_h, img_w = input.size() + # skip first slice of batch if num_splits is set (for clean portion of samples) + batch_start = ( + batch_size // self.num_splits if self.num_splits > 1 else 0 + ) + if self.cube: + 
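                # cube mode: pick a single rectangle and erase that same
                # region in every remaining item of the batch dimension; with
                # video frames stacked along that dimension, this keeps the
                # erased volume consistent across the whole clip.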
self._erase_cube( + input, + batch_start, + batch_size, + chan, + img_h, + img_w, + input.dtype, + ) + else: + for i in range(batch_start, batch_size): + self._erase(input[i], chan, img_h, img_w, input.dtype) + return input diff --git a/Pretrain/VideoMAE/run_class_finetuning.py b/Pretrain/VideoMAE/run_class_finetuning.py new file mode 100644 index 0000000..ac98524 --- /dev/null +++ b/Pretrain/VideoMAE/run_class_finetuning.py @@ -0,0 +1,927 @@ +# -------------------------------------------------------- +# Based on BEiT, timm, DINO and DeiT code bases +# https://github.com/microsoft/unilm/tree/master/beit +# https://github.com/rwightman/pytorch-image-models/tree/master/timm +# https://github.com/facebookresearch/deit +# https://github.com/facebookresearch/dino +# --------------------------------------------------------' + +import argparse +import datetime +import json +import os +import time +from collections import OrderedDict +from functools import partial +from pathlib import Path + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +from scipy import interpolate +from timm.data.mixup import Mixup +from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy +from timm.models import create_model +from timm.utils import ModelEma + +import modeling_finetune +import utils +from datasets import build_dataset +from engine_for_finetuning import ( + final_test, + merge, + train_one_epoch, + validation_one_epoch, +) +from optim_factory import ( + LayerDecayValueAssigner, + create_optimizer, + get_parameter_groups, +) +from utils import NativeScalerWithGradNormCount as NativeScaler +from utils import multiple_samples_collate + + +def get_args(): + parser = argparse.ArgumentParser( + 'MAE fine-tuning and evaluation script for image classification', + add_help=False) + parser.add_argument('--batch_size', default=64, type=int) + parser.add_argument('--epochs', default=30, type=int) + parser.add_argument('--update_freq', default=1, type=int) + parser.add_argument('--save_ckpt_freq', default=100, type=int) + + # Model parameters + parser.add_argument('--model', + default='deit_base_patch16_224', + type=str, + metavar='MODEL', + help='Name of model to train') + parser.add_argument('--tubelet_size', type=int, default=2) + parser.add_argument('--input_size', + default=224, + type=int, + help='images input size') + + parser.add_argument('--with_checkpoint', + action='store_true', + default=False) + + parser.add_argument('--drop', + type=float, + default=0.0, + metavar='PCT', + help='Dropout rate (default: 0.)') + parser.add_argument('--attn_drop_rate', + type=float, + default=0.0, + metavar='PCT', + help='Attention dropout rate (default: 0.)') + parser.add_argument('--drop_path', + type=float, + default=0.1, + metavar='PCT', + help='Drop path rate (default: 0.1)') + parser.add_argument('--head_drop_rate', + type=float, + default=0.0, + metavar='PCT', + help='cls head dropout rate (default: 0.)') + + parser.add_argument('--disable_eval_during_finetuning', + action='store_true', + default=False) + + parser.add_argument('--model_ema', action='store_true', default=False) + parser.add_argument('--model_ema_decay', + type=float, + default=0.9999, + help='') + parser.add_argument('--model_ema_force_cpu', + action='store_true', + default=False, + help='') + + # Optimizer parameters + parser.add_argument('--opt', + default='adamw', + type=str, + metavar='OPTIMIZER', + help='Optimizer (default: "adamw"') + parser.add_argument('--opt_eps', + default=1e-8, + type=float, + 
metavar='EPSILON', + help='Optimizer Epsilon (default: 1e-8)') + parser.add_argument( + '--opt_betas', + default=None, + type=float, + nargs='+', + metavar='BETA', + help='Optimizer Betas (default: None, use opt default)') + parser.add_argument('--clip_grad', + type=float, + default=None, + metavar='NORM', + help='Clip gradient norm (default: None, no clipping)') + parser.add_argument('--momentum', + type=float, + default=0.9, + metavar='M', + help='SGD momentum (default: 0.9)') + parser.add_argument('--weight_decay', + type=float, + default=0.05, + help='weight decay (default: 0.05)') + parser.add_argument('--weight_decay_end', + type=float, + default=None, + help="""Final value of the + weight decay. We use a cosine schedule for WD and using a larger decay by + the end of training improves performance for ViTs.""") + + parser.add_argument('--lr', + type=float, + default=1e-3, + metavar='LR', + help='learning rate (default: 1e-3)') + parser.add_argument('--layer_decay', type=float, default=0.75) + parser.add_argument('--extra_decay', type=float, default=1.0) + + parser.add_argument('--warmup_lr', + type=float, + default=1e-8, + metavar='LR', + help='warmup learning rate (default: 1e-6)') + parser.add_argument( + '--min_lr', + type=float, + default=1e-6, + metavar='LR', + help='lower lr bound for cyclic schedulers that hit 0 (1e-5)') + + parser.add_argument('--warmup_epochs', + type=int, + default=5, + metavar='N', + help='epochs to warmup LR, if scheduler supports') + parser.add_argument( + '--warmup_steps', + type=int, + default=-1, + metavar='N', + help='num of steps to warmup LR, will overload warmup_epochs if set > 0' + ) + + # Augmentation parameters + parser.add_argument('--color_jitter', + type=float, + default=0.4, + metavar='PCT', + help='Color jitter factor (default: 0.4)') + parser.add_argument('--num_sample', + type=int, + default=2, + help='Repeated_aug (default: 2)') + parser.add_argument('--num_segment', + type=int, + default=1, + help='segments used for v4d (default: 1)') + # parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME', + parser.add_argument( + '--aa', + type=str, + default='rand-m7-n4-mstd0.5-inc1', + metavar='NAME', + help= + 'Use AutoAugment policy. "v0" or "original". 
" + "(default: rand-m7-n4-mstd0.5-inc1)' + ), + parser.add_argument('--smoothing', + type=float, + default=0.1, + help='Label smoothing (default: 0.1)') + parser.add_argument( + '--train_interpolation', + type=str, + default='bicubic', + help= + 'Training interpolation (random, bilinear, bicubic default: "bicubic")' + ) + + # Evaluation parameters + parser.add_argument('--crop_pct', type=float, default=None) + parser.add_argument('--short_side_size', type=int, default=224) + parser.add_argument('--test_num_segment', type=int, default=10) + parser.add_argument('--test_num_crop', type=int, default=3) + + # * Random Erase params + parser.add_argument('--reprob', + type=float, + default=0.25, + metavar='PCT', + help='Random erase prob (default: 0.25)') + parser.add_argument('--remode', + type=str, + default='pixel', + help='Random erase mode (default: "pixel")') + parser.add_argument('--recount', + type=int, + default=1, + help='Random erase count (default: 1)') + parser.add_argument( + '--resplit', + action='store_true', + default=False, + help='Do not random erase first (clean) augmentation split') + + # * Mixup params + parser.add_argument('--mixup', + type=float, + default=0.8, + help='mixup alpha, mixup enabled if > 0.') + parser.add_argument('--cutmix', + type=float, + default=1.0, + help='cutmix alpha, cutmix enabled if > 0.') + parser.add_argument( + '--cutmix_minmax', + type=float, + nargs='+', + default=None, + help= + 'cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)' + ) + parser.add_argument( + '--mixup_prob', + type=float, + default=1.0, + help= + 'Probability of performing mixup or cutmix when either/both is enabled' + ) + parser.add_argument( + '--mixup_switch_prob', + type=float, + default=0.5, + help= + 'Probability of switching to cutmix when both mixup and cutmix enabled' + ) + parser.add_argument( + '--mixup_mode', + type=str, + default='batch', + help='How to apply mixup/cutmix params. 
Per "batch", "pair", or "elem"' + ) + + # * Finetuning params + parser.add_argument('--finetune', + default='', + help='finetune from checkpoint') + parser.add_argument('--model_key', default='model|module', type=str) + parser.add_argument('--model_prefix', default='', type=str) + parser.add_argument('--init_scale', default=0.001, type=float) + parser.add_argument('--use_mean_pooling', action='store_true') + parser.set_defaults(use_mean_pooling=True) + parser.add_argument('--use_cls', + action='store_false', + dest='use_mean_pooling') + + # Dataset parameters + parser.add_argument('--data_path', + default='/datasets01/imagenet_full_size/061417/', + type=str, + help='dataset path') + parser.add_argument('--eval_data_path', + default=None, + type=str, + help='dataset path for evaluation') + parser.add_argument('--nb_classes', + default=400, + type=int, + help='number of the classification types') + parser.add_argument('--imagenet_default_mean_and_std', + default=True, + action='store_true') + parser.add_argument('--num_segments', type=int, default=1) + parser.add_argument('--num_frames', type=int, default=16) + parser.add_argument('--sampling_rate', type=int, default=4) + parser.add_argument('--sparse_sample', default=False, action='store_true') + parser.add_argument('--data_set', + default='Kinetics-400', + choices=[ + 'Kinetics-400', 'Kinetics-600', 'Kinetics-700', + 'SSV2', 'UCF101', 'HMDB51', 'Diving48', + 'image_folder', 'ANet', 'MixKinetics' + ], + type=str, + help='ImageNet dataset path') + parser.add_argument('--output_dir', + default='', + help='path where to save, empty for no saving') + parser.add_argument('--log_dir', + default=None, + help='path where to tensorboard log') + parser.add_argument('--device', + default='cuda', + help='device to use for training / testing') + parser.add_argument('--seed', default=0, type=int) + parser.add_argument('--resume', default='', help='resume from checkpoint') + parser.add_argument('--auto_resume', action='store_true') + parser.add_argument('--no_auto_resume', + action='store_false', + dest='auto_resume') + parser.set_defaults(auto_resume=True) + + parser.add_argument('--save_ckpt', action='store_true') + parser.add_argument('--no_save_ckpt', + action='store_false', + dest='save_ckpt') + parser.set_defaults(save_ckpt=True) + + parser.add_argument('--start_epoch', + default=0, + type=int, + metavar='N', + help='start epoch') + parser.add_argument('--eval', + action='store_true', + help='Perform evaluation only') + parser.add_argument('--validation', + action='store_true', + help='Perform validation only') + parser.add_argument('--dist_eval', + action='store_true', + default=False, + help='Enabling distributed evaluation') + parser.add_argument('--num_workers', default=10, type=int) + parser.add_argument( + '--pin_mem', + action='store_true', + help= + 'Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.' 
+ ) + parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem') + parser.set_defaults(pin_mem=True) + + # distributed training parameters + parser.add_argument('--world_size', + default=1, + type=int, + help='number of distributed processes') + parser.add_argument('--local_rank', default=-1, type=int) + parser.add_argument('--dist_on_itp', action='store_true') + parser.add_argument('--dist_url', + default='env://', + help='url used to set up distributed training') + + parser.add_argument('--enable_deepspeed', + action='store_true', + default=False) + + known_args, _ = parser.parse_known_args() + + if known_args.enable_deepspeed: + # try: + import deepspeed + from deepspeed import DeepSpeedConfig + parser = deepspeed.add_config_arguments(parser) + ds_init = deepspeed.initialize + # except: + # print("Please 'pip install deepspeed==0.4.0'") + # exit(0) + else: + ds_init = None + + return parser.parse_args(), ds_init + + +def main(args, ds_init): + utils.init_distributed_mode(args) + + if ds_init is not None: + utils.create_ds_config(args) + + print(args) + + device = torch.device(args.device) + + # fix the seed for reproducibility + seed = args.seed + utils.get_rank() + torch.manual_seed(seed) + np.random.seed(seed) + # random.seed(seed) + + cudnn.benchmark = True + + dataset_train, args.nb_classes = build_dataset(is_train=True, + test_mode=False, + args=args) + if args.disable_eval_during_finetuning: + dataset_val = None + else: + dataset_val, _ = build_dataset(is_train=False, + test_mode=False, + args=args) + dataset_test, _ = build_dataset(is_train=False, test_mode=True, args=args) + + if True: # args.distributed: + num_tasks = utils.get_world_size() + global_rank = utils.get_rank() + sampler_train = torch.utils.data.DistributedSampler( + dataset_train, + num_replicas=num_tasks, + rank=global_rank, + shuffle=True) + print("Sampler_train = %s" % str(sampler_train)) + if args.dist_eval: + if len(dataset_val) % num_tasks != 0: + print( + 'Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. 
' + 'This will slightly alter validation results as extra duplicate entries are added to achieve ' + 'equal num of samples per-process.') + sampler_val = torch.utils.data.DistributedSampler( + dataset_val, + num_replicas=num_tasks, + rank=global_rank, + shuffle=False) + sampler_test = torch.utils.data.DistributedSampler( + dataset_test, + num_replicas=num_tasks, + rank=global_rank, + shuffle=False) + else: + sampler_val = torch.utils.data.SequentialSampler(dataset_val) + else: + sampler_train = torch.utils.data.RandomSampler(dataset_train) + sampler_val = torch.utils.data.SequentialSampler(dataset_val) + + if global_rank == 0 and args.log_dir is not None: + os.makedirs(args.log_dir, exist_ok=True) + log_writer = utils.TensorboardLogger(log_dir=args.log_dir) + else: + log_writer = None + + if args.num_sample > 1: + collate_func = partial(multiple_samples_collate, fold=False) + else: + collate_func = None + + data_loader_train = torch.utils.data.DataLoader( + dataset_train, + sampler=sampler_train, + batch_size=args.batch_size, + num_workers=args.num_workers, + pin_memory=args.pin_mem, + drop_last=True, + collate_fn=collate_func, + persistent_workers=True) + + if dataset_val is not None: + data_loader_val = torch.utils.data.DataLoader( + dataset_val, + sampler=sampler_val, + batch_size=int(1.5 * args.batch_size), + num_workers=args.num_workers, + pin_memory=args.pin_mem, + drop_last=False, + persistent_workers=True) + else: + data_loader_val = None + + if dataset_test is not None: + data_loader_test = torch.utils.data.DataLoader( + dataset_test, + sampler=sampler_test, + batch_size=args.batch_size, + num_workers=args.num_workers, + pin_memory=args.pin_mem, + drop_last=False, + persistent_workers=True) + else: + data_loader_test = None + + mixup_fn = None + mixup_active = args.mixup > 0 or args.cutmix > 0. 
or args.cutmix_minmax is not None + if mixup_active: + print("Mixup is activated!") + mixup_fn = Mixup(mixup_alpha=args.mixup, + cutmix_alpha=args.cutmix, + cutmix_minmax=args.cutmix_minmax, + prob=args.mixup_prob, + switch_prob=args.mixup_switch_prob, + mode=args.mixup_mode, + label_smoothing=args.smoothing, + num_classes=args.nb_classes) + + model = create_model( + args.model, + img_size=args.input_size, + pretrained=False, + num_classes=args.nb_classes, + all_frames=args.num_frames * args.num_segments, + tubelet_size=args.tubelet_size, + drop_rate=args.drop, + drop_path_rate=args.drop_path, + attn_drop_rate=args.attn_drop_rate, + head_drop_rate=args.head_drop_rate, + drop_block_rate=None, + use_mean_pooling=args.use_mean_pooling, + init_scale=args.init_scale, + with_cp=args.with_checkpoint, + num_segment=args.num_segment, + ) + + patch_size = model.patch_embed.patch_size + print("Patch size = %s" % str(patch_size)) + + # args.window_size = (args.input_size // patch_size[0], args.input_size // patch_size[1]) + args.window_size = (args.num_frames // 2, args.input_size // patch_size[0], + args.input_size // patch_size[1]) + + args.patch_size = patch_size + + if args.finetune: + if args.finetune.startswith('https'): + checkpoint = torch.hub.load_state_dict_from_url(args.finetune, + map_location='cpu', + check_hash=True) + else: + checkpoint = torch.load(args.finetune, map_location='cpu') + + print("Load ckpt from %s" % args.finetune) + checkpoint_model = None + for model_key in args.model_key.split('|'): + if model_key in checkpoint: + checkpoint_model = checkpoint[model_key] + print("Load state_dict by model_key = %s" % model_key) + break + if checkpoint_model is None: + checkpoint_model = checkpoint + state_dict = model.state_dict() + for k in ['head.weight', 'head.bias']: + if k in checkpoint_model and checkpoint_model[ + k].shape != state_dict[k].shape: + if checkpoint_model[k].shape[0] == 710: + if args.data_set == 'Kinetics-400': + print('Convert to k400 class head') + checkpoint_model[k] = checkpoint_model[k][:400] + elif args.data_set == 'Kinetics-600': + print('Convert to k600 class head') + label_map_path = '/mnt/petrelfs/huangbingkun/data/mix_kinetics/label_mixto600.json' + label_map = json.load(open(label_map_path)) + checkpoint_model[k] = checkpoint_model[k][label_map] + elif args.data_set == 'Kinetics-700': + print('Convert to k700 class head') + label_map_path = '/mnt/petrelfs/huangbingkun/data/mix_kinetics/label_mixto700.json' + label_map = json.load(open(label_map_path)) + checkpoint_model[k] = checkpoint_model[k][label_map] + else: + print(f"Removing key {k} from pretrained checkpoint") + del checkpoint_model[k] + else: + print(f"Removing key {k} from pretrained checkpoint") + del checkpoint_model[k] + + all_keys = list(checkpoint_model.keys()) + new_dict = OrderedDict() + for key in all_keys: + if key.startswith('backbone.'): + new_dict[key[9:]] = checkpoint_model[key] + elif key.startswith('encoder.'): + new_dict[key[8:]] = checkpoint_model[key] + else: + new_dict[key] = checkpoint_model[key] + checkpoint_model = new_dict + + # interpolate position embedding + if 'pos_embed' in checkpoint_model: + pos_embed_checkpoint = checkpoint_model['pos_embed'] + embedding_size = pos_embed_checkpoint.shape[-1] # channel dim + num_patches = model.patch_embed.num_patches # + num_extra_tokens = model.pos_embed.shape[-2] - num_patches # 0/1 + + # height (== width) for the checkpoint position embedding + orig_size = int( + ((pos_embed_checkpoint.shape[-2] - num_extra_tokens) // + 
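                 # (total tokens - extra tokens) / (frames per clip / tubelet_size)
                 # = tokens per temporal slice; its square root is the spatial
                 # grid side of the checkpoint's position embedding.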
(args.num_frames // model.patch_embed.tubelet_size))**0.5) + # height (== width) for the new position embedding + new_size = int( + (num_patches // + (args.num_frames // model.patch_embed.tubelet_size))**0.5) + # class_token and dist_token are kept unchanged + if orig_size != new_size: + print("Position interpolate from %dx%d to %dx%d" % + (orig_size, orig_size, new_size, new_size)) + extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] + # only the position tokens are interpolated + pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] + # B, L, C -> BT, H, W, C -> BT, C, H, W + pos_tokens = pos_tokens.reshape( + -1, args.num_frames // model.patch_embed.tubelet_size, + orig_size, orig_size, embedding_size) + pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, + embedding_size).permute( + 0, 3, 1, 2) + pos_tokens = torch.nn.functional.interpolate( + pos_tokens, + size=(new_size, new_size), + mode='bicubic', + align_corners=False) + # BT, C, H, W -> BT, H, W, C -> B, T, H, W, C + pos_tokens = pos_tokens.permute(0, 2, 3, 1).reshape( + -1, args.num_frames // model.patch_embed.tubelet_size, + new_size, new_size, embedding_size) + pos_tokens = pos_tokens.flatten(1, 3) # B, L, C + new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) + checkpoint_model['pos_embed'] = new_pos_embed + elif args.input_size != 224: + pos_tokens = model.pos_embed + org_num_frames = 16 + T = org_num_frames // args.tubelet_size + P = int((pos_tokens.shape[1] // T) ** 0.5) + C = pos_tokens.shape[2] + new_P = args.input_size // patch_size[0] + # B, L, C -> BT, H, W, C -> BT, C, H, W + pos_tokens = pos_tokens.reshape(-1, T, P, P, C) + pos_tokens = pos_tokens.reshape(-1, P, P, C).permute(0, 3, 1, 2) + pos_tokens = torch.nn.functional.interpolate( + pos_tokens, size=(new_P, new_P), mode='bicubic', align_corners=False) + # BT, C, H, W -> BT, H, W, C -> B, T, H, W, C + pos_tokens = pos_tokens.permute(0, 2, 3, 1).reshape(-1, T, new_P, new_P, C) + pos_tokens = pos_tokens.flatten(1, 3) # B, L, C + model.pos_embed = pos_tokens # update + if args.num_frames != 16: + org_num_frames = 16 + T = org_num_frames // args.tubelet_size + pos_tokens = model.pos_embed + new_T = args.num_frames // args.tubelet_size + P = int((pos_tokens.shape[1] // T) ** 0.5) + C = pos_tokens.shape[2] + pos_tokens = pos_tokens.reshape(-1, T, P, P, C) + pos_tokens = pos_tokens.permute(0, 2, 3, 4, 1).reshape(-1, C, T) # BHW,C,T + pos_tokens = torch.nn.functional.interpolate( + pos_tokens, size=new_T, mode='linear') + pos_tokens = pos_tokens.reshape(1, P, P, C, new_T).permute(0, 4, 1, 2, 3) + pos_tokens = pos_tokens.flatten(1, 3) + model.pos_embed = pos_tokens # update + + utils.load_state_dict(model, + checkpoint_model, + prefix=args.model_prefix) + # model.load_state_dict(checkpoint_model, strict=False) + + model.to(device) + + model_ema = None + if args.model_ema: + # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper + model_ema = ModelEma(model, + decay=args.model_ema_decay, + device='cpu' if args.model_ema_force_cpu else '', + resume='') + print("Using EMA with decay = %.8f" % args.model_ema_decay) + + model_without_ddp = model + n_parameters = sum(p.numel() for p in model.parameters() + if p.requires_grad) + + print("Model = %s" % str(model_without_ddp)) + print('number of params:', n_parameters) + + total_batch_size = args.batch_size * args.update_freq * utils.get_world_size( + ) + num_training_steps_per_epoch = len(dataset_train) // total_batch_size + args.lr = args.lr * total_batch_size 
/ 256 + #########scale the lr############# + args.min_lr = args.min_lr * total_batch_size / 256 + args.warmup_lr = args.warmup_lr * total_batch_size / 256 + #########scale the lr############# + print("LR = %.8f" % args.lr) + print("Batch size = %d" % total_batch_size) + print("Update frequency = %d" % args.update_freq) + print("Number of training examples = %d" % len(dataset_train)) + print("Number of training steps per epoch = %d" % + num_training_steps_per_epoch) + + num_layers = model_without_ddp.get_num_layers() + if args.layer_decay < 1.0: + assigner = LayerDecayValueAssigner(list( + args.layer_decay**(num_layers + 1 - i) + for i in range(num_layers + 2)), + extra_decay=args.extra_decay) + else: + assigner = None + + if assigner is not None: + print("Assigned values = %s" % str(assigner.values)) + + skip_weight_decay_list = model.no_weight_decay() + print("Skip weight decay list: ", skip_weight_decay_list) + + if args.enable_deepspeed: + loss_scaler = None + optimizer_params = get_parameter_groups( + model, args.weight_decay, skip_weight_decay_list, + assigner.get_layer_id if assigner is not None else None, + assigner.get_scale if assigner is not None else None) + model, optimizer, _, _ = ds_init( + args=args, + model=model, + model_parameters=optimizer_params, + dist_init_required=not args.distributed, + ) + + print("model.gradient_accumulation_steps() = %d" % + model.gradient_accumulation_steps()) + assert model.gradient_accumulation_steps() == args.update_freq + else: + if args.distributed: + model = torch.nn.parallel.DistributedDataParallel( + model, device_ids=[args.gpu], find_unused_parameters=False) + model_without_ddp = model.module + + optimizer = create_optimizer(args, + model_without_ddp, + skip_list=skip_weight_decay_list, + get_num_layer=assigner.get_layer_id + if assigner is not None else None, + get_layer_scale=assigner.get_scale + if assigner is not None else None) + loss_scaler = NativeScaler() + + print("Use step level LR scheduler!") + lr_schedule_values = utils.cosine_scheduler( + args.lr, + args.min_lr, + args.epochs, + num_training_steps_per_epoch, + warmup_epochs=args.warmup_epochs, + warmup_steps=args.warmup_steps, + ) + if args.weight_decay_end is None: + args.weight_decay_end = args.weight_decay + wd_schedule_values = utils.cosine_scheduler(args.weight_decay, + args.weight_decay_end, + args.epochs, + num_training_steps_per_epoch) + print("Max WD = %.7f, Min WD = %.7f" % + (max(wd_schedule_values), min(wd_schedule_values))) + + if mixup_fn is not None: + # smoothing is handled with mixup label transform + criterion = SoftTargetCrossEntropy() + elif args.smoothing > 0.: + criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing) + else: + criterion = torch.nn.CrossEntropyLoss() + + print("criterion = %s" % str(criterion)) + + utils.auto_load_model(args=args, + model=model, + model_without_ddp=model_without_ddp, + optimizer=optimizer, + loss_scaler=loss_scaler, + model_ema=model_ema) + if args.validation: + test_stats = validation_one_epoch(data_loader_val, model, device) + print( + f"{len(dataset_val)} val images: Top-1 {test_stats['acc1']:.2f}%, Top-5 {test_stats['acc5']:.2f}%, loss {test_stats['loss']:.4f}" + ) + exit(0) + + if args.eval: + preds_file = os.path.join(args.output_dir, str(global_rank) + '.txt') + test_stats = final_test(data_loader_test, model, device, preds_file) + torch.distributed.barrier() + if global_rank == 0: + print("Start merging results...") + final_top1, final_top5 = merge(args.output_dir, num_tasks) + print( + f"Accuracy of 
the network on the {len(dataset_test)} test videos: Top-1: {final_top1:.2f}%, Top-5: {final_top5:.2f}%" + ) + log_stats = {'Final top-1': final_top1, 'Final Top-5': final_top5} + if args.output_dir and utils.is_main_process(): + with open(os.path.join(args.output_dir, "log.txt"), + mode="a", + encoding="utf-8") as f: + f.write(json.dumps(log_stats) + "\n") + exit(0) + # test_stats = validation_one_epoch(data_loader_test, model, device) + # print(f"Accuracy of the network on the {len(dataset_test)} test videos: {test_stats['acc1']:.1f}%") + + print(f"Start training for {args.epochs} epochs") + start_time = time.time() + max_accuracy = 0.0 + for epoch in range(args.start_epoch, args.epochs): + if args.distributed: + data_loader_train.sampler.set_epoch(epoch) + if log_writer is not None: + log_writer.set_step(epoch * num_training_steps_per_epoch * + args.update_freq) + train_stats = train_one_epoch( + model, + criterion, + data_loader_train, + optimizer, + device, + epoch, + loss_scaler, + args, + args.clip_grad, + model_ema, + mixup_fn, + log_writer=log_writer, + start_steps=epoch * num_training_steps_per_epoch, + lr_schedule_values=lr_schedule_values, + wd_schedule_values=wd_schedule_values, + num_training_steps_per_epoch=num_training_steps_per_epoch, + update_freq=args.update_freq, + ) + if args.output_dir and args.save_ckpt: + if (epoch + + 1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs: + utils.save_model(args=args, + model=model, + model_without_ddp=model_without_ddp, + optimizer=optimizer, + loss_scaler=loss_scaler, + epoch=epoch, + model_ema=model_ema) + if data_loader_val is not None: + test_stats = validation_one_epoch(data_loader_val, model, device) + print( + f"Accuracy of the network on the {len(dataset_val)} val images: {test_stats['acc1']:.2f}%" + ) + if max_accuracy < test_stats["acc1"]: + max_accuracy = test_stats["acc1"] + if args.output_dir and args.save_ckpt: + utils.save_model(args=args, + model=model, + model_without_ddp=model_without_ddp, + optimizer=optimizer, + loss_scaler=loss_scaler, + epoch="best", + model_ema=model_ema) + + print(f'Max accuracy: {max_accuracy:.2f}%') + if log_writer is not None: + log_writer.update(val_acc1=test_stats['acc1'], + head="perf", + step=epoch) + log_writer.update(val_acc5=test_stats['acc5'], + head="perf", + step=epoch) + log_writer.update(val_loss=test_stats['loss'], + head="perf", + step=epoch) + + log_stats = { + **{f'train_{k}': v + for k, v in train_stats.items()}, + **{f'val_{k}': v + for k, v in test_stats.items()}, 'epoch': epoch, + 'n_parameters': n_parameters + } + else: + log_stats = { + **{f'train_{k}': v + for k, v in train_stats.items()}, + # **{f'test_{k}': v for k, v in test_stats.items()}, + 'epoch': epoch, + 'n_parameters': n_parameters + } + if args.output_dir and utils.is_main_process(): + if log_writer is not None: + log_writer.flush() + with open(os.path.join(args.output_dir, "log.txt"), + mode="a", + encoding="utf-8") as f: + f.write(json.dumps(log_stats) + "\n") + + preds_file = os.path.join(args.output_dir, str(global_rank) + '.txt') + test_stats = final_test(data_loader_test, model, device, preds_file) + torch.distributed.barrier() + if global_rank == 0: + print("Start merging results...") + final_top1, final_top5 = merge(args.output_dir, num_tasks) + print( + f"Accuracy of the network on the {len(dataset_test)} test videos: Top-1: {final_top1:.2f}%, Top-5: {final_top5:.2f}%" + ) + log_stats = {'Final top-1': final_top1, 'Final Top-5': final_top5} + if args.output_dir and utils.is_main_process(): + 
with open(os.path.join(args.output_dir, "log.txt"), + mode="a", + encoding="utf-8") as f: + f.write(json.dumps(log_stats) + "\n") + + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('Training time {}'.format(total_time_str)) + + +if __name__ == '__main__': + opts, ds_init = get_args() + if opts.output_dir: + Path(opts.output_dir).mkdir(parents=True, exist_ok=True) + main(opts, ds_init) diff --git a/Pretrain/VideoMAE/run_class_linear.py b/Pretrain/VideoMAE/run_class_linear.py new file mode 100644 index 0000000..641e778 --- /dev/null +++ b/Pretrain/VideoMAE/run_class_linear.py @@ -0,0 +1,836 @@ +# -------------------------------------------------------- +# Based on BEiT, timm, DINO and DeiT code bases +# https://github.com/microsoft/unilm/tree/master/beit +# https://github.com/rwightman/pytorch-image-models/tree/master/timm +# https://github.com/facebookresearch/deit +# https://github.com/facebookresearch/dino +# --------------------------------------------------------' + +import argparse +import datetime +import json +import os +import time +from collections import OrderedDict +from pathlib import Path + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +from scipy import interpolate +from timm.data.mixup import Mixup +from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy +from timm.models import create_model +from timm.utils import ModelEma + +import modeling_finetune +import utils +from datasets import build_dataset +from engine_for_finetuning import ( + final_test, + merge, + train_one_epoch, + validation_one_epoch, +) +from optim_factory import ( + LayerDecayValueAssigner, + create_optimizer, + get_parameter_groups, +) +from utils import NativeScalerWithGradNormCount as NativeScaler + + +def get_args(): + parser = argparse.ArgumentParser( + 'MAE fine-tuning and evaluation script for image classification', + add_help=False) + parser.add_argument('--batch_size', default=64, type=int) + parser.add_argument('--epochs', default=30, type=int) + parser.add_argument('--update_freq', default=1, type=int) + parser.add_argument('--save_ckpt_freq', default=100, type=int) + + # Model parameters + parser.add_argument('--model', + default='deit_base_patch16_224', + type=str, + metavar='MODEL', + help='Name of model to train') + parser.add_argument('--tubelet_size', type=int, default=2) + parser.add_argument('--input_size', + default=224, + type=int, + help='images input size') + + parser.add_argument('--drop', + type=float, + default=0.0, + metavar='PCT', + help='Dropout rate (default: 0.)') + parser.add_argument('--attn_drop_rate', + type=float, + default=0.0, + metavar='PCT', + help='Attention dropout rate (default: 0.)') + parser.add_argument('--drop_path', + type=float, + default=0.1, + metavar='PCT', + help='Drop path rate (default: 0.1)') + + parser.add_argument('--disable_eval_during_finetuning', + action='store_true', + default=False) + + parser.add_argument('--model_ema', action='store_true', default=False) + parser.add_argument('--model_ema_decay', + type=float, + default=0.9999, + help='') + parser.add_argument('--model_ema_force_cpu', + action='store_true', + default=False, + help='') + + # Optimizer parameters + parser.add_argument('--opt', + default='adamw', + type=str, + metavar='OPTIMIZER', + help='Optimizer (default: "adamw"') + parser.add_argument('--opt_eps', + default=1e-8, + type=float, + metavar='EPSILON', + help='Optimizer Epsilon (default: 1e-8)') + parser.add_argument( + 
'--opt_betas', + default=None, + type=float, + nargs='+', + metavar='BETA', + help='Optimizer Betas (default: None, use opt default)') + parser.add_argument('--clip_grad', + type=float, + default=None, + metavar='NORM', + help='Clip gradient norm (default: None, no clipping)') + parser.add_argument('--momentum', + type=float, + default=0.9, + metavar='M', + help='SGD momentum (default: 0.9)') + parser.add_argument('--weight_decay', + type=float, + default=0.05, + help='weight decay (default: 0.05)') + parser.add_argument('--weight_decay_end', + type=float, + default=None, + help="""Final value of the + weight decay. We use a cosine schedule for WD and using a larger decay by + the end of training improves performance for ViTs.""") + + parser.add_argument('--lr', + type=float, + default=1e-3, + metavar='LR', + help='learning rate (default: 1e-3)') + parser.add_argument('--layer_decay', type=float, default=0.75) + + parser.add_argument('--warmup_lr', + type=float, + default=1e-6, + metavar='LR', + help='warmup learning rate (default: 1e-6)') + parser.add_argument( + '--min_lr', + type=float, + default=1e-6, + metavar='LR', + help='lower lr bound for cyclic schedulers that hit 0 (1e-5)') + + parser.add_argument('--warmup_epochs', + type=int, + default=5, + metavar='N', + help='epochs to warmup LR, if scheduler supports') + parser.add_argument( + '--warmup_steps', + type=int, + default=-1, + metavar='N', + help='num of steps to warmup LR, will overload warmup_epochs if set > 0' + ) + + # Augmentation parameters + parser.add_argument('--color_jitter', + type=float, + default=0.4, + metavar='PCT', + help='Color jitter factor (default: 0.4)') + # parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME', + parser.add_argument( + '--aa', + type=str, + default='rand-m7-n4-mstd0.5-inc1', + metavar='NAME', + help= + 'Use AutoAugment policy. "v0" or "original". 
" + "(default: rand-m7-n4-mstd0.5-inc1)' + ), + parser.add_argument('--smoothing', + type=float, + default=0.1, + help='Label smoothing (default: 0.1)') + parser.add_argument( + '--train_interpolation', + type=str, + default='bicubic', + help= + 'Training interpolation (random, bilinear, bicubic default: "bicubic")' + ) + + # Evaluation parameters + parser.add_argument('--crop_pct', type=float, default=None) + parser.add_argument('--short_side_size', type=int, default=224) + parser.add_argument('--test_num_segment', type=int, default=10) + parser.add_argument('--test_num_crop', type=int, default=3) + + # * Random Erase params + parser.add_argument('--reprob', + type=float, + default=0.25, + metavar='PCT', + help='Random erase prob (default: 0.25)') + parser.add_argument('--remode', + type=str, + default='pixel', + help='Random erase mode (default: "pixel")') + parser.add_argument('--recount', + type=int, + default=1, + help='Random erase count (default: 1)') + parser.add_argument( + '--resplit', + action='store_true', + default=False, + help='Do not random erase first (clean) augmentation split') + + # * Mixup params + parser.add_argument('--mixup', + type=float, + default=0.8, + help='mixup alpha, mixup enabled if > 0.') + parser.add_argument('--cutmix', + type=float, + default=1.0, + help='cutmix alpha, cutmix enabled if > 0.') + parser.add_argument( + '--cutmix_minmax', + type=float, + nargs='+', + default=None, + help= + 'cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)' + ) + parser.add_argument( + '--mixup_prob', + type=float, + default=1.0, + help= + 'Probability of performing mixup or cutmix when either/both is enabled' + ) + parser.add_argument( + '--mixup_switch_prob', + type=float, + default=0.5, + help= + 'Probability of switching to cutmix when both mixup and cutmix enabled' + ) + parser.add_argument( + '--mixup_mode', + type=str, + default='batch', + help='How to apply mixup/cutmix params. 
Per "batch", "pair", or "elem"' + ) + + # * Finetuning params + parser.add_argument('--finetune', + default='', + help='finetune from checkpoint') + parser.add_argument('--model_key', default='model|module', type=str) + parser.add_argument('--model_prefix', default='', type=str) + parser.add_argument('--init_scale', default=0.001, type=float) + parser.add_argument('--use_mean_pooling', action='store_true') + parser.set_defaults(use_mean_pooling=True) + parser.add_argument('--use_cls', + action='store_false', + dest='use_mean_pooling') + + # Dataset parameters + parser.add_argument('--data_path', + default='/datasets01/imagenet_full_size/061417/', + type=str, + help='dataset path') + parser.add_argument('--eval_data_path', + default=None, + type=str, + help='dataset path for evaluation') + parser.add_argument('--nb_classes', + default=400, + type=int, + help='number of the classification types') + parser.add_argument('--imagenet_default_mean_and_std', + default=True, + action='store_true') + parser.add_argument('--num_segments', type=int, default=1) + parser.add_argument('--num_frames', type=int, default=16) + parser.add_argument('--sampling_rate', type=int, default=4) + parser.add_argument('--data_set', + default='Kinetics-400', + choices=[ + 'Kinetics-400', 'SSV2', 'UCF101', 'HMDB51', + 'Diving48', 'image_folder' + ], + type=str, + help='ImageNet dataset path') + parser.add_argument('--output_dir', + default='', + help='path where to save, empty for no saving') + parser.add_argument('--log_dir', + default=None, + help='path where to tensorboard log') + parser.add_argument('--device', + default='cuda', + help='device to use for training / testing') + parser.add_argument('--seed', default=0, type=int) + parser.add_argument('--resume', default='', help='resume from checkpoint') + parser.add_argument('--auto_resume', action='store_true') + parser.add_argument('--no_auto_resume', + action='store_false', + dest='auto_resume') + parser.set_defaults(auto_resume=True) + + parser.add_argument('--save_ckpt', action='store_true') + parser.add_argument('--no_save_ckpt', + action='store_false', + dest='save_ckpt') + parser.set_defaults(save_ckpt=True) + + parser.add_argument('--start_epoch', + default=0, + type=int, + metavar='N', + help='start epoch') + parser.add_argument('--eval', + action='store_true', + help='Perform evaluation only') + parser.add_argument('--dist_eval', + action='store_true', + default=False, + help='Enabling distributed evaluation') + parser.add_argument('--num_workers', default=10, type=int) + parser.add_argument( + '--pin_mem', + action='store_true', + help= + 'Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.' 
+ ) + parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem') + parser.set_defaults(pin_mem=True) + + # distributed training parameters + parser.add_argument('--world_size', + default=1, + type=int, + help='number of distributed processes') + parser.add_argument('--local_rank', default=-1, type=int) + parser.add_argument('--dist_on_itp', action='store_true') + parser.add_argument('--dist_url', + default='env://', + help='url used to set up distributed training') + + parser.add_argument('--enable_deepspeed', + action='store_true', + default=False) + + known_args, _ = parser.parse_known_args() + + if known_args.enable_deepspeed: + try: + import deepspeed + from deepspeed import DeepSpeedConfig + parser = deepspeed.add_config_arguments(parser) + ds_init = deepspeed.initialize + except: + print("Please 'pip install deepspeed==0.4.0'") + exit(0) + else: + ds_init = None + + return parser.parse_args(), ds_init + + +def main(args, ds_init): + utils.init_distributed_mode(args) + + if ds_init is not None: + utils.create_ds_config(args) + + print(args) + + device = torch.device(args.device) + + # fix the seed for reproducibility + seed = args.seed + utils.get_rank() + torch.manual_seed(seed) + np.random.seed(seed) + # random.seed(seed) + + cudnn.benchmark = True + + dataset_train, args.nb_classes = build_dataset(is_train=True, + test_mode=False, + args=args) + if args.disable_eval_during_finetuning: + dataset_val = None + else: + dataset_val, _ = build_dataset(is_train=False, + test_mode=False, + args=args) + dataset_test, _ = build_dataset(is_train=False, test_mode=True, args=args) + + if True: # args.distributed: + num_tasks = utils.get_world_size() + global_rank = utils.get_rank() + sampler_train = torch.utils.data.DistributedSampler( + dataset_train, + num_replicas=num_tasks, + rank=global_rank, + shuffle=True) + print("Sampler_train = %s" % str(sampler_train)) + if args.dist_eval: + if len(dataset_val) % num_tasks != 0: + print( + 'Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. 
' + 'This will slightly alter validation results as extra duplicate entries are added to achieve ' + 'equal num of samples per-process.') + sampler_val = torch.utils.data.DistributedSampler( + dataset_val, + num_replicas=num_tasks, + rank=global_rank, + shuffle=False) + sampler_test = torch.utils.data.DistributedSampler( + dataset_test, + num_replicas=num_tasks, + rank=global_rank, + shuffle=False) + else: + sampler_val = torch.utils.data.SequentialSampler(dataset_val) + else: + sampler_train = torch.utils.data.RandomSampler(dataset_train) + sampler_val = torch.utils.data.SequentialSampler(dataset_val) + + if global_rank == 0 and args.log_dir is not None: + os.makedirs(args.log_dir, exist_ok=True) + log_writer = utils.TensorboardLogger(log_dir=args.log_dir) + else: + log_writer = None + + data_loader_train = torch.utils.data.DataLoader( + dataset_train, + sampler=sampler_train, + batch_size=args.batch_size, + num_workers=args.num_workers, + pin_memory=args.pin_mem, + drop_last=True, + persistent_workers=True) + + if dataset_val is not None: + data_loader_val = torch.utils.data.DataLoader( + dataset_val, + sampler=sampler_val, + batch_size=int(1.5 * args.batch_size), + num_workers=args.num_workers, + pin_memory=args.pin_mem, + drop_last=False, + persistent_workers=True) + else: + data_loader_val = None + + if dataset_test is not None: + data_loader_test = torch.utils.data.DataLoader( + dataset_test, + sampler=sampler_test, + batch_size=args.batch_size, + num_workers=args.num_workers, + pin_memory=args.pin_mem, + drop_last=False, + persistent_workers=True) + else: + data_loader_test = None + + mixup_fn = None + mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None + if mixup_active: + print("Mixup is activated!") + mixup_fn = Mixup(mixup_alpha=args.mixup, + cutmix_alpha=args.cutmix, + cutmix_minmax=args.cutmix_minmax, + prob=args.mixup_prob, + switch_prob=args.mixup_switch_prob, + mode=args.mixup_mode, + label_smoothing=args.smoothing, + num_classes=args.nb_classes) + + model = create_model( + args.model, + pretrained=False, + num_classes=args.nb_classes, + all_frames=args.num_frames * args.num_segments, + tubelet_size=args.tubelet_size, + drop_rate=args.drop, + drop_path_rate=args.drop_path, + attn_drop_rate=args.attn_drop_rate, + drop_block_rate=None, + use_mean_pooling=args.use_mean_pooling, + init_scale=args.init_scale, + ) + + patch_size = model.patch_embed.patch_size + print("Patch size = %s" % str(patch_size)) + + # args.window_size = (args.input_size // patch_size[0], args.input_size // patch_size[1]) + args.window_size = (args.num_frames // 2, args.input_size // patch_size[0], + args.input_size // patch_size[1]) + + args.patch_size = patch_size + + if args.finetune: + if args.finetune.startswith('https'): + checkpoint = torch.hub.load_state_dict_from_url(args.finetune, + map_location='cpu', + check_hash=True) + else: + checkpoint = torch.load(args.finetune, map_location='cpu') + + print("Load ckpt from %s" % args.finetune) + checkpoint_model = None + for model_key in args.model_key.split('|'): + if model_key in checkpoint: + checkpoint_model = checkpoint[model_key] + print("Load state_dict by model_key = %s" % model_key) + break + if checkpoint_model is None: + checkpoint_model = checkpoint + state_dict = model.state_dict() + for k in ['head.weight', 'head.bias']: + if k in checkpoint_model and checkpoint_model[ + k].shape != state_dict[k].shape: + print(f"Removing key {k} from pretrained checkpoint") + del checkpoint_model[k] + + all_keys = 
list(checkpoint_model.keys()) + new_dict = OrderedDict() + for key in all_keys: + if key.startswith('backbone.'): + new_dict[key[9:]] = checkpoint_model[key] + elif key.startswith('encoder.'): + new_dict[key[8:]] = checkpoint_model[key] + else: + new_dict[key] = checkpoint_model[key] + checkpoint_model = new_dict + + # interpolate position embedding + if 'pos_embed' in checkpoint_model: + pos_embed_checkpoint = checkpoint_model['pos_embed'] + embedding_size = pos_embed_checkpoint.shape[-1] # channel dim + num_patches = model.patch_embed.num_patches # + num_extra_tokens = model.pos_embed.shape[-2] - num_patches # 0/1 + + # height (== width) for the checkpoint position embedding + orig_size = int( + ((pos_embed_checkpoint.shape[-2] - num_extra_tokens) // + (args.num_frames // model.patch_embed.tubelet_size))**0.5) + # height (== width) for the new position embedding + new_size = int( + (num_patches // + (args.num_frames // model.patch_embed.tubelet_size))**0.5) + # class_token and dist_token are kept unchanged + if orig_size != new_size: + print("Position interpolate from %dx%d to %dx%d" % + (orig_size, orig_size, new_size, new_size)) + extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] + # only the position tokens are interpolated + pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] + # B, L, C -> BT, H, W, C -> BT, C, H, W + pos_tokens = pos_tokens.reshape( + -1, args.num_frames // model.patch_embed.tubelet_size, + orig_size, orig_size, embedding_size) + pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, + embedding_size).permute( + 0, 3, 1, 2) + pos_tokens = torch.nn.functional.interpolate( + pos_tokens, + size=(new_size, new_size), + mode='bicubic', + align_corners=False) + # BT, C, H, W -> BT, H, W, C -> B, T, H, W, C + pos_tokens = pos_tokens.permute(0, 2, 3, 1).reshape( + -1, args.num_frames // model.patch_embed.tubelet_size, + new_size, new_size, embedding_size) + pos_tokens = pos_tokens.flatten(1, 3) # B, L, C + new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) + checkpoint_model['pos_embed'] = new_pos_embed + + utils.load_state_dict(model, + checkpoint_model, + prefix=args.model_prefix) + # model.load_state_dict(checkpoint_model, strict=False) + + model.to(device) + + model_ema = None + if args.model_ema: + # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper + model_ema = ModelEma(model, + decay=args.model_ema_decay, + device='cpu' if args.model_ema_force_cpu else '', + resume='') + print("Using EMA with decay = %.8f" % args.model_ema_decay) + + model_without_ddp = model + n_parameters = sum(p.numel() for p in model.parameters() + if p.requires_grad) + + print("Model = %s" % str(model_without_ddp)) + print('number of params:', n_parameters) + + total_batch_size = args.batch_size * args.update_freq * utils.get_world_size( + ) + num_training_steps_per_epoch = len(dataset_train) // total_batch_size + args.lr = args.lr * total_batch_size / 256 + #########scale the lr############# + args.min_lr = args.min_lr * total_batch_size / 256 + args.warmup_lr = args.warmup_lr * total_batch_size / 256 + #########scale the lr############# + print("LR = %.8f" % args.lr) + print("Batch size = %d" % total_batch_size) + print("Update frequency = %d" % args.update_freq) + print("Number of training examples = %d" % len(dataset_train)) + print("Number of training steps per epoch = %d" % + num_training_steps_per_epoch) + + num_layers = model_without_ddp.get_num_layers() + if args.layer_decay < 1.0: + assigner = 
LayerDecayValueAssigner( + list(args.layer_decay**(num_layers + 1 - i) + for i in range(num_layers + 2))) + else: + assigner = None + + if assigner is not None: + print("Assigned values = %s" % str(assigner.values)) + + skip_weight_decay_list = model.no_weight_decay() + print("Skip weight decay list: ", skip_weight_decay_list) + + if args.enable_deepspeed: + loss_scaler = None + optimizer_params = get_parameter_groups( + model, args.weight_decay, skip_weight_decay_list, + assigner.get_layer_id if assigner is not None else None, + assigner.get_scale if assigner is not None else None) + model, optimizer, _, _ = ds_init( + args=args, + model=model, + model_parameters=optimizer_params, + dist_init_required=not args.distributed, + ) + + print("model.gradient_accumulation_steps() = %d" % + model.gradient_accumulation_steps()) + assert model.gradient_accumulation_steps() == args.update_freq + else: + if args.distributed: + model = torch.nn.parallel.DistributedDataParallel( + model, device_ids=[args.gpu], find_unused_parameters=True) + model_without_ddp = model.module + + optimizer = create_optimizer(args, + model_without_ddp, + skip_list=skip_weight_decay_list, + get_num_layer=assigner.get_layer_id + if assigner is not None else None, + get_layer_scale=assigner.get_scale + if assigner is not None else None) + loss_scaler = NativeScaler() + + print("Use step level LR scheduler!") + lr_schedule_values = utils.cosine_scheduler( + args.lr, + args.min_lr, + args.epochs, + num_training_steps_per_epoch, + warmup_epochs=args.warmup_epochs, + warmup_steps=args.warmup_steps, + ) + if args.weight_decay_end is None: + args.weight_decay_end = args.weight_decay + wd_schedule_values = utils.cosine_scheduler(args.weight_decay, + args.weight_decay_end, + args.epochs, + num_training_steps_per_epoch) + print("Max WD = %.7f, Min WD = %.7f" % + (max(wd_schedule_values), min(wd_schedule_values))) + + if mixup_fn is not None: + # smoothing is handled with mixup label transform + criterion = SoftTargetCrossEntropy() + elif args.smoothing > 0.: + criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing) + else: + criterion = torch.nn.CrossEntropyLoss() + + print("criterion = %s" % str(criterion)) + + utils.auto_load_model(args=args, + model=model, + model_without_ddp=model_without_ddp, + optimizer=optimizer, + loss_scaler=loss_scaler, + model_ema=model_ema) + + if args.eval: + preds_file = os.path.join(args.output_dir, str(global_rank) + '.txt') + test_stats = final_test(data_loader_test, model, device, preds_file) + torch.distributed.barrier() + if global_rank == 0: + print("Start merging results...") + final_top1, final_top5 = merge(args.output_dir, num_tasks) + print( + f"Accuracy of the network on the {len(dataset_test)} test videos: Top-1: {final_top1:.2f}%, Top-5: {final_top5:.2f}%" + ) + log_stats = {'Final top-1': final_top1, 'Final Top-5': final_top5} + if args.output_dir and utils.is_main_process(): + with open(os.path.join(args.output_dir, "log.txt"), + mode="a", + encoding="utf-8") as f: + f.write(json.dumps(log_stats) + "\n") + exit(0) + # test_stats = validation_one_epoch(data_loader_test, model, device) + # print(f"Accuracy of the network on the {len(dataset_test)} test videos: {test_stats['acc1']:.1f}%") + + print(f"Start training for {args.epochs} epochs") + start_time = time.time() + max_accuracy = 0.0 + for epoch in range(args.start_epoch, args.epochs): + if args.distributed: + data_loader_train.sampler.set_epoch(epoch) + if log_writer is not None: + log_writer.set_step(epoch * 
num_training_steps_per_epoch * + args.update_freq) + train_stats = train_one_epoch( + model, + criterion, + data_loader_train, + optimizer, + device, + epoch, + loss_scaler, + args.clip_grad, + model_ema, + mixup_fn, + log_writer=log_writer, + start_steps=epoch * num_training_steps_per_epoch, + lr_schedule_values=lr_schedule_values, + wd_schedule_values=wd_schedule_values, + num_training_steps_per_epoch=num_training_steps_per_epoch, + update_freq=args.update_freq, + ) + if args.output_dir and args.save_ckpt: + if (epoch + + 1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs: + utils.save_model(args=args, + model=model, + model_without_ddp=model_without_ddp, + optimizer=optimizer, + loss_scaler=loss_scaler, + epoch=epoch, + model_ema=model_ema) + if data_loader_val is not None: + test_stats = validation_one_epoch(data_loader_val, model, device) + print( + f"Accuracy of the network on the {len(dataset_val)} val images: {test_stats['acc1']:.1f}%" + ) + if max_accuracy < test_stats["acc1"]: + max_accuracy = test_stats["acc1"] + if args.output_dir and args.save_ckpt: + utils.save_model(args=args, + model=model, + model_without_ddp=model_without_ddp, + optimizer=optimizer, + loss_scaler=loss_scaler, + epoch="best", + model_ema=model_ema) + + print(f'Max accuracy: {max_accuracy:.2f}%') + if log_writer is not None: + log_writer.update(val_acc1=test_stats['acc1'], + head="perf", + step=epoch) + log_writer.update(val_acc5=test_stats['acc5'], + head="perf", + step=epoch) + log_writer.update(val_loss=test_stats['loss'], + head="perf", + step=epoch) + + log_stats = { + **{f'train_{k}': v + for k, v in train_stats.items()}, + **{f'val_{k}': v + for k, v in test_stats.items()}, 'epoch': epoch, + 'n_parameters': n_parameters + } + else: + log_stats = { + **{f'train_{k}': v + for k, v in train_stats.items()}, + # **{f'test_{k}': v for k, v in test_stats.items()}, + 'epoch': epoch, + 'n_parameters': n_parameters + } + if args.output_dir and utils.is_main_process(): + if log_writer is not None: + log_writer.flush() + with open(os.path.join(args.output_dir, "log.txt"), + mode="a", + encoding="utf-8") as f: + f.write(json.dumps(log_stats) + "\n") + + preds_file = os.path.join(args.output_dir, str(global_rank) + '.txt') + test_stats = final_test(data_loader_test, model, device, preds_file) + torch.distributed.barrier() + if global_rank == 0: + print("Start merging results...") + final_top1, final_top5 = merge(args.output_dir, num_tasks) + print( + f"Accuracy of the network on the {len(dataset_test)} test videos: Top-1: {final_top1:.2f}%, Top-5: {final_top5:.2f}%" + ) + log_stats = {'Final top-1': final_top1, 'Final Top-5': final_top5} + if args.output_dir and utils.is_main_process(): + with open(os.path.join(args.output_dir, "log.txt"), + mode="a", + encoding="utf-8") as f: + f.write(json.dumps(log_stats) + "\n") + + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('Training time {}'.format(total_time_str)) + + +if __name__ == '__main__': + opts, ds_init = get_args() + if opts.output_dir: + Path(opts.output_dir).mkdir(parents=True, exist_ok=True) + main(opts, ds_init) diff --git a/Pretrain/VideoMAE/run_mae_pretraining.py b/Pretrain/VideoMAE/run_mae_pretraining.py new file mode 100644 index 0000000..8f76f62 --- /dev/null +++ b/Pretrain/VideoMAE/run_mae_pretraining.py @@ -0,0 +1,440 @@ +# -------------------------------------------------------- +# Based on BEiT, timm, DINO and DeiT code bases +# 
https://github.com/microsoft/unilm/tree/master/beit +# https://github.com/rwightman/pytorch-image-models/tree/master/timm +# https://github.com/facebookresearch/deit +# https://github.com/facebookresearch/dino +# --------------------------------------------------------' + +import argparse +import datetime +import json +import os +import time +from collections import OrderedDict +from functools import partial +from pathlib import Path + +import numpy as np +import torch +import torch.backends.cudnn as cudnn +from timm.models import create_model + +import modeling_pretrain +import utils +from datasets import build_pretraining_dataset +from engine_for_pretraining import train_one_epoch +from optim_factory import create_optimizer +from utils import NativeScalerWithGradNormCount as NativeScaler +from utils import multiple_pretrain_samples_collate + + +def get_args(): + parser = argparse.ArgumentParser('MAE pre-training script', add_help=False) + parser.add_argument('--batch_size', default=64, type=int) + parser.add_argument('--epochs', default=300, type=int) + parser.add_argument('--save_ckpt_freq', default=50, type=int) + + # Model parameters + parser.add_argument( + '--model', + default='pretrain_mae_base_patch16_224', + type=str, + metavar='MODEL', + help='Name of model to train') + parser.add_argument('--with_checkpoint', action='store_true', default=False) + + parser.add_argument( + '--decoder_depth', default=4, type=int, help='depth of decoder') + + parser.add_argument( + '--mask_type', + default='random', + choices=['random', 't_consist', 't_progressive', 't_center_prog'], + type=str, + help='masked strategy of visual tokens/patches') + + parser.add_argument( + '--mask_ratio', + default=0.75, + type=float, + help='ratio of the visual tokens/patches need be masked') + parser.add_argument( + '--decode_ratio', + default=1.0, + type=float, + help='ratio of the visual tokens/patches need be masked') + + parser.add_argument( + '--input_size', + default=224, + type=int, + help='images input size for backbone') + + parser.add_argument( + '--drop_path', + type=float, + default=0.0, + metavar='PCT', + help='Drop path rate (default: 0.1)') + + parser.add_argument( + '--normlize_target', + default=True, + type=bool, + help='normalized the target patch pixels') + + # Optimizer parameters + parser.add_argument( + '--opt', + default='adamw', + type=str, + metavar='OPTIMIZER', + help='Optimizer (default: "adamw"') + parser.add_argument( + '--opt_eps', + default=1e-8, + type=float, + metavar='EPSILON', + help='Optimizer Epsilon (default: 1e-8)') + parser.add_argument( + '--opt_betas', + default=None, + type=float, + nargs='+', + metavar='BETA', + help='Optimizer Betas (default: None, use opt default)') + parser.add_argument( + '--clip_grad', + type=float, + default=None, + metavar='NORM', + help='Clip gradient norm (default: None, no clipping)') + parser.add_argument( + '--momentum', + type=float, + default=0.9, + metavar='M', + help='SGD momentum (default: 0.9)') + parser.add_argument( + '--weight_decay', + type=float, + default=0.05, + help='weight decay (default: 0.05)') + parser.add_argument( + '--weight_decay_end', + type=float, + default=None, + help="""Final value of the + weight decay. We use a cosine schedule for WD. 
+ (Set the same value with args.weight_decay to keep weight decay no change)""" + ) + + parser.add_argument( + '--lr', + type=float, + default=1.5e-4, + metavar='LR', + help='learning rate (default: 1.5e-4)') + parser.add_argument( + '--warmup_lr', + type=float, + default=1e-6, + metavar='LR', + help='warmup learning rate (default: 1e-6)') + parser.add_argument( + '--min_lr', + type=float, + default=1e-5, + metavar='LR', + help='lower lr bound for cyclic schedulers that hit 0 (1e-5)') + + parser.add_argument( + '--warmup_epochs', + type=int, + default=40, + metavar='N', + help='epochs to warmup LR, if scheduler supports') + parser.add_argument( + '--warmup_steps', + type=int, + default=-1, + metavar='N', + help='epochs to warmup LR, if scheduler supports') + + # Augmentation parameters + parser.add_argument( + '--color_jitter', + type=float, + default=0.0, + metavar='PCT', + help='Color jitter factor (default: 0.4)') + parser.add_argument( + '--train_interpolation', + type=str, + default='bicubic', + help= + 'Training interpolation (random, bilinear, bicubic default: "bicubic")') + + # * Finetuning params + parser.add_argument( + '--finetune', default='', help='finetune from checkpoint') + + # Dataset parameters + parser.add_argument( + '--data_path', + default='/datasets01/imagenet_full_size/061417/train', + type=str, + help='dataset path') + parser.add_argument( + '--data_root', default='', type=str, help='dataset path root') + parser.add_argument( + '--imagenet_default_mean_and_std', default=True, action='store_true') + parser.add_argument('--num_frames', type=int, default=16) + parser.add_argument('--sampling_rate', type=int, default=4) + parser.add_argument('--num_sample', type=int, default=1) + parser.add_argument( + '--output_dir', + default='', + help='path where to save, empty for no saving') + parser.add_argument( + '--log_dir', default=None, help='path where to tensorboard log') + parser.add_argument( + '--device', default='cuda', help='device to use for training / testing') + parser.add_argument('--seed', default=0, type=int) + parser.add_argument('--resume', default='', help='resume from checkpoint') + parser.add_argument('--auto_resume', action='store_true') + parser.add_argument( + '--no_auto_resume', action='store_false', dest='auto_resume') + parser.set_defaults(auto_resume=True) + + parser.add_argument( + '--start_epoch', default=0, type=int, metavar='N', help='start epoch') + parser.add_argument('--num_workers', default=10, type=int) + parser.add_argument( + '--pin_mem', + action='store_true', + help= + 'Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.' 
+ ) + parser.add_argument( + '--no_pin_mem', action='store_false', dest='pin_mem', help='') + parser.set_defaults(pin_mem=True) + + # distributed training parameters + parser.add_argument( + '--world_size', + default=1, + type=int, + help='number of distributed processes') + parser.add_argument('--local_rank', default=-1, type=int) + parser.add_argument('--dist_on_itp', action='store_true') + parser.add_argument( + '--dist_url', + default='env://', + help='url used to set up distributed training') + + return parser.parse_args() + + +def get_model(args): + print(f"Creating model: {args.model}") + model = create_model( + args.model, + pretrained=False, + drop_path_rate=args.drop_path, + drop_block_rate=None, + decoder_depth=args.decoder_depth, + with_cp=args.with_checkpoint) + + return model + + +def main(args): + utils.init_distributed_mode(args) + + print(args) + + device = torch.device(args.device) + + # fix the seed for reproducibility + seed = args.seed + utils.get_rank() + torch.manual_seed(seed) + np.random.seed(seed) + # random.seed(seed) + + cudnn.benchmark = True + + model = get_model(args) + patch_size = model.encoder.patch_embed.patch_size + print("Patch size = %s" % str(patch_size)) + args.window_size = (args.num_frames // 2, args.input_size // patch_size[0], + args.input_size // patch_size[1]) + args.patch_size = patch_size + + # get dataset + dataset_train = build_pretraining_dataset(args) + + if True: # args.distributed: + num_tasks = utils.get_world_size() + global_rank = utils.get_rank() + sampler_rank = global_rank + num_training_steps_per_epoch = len( + dataset_train) // args.batch_size // num_tasks + + sampler_train = torch.utils.data.DistributedSampler( + dataset_train, + num_replicas=num_tasks, + rank=sampler_rank, + shuffle=True) + print("Sampler_train = %s" % str(sampler_train)) + else: + sampler_train = torch.utils.data.RandomSampler(dataset_train) + + if global_rank == 0 and args.log_dir is not None: + os.makedirs(args.log_dir, exist_ok=True) + log_writer = utils.TensorboardLogger(log_dir=args.log_dir) + else: + log_writer = None + + if args.num_sample > 1: + collate_func = partial(multiple_pretrain_samples_collate, fold=False) + else: + collate_func = None + + data_loader_train = torch.utils.data.DataLoader( + dataset_train, + sampler=sampler_train, + batch_size=args.batch_size, + num_workers=args.num_workers, + pin_memory=args.pin_mem, + drop_last=True, + collate_fn=collate_func, + worker_init_fn=utils.seed_worker, + persistent_workers=True) + + if args.finetune: + checkpoint = torch.load(args.finetune, map_location='cpu') + + print("Load ckpt from %s" % args.finetune) + checkpoint_model = None + for model_key in ['model', 'module']: + if model_key in checkpoint: + checkpoint_model = checkpoint[model_key] + print("Load state_dict by model_key = %s" % model_key) + break + if checkpoint_model is None: + checkpoint_model = checkpoint + + utils.load_state_dict(model, checkpoint_model) + + model.to(device) + model_without_ddp = model + n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) + + print("Model = %s" % str(model_without_ddp)) + print('number of params: {} M'.format(n_parameters / 1e6)) + + total_batch_size = args.batch_size * utils.get_world_size() + + args.lr = args.lr * total_batch_size / 256 + #########scale the lr############# + args.min_lr = args.min_lr * total_batch_size / 256 + args.warmup_lr = args.warmup_lr * total_batch_size / 256 + #########scale the lr############# + print("LR = %.8f" % args.lr) + print("Batch size = %d" % 
total_batch_size) + print("Number of training steps = %d" % num_training_steps_per_epoch) + print("Number of training examples per epoch = %d" % + (total_batch_size * num_training_steps_per_epoch)) + + if args.distributed: + model = torch.nn.parallel.DistributedDataParallel( + model, device_ids=[args.gpu], find_unused_parameters=False) + model_without_ddp = model.module + + optimizer = create_optimizer(args, model_without_ddp) + loss_scaler = NativeScaler() + + print("Use step level LR & WD scheduler!") + lr_schedule_values = utils.cosine_scheduler( + args.lr, + args.min_lr, + args.epochs, + num_training_steps_per_epoch, + warmup_epochs=args.warmup_epochs, + warmup_steps=args.warmup_steps, + ) + if args.weight_decay_end is None: + args.weight_decay_end = args.weight_decay + wd_schedule_values = utils.cosine_scheduler(args.weight_decay, + args.weight_decay_end, + args.epochs, + num_training_steps_per_epoch) + print("Max WD = %.7f, Min WD = %.7f" % + (max(wd_schedule_values), min(wd_schedule_values))) + + utils.auto_load_model( + args=args, + model=model, + model_without_ddp=model_without_ddp, + optimizer=optimizer, + loss_scaler=loss_scaler) + torch.cuda.empty_cache() + print(f"Start training for {args.epochs} epochs") + start_time = time.time() + for epoch in range(args.start_epoch, args.epochs): + if args.distributed: + data_loader_train.sampler.set_epoch(epoch) + if log_writer is not None: + log_writer.set_step(epoch * num_training_steps_per_epoch) + train_stats = train_one_epoch( + model, + data_loader_train, + optimizer, + device, + epoch, + loss_scaler, + args.clip_grad, + log_writer=log_writer, + start_steps=epoch * num_training_steps_per_epoch, + lr_schedule_values=lr_schedule_values, + wd_schedule_values=wd_schedule_values, + patch_size=patch_size[0], + normlize_target=args.normlize_target) + if args.output_dir: + if (epoch + + 1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs: + utils.save_model( + args=args, + model=model, + model_without_ddp=model_without_ddp, + optimizer=optimizer, + loss_scaler=loss_scaler, + epoch=epoch) + + log_stats = { + **{f'train_{k}': v + for k, v in train_stats.items()}, 'epoch': epoch, + 'n_parameters': n_parameters + } + + if args.output_dir and utils.is_main_process(): + if log_writer is not None: + log_writer.flush() + with open( + os.path.join(args.output_dir, "log.txt"), + mode="a", + encoding="utf-8") as f: + f.write(json.dumps(log_stats) + "\n") + + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('Training time {}'.format(total_time_str)) + + +if __name__ == '__main__': + opts = get_args() + if opts.output_dir: + Path(opts.output_dir).mkdir(parents=True, exist_ok=True) + main(opts) diff --git a/Pretrain/VideoMAE/run_mae_vis.py b/Pretrain/VideoMAE/run_mae_vis.py new file mode 100644 index 0000000..07306f3 --- /dev/null +++ b/Pretrain/VideoMAE/run_mae_vis.py @@ -0,0 +1,287 @@ +# -*- coding: utf-8 -*- +import argparse + +# -------------------------------------------------------- +# Based on BEiT, timm, DINO and DeiT code bases +# https://github.com/microsoft/unilm/tree/master/beit +# https://github.com/rwightman/pytorch-image-models/tree/master/timm +# https://github.com/facebookresearch/deit +# https://github.com/facebookresearch/dino +# --------------------------------------------------------' +import os +from pathlib import Path + +# import datetime +import numpy as np + +# import time +import torch +import torch.backends.cudnn as cudnn +from decord import VideoReader, 
cpu +from einops import rearrange +from petrel_client.client import Client +from PIL import Image +from timm.data import create_transform +from timm.data.constants import ( + IMAGENET_DEFAULT_MEAN, + IMAGENET_DEFAULT_STD, + IMAGENET_INCEPTION_MEAN, + IMAGENET_INCEPTION_STD, +) +from timm.models import create_model +from torchvision import datasets, transforms +from torchvision.transforms import ToPILImage + +import modeling_pretrain +import utils +from datasets import DataAugmentationForMAE +from kinetics import VideoClsDataset +from mae import VideoMAE +from masking_generator import ( + RandomMaskingGenerator, + TemporalCenteringProgressiveMaskingGenerator, + TemporalConsistencyMaskingGenerator, + TemporalProgressiveMaskingGenerator, +) +from transforms import * + + +class DataAugmentationForMAE(object): + + def __init__(self, args): + imagenet_default_mean_and_std = args.imagenet_default_mean_and_std + mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN + std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD + self.input_mean = [0.485, 0.456, 0.406] + self.input_std = [0.229, 0.224, 0.225] + div = True + roll = False + normalize = GroupNormalize(self.input_mean, self.input_std) + self.train_augmentation = GroupCenterCrop(args.input_size) + # self.train_augmentation = GroupMultiScaleCrop(args.input_size, [1, .875, .75, .66]) + self.transform = transforms.Compose([ + # GroupScale((240,320)), + self.train_augmentation, + Stack(roll=roll), + ToTorchFormatTensor(div=div), + normalize, + ]) + if args.mask_type == 'random': + self.masked_position_generator = RandomMaskingGenerator( + args.window_size, args.mask_ratio) + elif args.mask_type == 't_consist': + self.masked_position_generator = TemporalConsistencyMaskingGenerator( + args.window_size, args.mask_ratio) + + def __call__(self, images): + process_data, _ = self.transform(images) + return process_data, self.masked_position_generator() + + def __repr__(self): + repr = "(DataAugmentationForBEiT,\n" + repr += " transform = %s,\n" % str(self.transform) + repr += " Masked position generator = %s,\n" % str( + self.masked_position_generator) + repr += ")" + return repr + + +def get_args(): + parser = argparse.ArgumentParser('MAE visualization reconstruction script', + add_help=False) + parser.add_argument('img_path', type=str, help='input image path') + parser.add_argument('save_path', type=str, help='save image path') + parser.add_argument('model_path', + type=str, + help='checkpoint path of model') + parser.add_argument( + '--mask_type', + default='random', + choices=['random', 't_consist', 't_progressive', 't_center_prog'], + type=str, + help='masked strategy of visual tokens/patches') + parser.add_argument('--num_frames', type=int, default=16) + parser.add_argument('--sampling_rate', type=int, default=4) + parser.add_argument('--decoder_depth', + default=4, + type=int, + help='depth of decoder') + parser.add_argument('--input_size', + default=224, + type=int, + help='images input size for backbone') + parser.add_argument('--device', + default='cuda:0', + help='device to use for training / testing') + parser.add_argument('--imagenet_default_mean_and_std', + default=True, + action='store_true') + parser.add_argument( + '--mask_ratio', + default=0.75, + type=float, + help='ratio of the visual tokens/patches need be masked') + # Model parameters + parser.add_argument('--model', + default='pretrain_mae_base_patch16_224', + type=str, + metavar='MODEL', + help='Name of 
model to vis') + parser.add_argument('--drop_path', + type=float, + default=0.0, + metavar='PCT', + help='Drop path rate (default: 0.1)') + + return parser.parse_args() + + +def get_model(args): + print(f"Creating model: {args.model}") + model = create_model(args.model, + pretrained=False, + drop_path_rate=args.drop_path, + drop_block_rate=None, + decoder_depth=args.decoder_depth) + + return model + + +def main(args): + print(args) + + device = torch.device(args.device) + cudnn.benchmark = True + + model = get_model(args) + patch_size = model.encoder.patch_embed.patch_size + print("Patch size = %s" % str(patch_size)) + args.window_size = (args.num_frames // 2, args.input_size // patch_size[0], + args.input_size // patch_size[1]) + args.patch_size = patch_size + + model.to(device) + checkpoint = torch.load(args.model_path, map_location='cpu') + model.load_state_dict(checkpoint['model']) + model.eval() + + if args.save_path: + Path(args.save_path).mkdir(parents=True, exist_ok=True) + + tmp = np.arange(0, 32, 2) + 60 + frame_id_list = tmp.tolist() + + if args.img_path.startswith("s3:"): + client = Client() + video_bytes = client.get(args.img_path) + vr = VideoReader(memoryview(video_bytes), mc=True, ctx=cpu(0)) + else: + with open(args.img_path, 'rb') as f: + vr = VideoReader(f, ctx=cpu(0)) + + duration = len(vr) + new_length = 1 + new_step = 1 + skip_length = new_length * new_step + video_data = vr.get_batch(frame_id_list).asnumpy() + + print(video_data.shape) + img = [ + Image.fromarray(video_data[vid, :, :, :]).convert('RGB') + for vid, _ in enumerate(frame_id_list) + ] + + transforms = DataAugmentationForMAE(args) + img, bool_masked_pos = transforms((img, None)) # T*C,H,W + # print(img.shape) + img = img.view((args.num_frames, 3) + img.size()[-2:]).transpose( + 0, 1) # T*C,H,W -> T,C,H,W -> C,T,H,W + # img = img.view(( -1 , args.num_frames) + img.size()[-2:]) + bool_masked_pos = torch.from_numpy(bool_masked_pos) + + with torch.no_grad(): + # img = img[None, :] + # bool_masked_pos = bool_masked_pos[None, :] + img = img.unsqueeze(0) + print(img.shape) + bool_masked_pos = bool_masked_pos.unsqueeze(0) + + img = img.to(device, non_blocking=True) + bool_masked_pos = bool_masked_pos.to( + device, non_blocking=True).flatten(1).to(torch.bool) + outputs = model(img, bool_masked_pos) + + #save original img + mean = torch.as_tensor(IMAGENET_DEFAULT_MEAN).to(device)[None, :, None, + None, None] + std = torch.as_tensor(IMAGENET_DEFAULT_STD).to(device)[None, :, None, + None, None] + # unnorm_images = images * std + mean # in [0, 1] + print(img.shape) + ori_img = img * std + mean # in [0, 1] + imgs = [ + ToPILImage()(ori_img[0, :, vid, :, :].cpu()) + for vid, _ in enumerate(frame_id_list) + ] + for id, im in enumerate(imgs): + im.save(f"{args.save_path}/ori_img{id}.jpg") + + img_squeeze = rearrange( + ori_img, + 'b c (t p0) (h p1) (w p2) -> b (t h w) (p0 p1 p2) c', + p0=2, + p1=patch_size[0], + p2=patch_size[0]) + img_norm = (img_squeeze - img_squeeze.mean(dim=-2, keepdim=True)) / ( + img_squeeze.var(dim=-2, unbiased=True, keepdim=True).sqrt() + 1e-6) + img_patch = rearrange(img_norm, 'b n p c -> b n (p c)') + img_patch[bool_masked_pos] = outputs + + #make mask + mask = torch.ones_like(img_patch) + mask[bool_masked_pos] = 0 + mask = rearrange(mask, 'b n (p c) -> b n p c', c=3) + mask = rearrange(mask, + 'b (t h w) (p0 p1 p2) c -> b c (t p0) (h p1) (w p2) ', + p0=2, + p1=patch_size[0], + p2=patch_size[1], + h=14, + w=14) + + #save reconstruction img + rec_img = rearrange(img_patch, 'b n (p c) -> b n p c', 
c=3) + # Notice: To visualize the reconstruction image, we add the predict and the original mean and var of each patch. Issue #40 + rec_img = rec_img * ( + img_squeeze.var(dim=-2, unbiased=True, keepdim=True).sqrt() + + 1e-6) + img_squeeze.mean(dim=-2, keepdim=True) + rec_img = rearrange( + rec_img, + 'b (t h w) (p0 p1 p2) c -> b c (t p0) (h p1) (w p2)', + p0=2, + p1=patch_size[0], + p2=patch_size[1], + h=14, + w=14) + imgs = [ + ToPILImage()(rec_img[0, :, vid, :, :].cpu().clamp(0, 0.996)) + for vid, _ in enumerate(frame_id_list) + ] + + # imgs = [ ToPILImage()(rec_img[0, :, vid, :, :].cpu().clip(0,0.996)) for vid, _ in enumerate(frame_id_list) ] + for id, im in enumerate(imgs): + im.save(f"{args.save_path}/rec_img{id}.jpg") + + #save random mask img + img_mask = rec_img * mask + imgs = [ + ToPILImage()(img_mask[0, :, vid, :, :].cpu()) + for vid, _ in enumerate(frame_id_list) + ] + for id, im in enumerate(imgs): + im.save(f"{args.save_path}/mask_img{id}.jpg") + + +if __name__ == '__main__': + opts = get_args() + main(opts) diff --git a/Pretrain/VideoMAE/scripts/finetune/dist_train_vit_b_k400_ft.sh b/Pretrain/VideoMAE/scripts/finetune/dist_train_vit_b_k400_ft.sh new file mode 100644 index 0000000..c57497a --- /dev/null +++ b/Pretrain/VideoMAE/scripts/finetune/dist_train_vit_b_k400_ft.sh @@ -0,0 +1,32 @@ +# Set the path to save checkpoints +OUTPUT_DIR='YOUR_PATH/k400_videomae_pretrain_base_patch16_224_frame_16x4_tube_mask_ratio_0.9_e800/eval_lr_1e-3_epoch_100' +# path to Kinetics set (train.csv/val.csv/test.csv) +DATA_PATH='YOUR_PATH/list_kinetics-400' +# path to pretrain model +MODEL_PATH='YOUR_PATH/k400_videomae_pretrain_base_patch16_224_frame_16x4_tube_mask_ratio_0.9_e800/checkpoint-799.pth' + +# batch_size can be adjusted according to number of GPUs +# this script is for 64 GPUs (8 nodes x 8 GPUs) +OMP_NUM_THREADS=1 python -m torch.distributed.launch --nproc_per_node=8 \ + --master_port 12320 --nnodes=8 --node_rank=$1 --master_addr=$2 \ + run_class_finetuning.py \ + --model vit_base_patch16_224 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 16 \ + --input_size 224 \ + --short_side_size 224 \ + --save_ckpt_freq 10 \ + --num_frames 16 \ + --sampling_rate 4 \ + --num_workers 8 \ + --opt adamw \ + --lr 1e-3 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --test_num_segment 5 \ + --test_num_crop 3 \ + --epochs 100 \ + --dist_eval --enable_deepspeed diff --git a/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_b_anet_ft.sh b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_b_anet_ft.sh new file mode 100644 index 0000000..82174d6 --- /dev/null +++ b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_b_anet_ft.sh @@ -0,0 +1,51 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 + +OUTPUT_DIR='/mnt/petrelfs/huangbingkun/VideoMAE-clean/work_dir/vit_b_hybrid_pt_anet_ft' +DATA_PATH='/mnt/petrelfs/share_data/huangbingkun/data/anet' +MODEL_PATH='/mnt/petrelfs/huangbingkun/VideoMAE-clean/work_dir/hybrid_1m_pretrain/checkpoint-800.pth' + +JOB_NAME=$1 +PARTITION=${PARTITION:-"video"} +# 8 for 1 node, 16 for 2 node, etc. 
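+# GPUS below is the total number of srun tasks (one task per GPU across all nodes),
+# GPUS_PER_NODE pins how many of them land on each node, and CPUS_PER_TASK should
+# roughly match --num_workers further down. All three can be overridden from the
+# environment instead of editing the script, e.g. (illustrative job name):
+#   GPUS=8 GPUS_PER_NODE=8 bash slurm_train_vit_b_anet_ft.sh anet_ft_debug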
+GPUS=${GPUS:-16} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-14} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} + +# batch_size can be adjusted according to the graphics card +srun -p $PARTITION \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --quotatype=auto \ + --async \ + ${SRUN_ARGS} \ + python run_class_finetuning.py \ + --model vit_base_patch16_224 \ + --data_set ANet \ + --nb_classes 200 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 16 \ + --input_size 224 \ + --short_side_size 224 \ + --save_ckpt_freq 10 \ + --num_frames 16 \ + --sampling_rate 4 \ + --num_workers 14 \ + --opt adamw \ + --lr 1e-3 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --test_num_segment 5 \ + --test_num_crop 3 \ + --epochs 100 \ + --dist_eval --enable_deepspeed \ + ${PY_ARGS} diff --git a/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_b_k400_ft.sh b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_b_k400_ft.sh new file mode 100644 index 0000000..c316b56 --- /dev/null +++ b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_b_k400_ft.sh @@ -0,0 +1,49 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 + +OUTPUT_DIR='/mnt/petrelfs/huangbingkun/VideoMAE-clean/work_dir/PATH_TO_LOG' +DATA_PATH='/mnt/petrelfs/share_data/huangbingkun/data/k400' +MODEL_PATH='/mnt/petrelfs/huangbingkun/VideoMAE-clean/work_dir/PATH_TO_MODEL' + +JOB_NAME=$1 +PARTITION=${PARTITION:-"video"} +# 8 for 1 node, 16 for 2 node, etc. +GPUS=${GPUS:-32} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-8} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} + +# batch_size can be adjusted according to the graphics card +srun -p $PARTITION \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --quotatype=spot \ + --async \ + ${SRUN_ARGS} \ + python run_class_finetuning.py \ + --model vit_base_patch16_224 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 16 \ + --input_size 224 \ + --short_side_size 224 \ + --save_ckpt_freq 10 \ + --num_frames 16 \ + --sampling_rate 4 \ + --num_workers 8 \ + --opt adamw \ + --lr 1e-3 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --test_num_segment 5 \ + --test_num_crop 3 \ + --epochs 100 \ + --dist_eval --enable_deepspeed \ + ${PY_ARGS} diff --git a/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_b_k400_sparse_ft.sh b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_b_k400_sparse_ft.sh new file mode 100644 index 0000000..468c1ba --- /dev/null +++ b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_b_k400_sparse_ft.sh @@ -0,0 +1,48 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 + +OUTPUT_DIR='/mnt/petrelfs/huangbingkun/VideoMAE-clean/work_dir/vit_b_k400_pt_k400_ft_tsn' +DATA_PATH='/mnt/petrelfs/share_data/huangbingkun/data/k400' +MODEL_PATH='/mnt/cache/share_data/huangbingkun/model/vit_b_k400_pt_1600e.pth' + +JOB_NAME=$1 +PARTITION=${PARTITION:-"video"} +# 8 for 1 node, 16 for 2 node, etc. 
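+# Rough arithmetic for the defaults below, assuming --update_freq is left at 1:
+# the effective global batch is GPUS x batch_size = 32 x 16 = 512 clips per step,
+# and run_class_finetuning.py then scales the base lr linearly, 1e-3 * 512 / 256 = 2e-3.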
+GPUS=${GPUS:-32} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-8} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} + +# batch_size can be adjusted according to the graphics card +srun -p $PARTITION \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --quotatype=auto \ + ${SRUN_ARGS} \ + python run_class_finetuning.py \ + --model vit_base_patch16_224 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 16 \ + --input_size 224 \ + --short_side_size 224 \ + --save_ckpt_freq 10 \ + --num_frames 16 \ + --sparse_sample \ + --num_workers 8 \ + --opt adamw \ + --lr 1e-3 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --test_num_segment 2 \ + --test_num_crop 3 \ + --epochs 100 \ + --dist_eval --enable_deepspeed \ + ${PY_ARGS} diff --git a/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_b_ssv2_ft.sh b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_b_ssv2_ft.sh new file mode 100644 index 0000000..e8c288f --- /dev/null +++ b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_b_ssv2_ft.sh @@ -0,0 +1,53 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 + +OUTPUT_DIR='/mnt/petrelfs/huangbingkun/VideoMAE-clean/work_dir/vit_b_ssv2_pt_dm05_ssv2_ft' +DATA_PATH='/mnt/petrelfs/share_data/huangbingkun/data/sthv2' +MODEL_PATH='/mnt/petrelfs/huangbingkun/work_dir_VideoMAE/vit_b_ssv2_pt_dm05/checkpoint-800.pth' + +JOB_NAME=$1 +PARTITION=${PARTITION:-"video"} +# 8 for 1 node, 16 for 2 node, etc. +GPUS=${GPUS:-16} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-14} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} + +# batch_size can be adjusted according to the graphics card +srun -p $PARTITION \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --quotatype=auto \ + --async \ + ${SRUN_ARGS} \ + python -u run_class_finetuning.py \ + --model vit_base_patch16_224 \ + --data_set SSV2 \ + --nb_classes 174 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 16 \ + --num_sample 2 \ + --input_size 224 \ + --short_side_size 224 \ + --save_ckpt_freq 10 \ + --num_frames 16 \ + --opt adamw \ + --lr 2e-3 \ + --num_workers 8 \ + --layer_decay 0.65 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --epochs 25 \ + --dist_eval \ + --test_num_segment 2 \ + --test_num_crop 3 \ + --enable_deepspeed \ + ${PY_ARGS} diff --git a/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_k400_ft.sh b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_k400_ft.sh new file mode 100644 index 0000000..96b6187 --- /dev/null +++ b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_k400_ft.sh @@ -0,0 +1,58 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 + +OUTPUT_DIR='/mnt/petrelfs/huangbingkun/VideoMAE-clean/pfs_work_dir/vit_h_hybridv2_pt_k400_ft_2' +DATA_PATH='/mnt/petrelfs/share_data/huangbingkun/data/k400' +MODEL_PATH='/mnt/cache/share_data/huangbingkun/model/vit_h_hybridv2_pt_1200e.pth' + +JOB_NAME=$1 +PARTITION=${PARTITION:-"video"} +# 8 for 1 node, 16 for 2 node, etc. 
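+# This variant adds --sparse_sample (presumably TSN-style uniform sampling, matching
+# the *_ft_tsn output dir) and tests with --test_num_segment 2 and --test_num_crop 3,
+# i.e. 2 x 3 = 6 views per test video before the per-rank results are merged.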
+GPUS=${GPUS:-32} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-10} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} + +# batch_size can be adjusted according to the graphics card +srun -p $PARTITION \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --quotatype=auto \ + --async \ + ${SRUN_ARGS} \ + python run_class_finetuning.py \ + --model vit_huge_patch16_224 \ + --data_set Kinetics-400 \ + --nb_classes 400 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 4 \ + --input_size 224 \ + --short_side_size 224 \ + --save_ckpt_freq 10 \ + --num_frames 16 \ + --sampling_rate 4 \ + --num_sample 2 \ + --num_workers 8 \ + --opt adamw \ + --lr 1e-3 \ + --warmup_lr 1e-8 \ + --min_lr 1e-6 \ + --drop_path 0.2 \ + --head_drop_rate 0.5 \ + --layer_decay 0.8 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --warmup_epochs 5 \ + --epochs 35 \ + --test_num_segment 7 \ + --test_num_crop 3 \ + --dist_eval --enable_deepspeed \ + ${PY_ARGS} diff --git a/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_k400_sparse_ft.sh b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_k400_sparse_ft.sh new file mode 100644 index 0000000..b746514 --- /dev/null +++ b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_k400_sparse_ft.sh @@ -0,0 +1,58 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 + +OUTPUT_DIR='/mnt/petrelfs/huangbingkun/VideoMAE-clean/pfs_work_dir/vit_h_hybridv2_pt_k400_ft_tsn' +DATA_PATH='/mnt/petrelfs/share_data/huangbingkun/data/k400' +MODEL_PATH='/mnt/cache/share_data/huangbingkun/model/vit_h_hybridv2_pt_1200e.pth' + +JOB_NAME=$1 +PARTITION=${PARTITION:-"video"} +# 8 for 1 node, 16 for 2 node, etc. 
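+# Sparse-sampling variant of the K400 fine-tuning (cf. the '_tsn' output dir): passes --sparse_sample instead of --sampling_rate.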
+GPUS=${GPUS:-32} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-10} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} + +# batch_size can be adjusted according to the graphics card +srun -p $PARTITION \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --quotatype=auto \ + --async \ + ${SRUN_ARGS} \ + python run_class_finetuning.py \ + --model vit_huge_patch16_224 \ + --data_set Kinetics-400 \ + --nb_classes 400 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 8 \ + --input_size 224 \ + --short_side_size 224 \ + --save_ckpt_freq 10 \ + --num_frames 16 \ + --sparse_sample \ + --num_sample 1 \ + --num_workers 8 \ + --opt adamw \ + --lr 3e-4 \ + --warmup_lr 1e-8 \ + --min_lr 1e-6 \ + --drop_path 0.2 \ + --head_drop_rate 0.5 \ + --layer_decay 0.8 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --warmup_epochs 5 \ + --epochs 75 \ + --test_num_segment 5 \ + --test_num_crop 3 \ + --dist_eval --enable_deepspeed \ + ${PY_ARGS} diff --git a/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_k600_ft.sh b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_k600_ft.sh new file mode 100644 index 0000000..578a157 --- /dev/null +++ b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_k600_ft.sh @@ -0,0 +1,57 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 + +OUTPUT_DIR='/mnt/petrelfs/huangbingkun/VideoMAE-clean/pfs_work_dir/vit_h_hybridv2_pt_k600_ft' +DATA_PATH='/mnt/petrelfs/share_data/huangbingkun/data/k600' +MODEL_PATH='/mnt/cache/share_data/huangbingkun/model/vit_h_hybridv2_pt_1200e.pth' + +JOB_NAME=$1 +PARTITION=${PARTITION:-"video"} +# 8 for 1 node, 16 for 2 node, etc. 
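+# Note: the defaults below request a single GPU; override GPUS and GPUS_PER_NODE (e.g. GPUS=32 GPUS_PER_NODE=8) for a full run.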
+GPUS=${GPUS:-1} +GPUS_PER_NODE=${GPUS_PER_NODE:-1} +CPUS_PER_TASK=${CPUS_PER_TASK:-10} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} + +# batch_size can be adjusted according to the graphics card +srun -p $PARTITION \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --quotatype=auto \ + ${SRUN_ARGS} \ + python run_class_finetuning.py \ + --model vit_huge_patch16_224 \ + --data_set Kinetics-600 \ + --nb_classes 600 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 4 \ + --input_size 224 \ + --short_side_size 224 \ + --save_ckpt_freq 10 \ + --num_frames 16 \ + --sampling_rate 4 \ + --num_sample 2 \ + --num_workers 8 \ + --opt adamw \ + --lr 1e-3 \ + --warmup_lr 1e-8 \ + --min_lr 1e-6 \ + --drop_path 0.2 \ + --head_drop_rate 0.5 \ + --layer_decay 0.8 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --warmup_epochs 5 \ + --epochs 40 \ + --test_num_segment 7 \ + --test_num_crop 3 \ + --dist_eval --enable_deepspeed \ + ${PY_ARGS} diff --git a/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_k600_it_k400_ft.sh b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_k600_it_k400_ft.sh new file mode 100644 index 0000000..7d07123 --- /dev/null +++ b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_k600_it_k400_ft.sh @@ -0,0 +1,58 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 + +OUTPUT_DIR='/mnt/petrelfs/huangbingkun/VideoMAE-clean/pfs_work_dir/vit_h_hybridv2_pt_k600_it_k400_ft' +DATA_PATH='/mnt/petrelfs/share_data/huangbingkun/data/k400' +MODEL_PATH='/mnt/cache/share_data/huangbingkun/model/vit_h_hybridv2_pt_1200e_k600_ft.pth' + +JOB_NAME=$1 +PARTITION=${PARTITION:-"video"} +# 8 for 1 node, 16 for 2 node, etc. 
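+# Intermediate fine-tuning: MODEL_PATH is the K600-fine-tuned ViT-H checkpoint, which is further fine-tuned on K400 below.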
+GPUS=${GPUS:-32} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-10} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} + +# batch_size can be adjusted according to the graphics card +srun -p $PARTITION \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --quotatype=auto \ + --async \ + ${SRUN_ARGS} \ + python run_class_finetuning.py \ + --model vit_huge_patch16_224 \ + --data_set Kinetics-400 \ + --nb_classes 400 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 4 \ + --input_size 224 \ + --short_side_size 224 \ + --save_ckpt_freq 10 \ + --num_frames 16 \ + --sampling_rate 4 \ + --num_sample 2 \ + --num_workers 8 \ + --opt adamw \ + --lr 5e-4 \ + --warmup_lr 1e-8 \ + --min_lr 1e-6 \ + --drop_path 0.3 \ + --head_drop_rate 0.5 \ + --layer_decay 0.8 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --warmup_epochs 3 \ + --epochs 15 \ + --test_num_segment 7 \ + --test_num_crop 3 \ + --dist_eval --enable_deepspeed \ + ${PY_ARGS} diff --git a/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_k700_ft.sh b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_k700_ft.sh new file mode 100644 index 0000000..e20aaaa --- /dev/null +++ b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_k700_ft.sh @@ -0,0 +1,58 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 + +OUTPUT_DIR='/mnt/petrelfs/huangbingkun/VideoMAE-clean/pfs_work_dir/vit_h_hybridv2_pt_k700_ft' +DATA_PATH='/mnt/petrelfs/share_data/huangbingkun/data/k700' +MODEL_PATH='/mnt/cache/share_data/huangbingkun/model/vit_h_hybridv2_pt_1200e.pth' + +JOB_NAME=$1 +PARTITION=${PARTITION:-"video"} +# 8 for 1 node, 16 for 2 node, etc. 
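+# ViT-H fine-tuning on Kinetics-700 (700 classes), starting from the hybrid-pretrained checkpoint.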
+GPUS=${GPUS:-32} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-10} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} + +# batch_size can be adjusted according to the graphics card +srun -p $PARTITION \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --quotatype=auto \ + --async \ + ${SRUN_ARGS} \ + python run_class_finetuning.py \ + --model vit_huge_patch16_224 \ + --data_set Kinetics-700 \ + --nb_classes 700 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 8 \ + --input_size 224 \ + --short_side_size 224 \ + --save_ckpt_freq 10 \ + --num_frames 16 \ + --sampling_rate 4 \ + --num_sample 1 \ + --num_workers 8 \ + --opt adamw \ + --lr 3e-4 \ + --warmup_lr 1e-8 \ + --min_lr 1e-6 \ + --drop_path 0.2 \ + --head_drop_rate 0.5 \ + --layer_decay 0.8 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --warmup_epochs 5 \ + --epochs 75 \ + --test_num_segment 7 \ + --test_num_crop 3 \ + --dist_eval --enable_deepspeed \ + ${PY_ARGS} diff --git a/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_mixk_ft.sh b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_mixk_ft.sh new file mode 100644 index 0000000..92ee9a5 --- /dev/null +++ b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_mixk_ft.sh @@ -0,0 +1,57 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 + +OUTPUT_DIR='/mnt/petrelfs/huangbingkun/VideoMAE-clean/pfs_work_dir/vit_h_hybridv2_pt_mixk_ft' +DATA_PATH='/mnt/petrelfs/share_data/huangbingkun/data/mix_kinetics' +MODEL_PATH='/mnt/cache/share_data/huangbingkun/model/vit_h_hybridv2_pt_1200e.pth' + +JOB_NAME=$1 +PARTITION=${PARTITION:-"video"} +# 8 for 1 node, 16 for 2 node, etc. 
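+# MixKinetics fine-tuning with 710 classes (presumably the merged K400/K600/K700 label space).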
+GPUS=${GPUS:-1} +GPUS_PER_NODE=${GPUS_PER_NODE:-1} +CPUS_PER_TASK=${CPUS_PER_TASK:-10} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} + +# batch_size can be adjusted according to the graphics card +srun -p $PARTITION \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --quotatype=auto \ + ${SRUN_ARGS} \ + python run_class_finetuning.py \ + --model vit_huge_patch16_224 \ + --data_set MixKinetics \ + --nb_classes 710 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 4 \ + --input_size 224 \ + --short_side_size 224 \ + --save_ckpt_freq 10 \ + --num_frames 16 \ + --sampling_rate 4 \ + --num_sample 2 \ + --num_workers 8 \ + --opt adamw \ + --lr 1e-3 \ + --warmup_lr 1e-8 \ + --min_lr 1e-6 \ + --drop_path 0.2 \ + --head_drop_rate 0.5 \ + --layer_decay 0.8 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --warmup_epochs 5 \ + --epochs 40 \ + --test_num_segment 7 \ + --test_num_crop 3 \ + --dist_eval --enable_deepspeed \ + ${PY_ARGS} diff --git a/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_mixk_it_k400_ft.sh b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_mixk_it_k400_ft.sh new file mode 100644 index 0000000..ba7a7de --- /dev/null +++ b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_mixk_it_k400_ft.sh @@ -0,0 +1,58 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 + +OUTPUT_DIR='/mnt/petrelfs/huangbingkun/VideoMAE-clean/pfs_work_dir/vit_h_hybridv2_pt_mixk_it_k400_ft' +DATA_PATH='/mnt/petrelfs/share_data/huangbingkun/data/mix_kinetics/k400' +MODEL_PATH='/mnt/cache/share_data/huangbingkun/model/vit_h_hybridv2_1200e_mixk_ft_v1.pth' + +JOB_NAME=$1 +PARTITION=${PARTITION:-"video"} +# 8 for 1 node, 16 for 2 node, etc. 
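+# Post-tuning on K400 from the MixKinetics-fine-tuned checkpoint: very small lr (2e-6) and only 3 epochs.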
+GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-10} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} + +# batch_size can be adjusted according to the graphics card +srun -p $PARTITION \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --quotatype=auto \ + --async \ + ${SRUN_ARGS} \ + python run_class_finetuning.py \ + --model vit_huge_patch16_224 \ + --data_set Kinetics-400 \ + --nb_classes 400 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 4 \ + --input_size 224 \ + --short_side_size 224 \ + --save_ckpt_freq 10 \ + --num_frames 16 \ + --sampling_rate 4 \ + --num_sample 2 \ + --num_workers 8 \ + --opt adamw \ + --lr 2e-6 \ + --warmup_lr 1e-8 \ + --min_lr 1e-6 \ + --drop_path 0.2 \ + --head_drop_rate 0.5 \ + --layer_decay 0.8 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --warmup_epochs 1 \ + --epochs 3 \ + --test_num_segment 7 \ + --test_num_crop 3 \ + --dist_eval --enable_deepspeed \ + ${PY_ARGS} diff --git a/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_mixk_it_k600_ft.sh b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_mixk_it_k600_ft.sh new file mode 100644 index 0000000..13539d2 --- /dev/null +++ b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_mixk_it_k600_ft.sh @@ -0,0 +1,58 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 + +OUTPUT_DIR='/mnt/petrelfs/huangbingkun/VideoMAE-clean/pfs_work_dir/vit_h_hybridv2_pt_mixk_it_k600_ft' +DATA_PATH='/mnt/petrelfs/share_data/huangbingkun/data/k600' +MODEL_PATH='/mnt/cache/share_data/huangbingkun/model/vit_h_hybridv2_1200e_mixk_ft_v1.pth' + +JOB_NAME=$1 +PARTITION=${PARTITION:-"video"} +# 8 for 1 node, 16 for 2 node, etc. 
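+# Kinetics-600 counterpart of the post-tuning recipe above (same checkpoint, lr and schedule).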
+GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-10} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} + +# batch_size can be adjusted according to the graphics card +srun -p $PARTITION \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --quotatype=auto \ + --async \ + ${SRUN_ARGS} \ + python run_class_finetuning.py \ + --model vit_huge_patch16_224 \ + --data_set Kinetics-600 \ + --nb_classes 600 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 4 \ + --input_size 224 \ + --short_side_size 224 \ + --save_ckpt_freq 10 \ + --num_frames 16 \ + --sampling_rate 4 \ + --num_sample 2 \ + --num_workers 8 \ + --opt adamw \ + --lr 2e-6 \ + --warmup_lr 1e-8 \ + --min_lr 1e-6 \ + --drop_path 0.2 \ + --head_drop_rate 0.5 \ + --layer_decay 0.8 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --warmup_epochs 1 \ + --epochs 3 \ + --test_num_segment 7 \ + --test_num_crop 3 \ + --dist_eval --enable_deepspeed \ + ${PY_ARGS} diff --git a/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_mixk_it_k700_ft.sh b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_mixk_it_k700_ft.sh new file mode 100644 index 0000000..e3b416d --- /dev/null +++ b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_mixk_it_k700_ft.sh @@ -0,0 +1,58 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 + +OUTPUT_DIR='/mnt/petrelfs/huangbingkun/VideoMAE-clean/pfs_work_dir/vit_h_hybridv2_pt_mixk_it_k700_ft' +DATA_PATH='/mnt/petrelfs/share_data/huangbingkun/data/k700' +MODEL_PATH='/mnt/cache/share_data/huangbingkun/model/vit_h_hybridv2_1200e_mixk_ft_v1.pth' + +JOB_NAME=$1 +PARTITION=${PARTITION:-"video"} +# 8 for 1 node, 16 for 2 node, etc. 
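+# Kinetics-700 counterpart of the same post-tuning recipe.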
+GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-10} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} + +# batch_size can be adjusted according to the graphics card +srun -p $PARTITION \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --quotatype=auto \ + --async \ + ${SRUN_ARGS} \ + python run_class_finetuning.py \ + --model vit_huge_patch16_224 \ + --data_set Kinetics-700 \ + --nb_classes 700 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 4 \ + --input_size 224 \ + --short_side_size 224 \ + --save_ckpt_freq 10 \ + --num_frames 16 \ + --sampling_rate 4 \ + --num_sample 2 \ + --num_workers 8 \ + --opt adamw \ + --lr 2e-6 \ + --warmup_lr 1e-8 \ + --min_lr 1e-6 \ + --drop_path 0.2 \ + --head_drop_rate 0.5 \ + --layer_decay 0.8 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --warmup_epochs 1 \ + --epochs 3 \ + --test_num_segment 7 \ + --test_num_crop 3 \ + --dist_eval --enable_deepspeed \ + ${PY_ARGS} diff --git a/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_ssv2_ft.sh b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_ssv2_ft.sh new file mode 100644 index 0000000..d7f2525 --- /dev/null +++ b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_h_ssv2_ft.sh @@ -0,0 +1,54 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 + +OUTPUT_DIR='/mnt/petrelfs/huangbingkun/VideoMAE-clean/pfs_work_dir/vit_h_hybridv2_pt_ssv2_ft_tsn' +DATA_PATH='/mnt/petrelfs/share_data/huangbingkun/data/sthv2' +MODEL_PATH='/mnt/cache/share_data/huangbingkun/model/vit_h_hybridv2_pt_1200e.pth' + +JOB_NAME=$1 +PARTITION=${PARTITION:-"video"} +# 8 for 1 node, 16 for 2 node, etc. 
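+# ViT-H fine-tuning on Something-Something V2 (174 classes); no --sampling_rate is passed (uniform, TSN-style sampling).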
+GPUS=${GPUS:-32} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-10} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} + +# batch_size can be adjusted according to the graphics card +srun -p $PARTITION \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --quotatype=auto \ + --async \ + ${SRUN_ARGS} \ + python -u run_class_finetuning.py \ + --model vit_huge_patch16_224 \ + --data_set SSV2 \ + --nb_classes 174 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 8 \ + --num_sample 1 \ + --input_size 224 \ + --short_side_size 224 \ + --save_ckpt_freq 10 \ + --num_frames 16 \ + --opt adamw \ + --lr 5e-4 \ + --num_workers 8 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --drop_path 0.2 \ + --head_drop_rate 0.3 \ + --layer_decay 0.8 \ + --epochs 30 \ + --test_num_segment 2 \ + --test_num_crop 3 \ + --dist_eval --enable_deepspeed \ + ${PY_ARGS} diff --git a/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_l_k400_ft.sh b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_l_k400_ft.sh new file mode 100644 index 0000000..28477d2 --- /dev/null +++ b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_l_k400_ft.sh @@ -0,0 +1,50 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 + +OUTPUT_DIR='/mnt/petrelfs/huangbingkun/VideoMAE-clean/work_dir/vit_l_hybrid_pt_k400_ft_dp_02' +DATA_PATH='/mnt/petrelfs/share_data/huangbingkun/data/k400' +MODEL_PATH='/mnt/petrelfs/huangbingkun/VideoMAE-clean/work_dir/vit_l_hybrid_1m_pretrain/checkpoint-800.pth' + +JOB_NAME=$1 +PARTITION=${PARTITION:-"video"} +# 8 for 1 node, 16 for 2 node, etc. +GPUS=${GPUS:-32} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-8} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} + +# batch_size can be adjusted according to the graphics card +srun -p $PARTITION \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --async \ + ${SRUN_ARGS} \ + python -u run_class_finetuning.py \ + --model vit_large_patch16_224 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 6 \ + --input_size 224 \ + --short_side_size 224 \ + --save_ckpt_freq 10 \ + --num_frames 16 \ + --sampling_rate 4 \ + --num_workers 8 \ + --opt adamw \ + --lr 2e-3 \ + --drop_path 0.2 \ + --layer_decay 0.75 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --epochs 40 \ + --test_num_segment 5 \ + --test_num_crop 3 \ + --dist_eval --enable_deepspeed \ + ${PY_ARGS} diff --git a/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_l_k700_ft.sh b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_l_k700_ft.sh new file mode 100644 index 0000000..5956018 --- /dev/null +++ b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_l_k700_ft.sh @@ -0,0 +1,52 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 + +OUTPUT_DIR='/mnt/petrelfs/huangbingkun/VideoMAE-clean/work_dir/vit_l_hybrid_pt_k700_ft_dp_02' +DATA_PATH='/mnt/petrelfs/share_data/huangbingkun/data/k700' +MODEL_PATH='/mnt/petrelfs/huangbingkun/VideoMAE-clean/work_dir/vit_l_hybrid_1m_pretrain/checkpoint-800.pth' + +JOB_NAME=$1 +PARTITION=${PARTITION:-"video"} +# 8 for 1 node, 16 for 2 node, etc. 
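+# ViT-L fine-tuning on Kinetics-700, initialized from the hybrid pre-trained checkpoint (checkpoint-800).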
+GPUS=${GPUS:-32} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-8} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} + +# batch_size can be adjusted according to the graphics card +srun -p $PARTITION \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --async \ + ${SRUN_ARGS} \ + python run_class_finetuning.py \ + --model vit_large_patch16_224 \ + --data_set Kinetics-700 \ + --nb_classes 700 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 6 \ + --input_size 224 \ + --short_side_size 224 \ + --save_ckpt_freq 10 \ + --num_frames 16 \ + --sampling_rate 4 \ + --num_workers 8 \ + --opt adamw \ + --lr 2e-3 \ + --drop_path 0.2 \ + --layer_decay 0.75 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --epochs 40 \ + --test_num_segment 5 \ + --test_num_crop 3 \ + --dist_eval --enable_deepspeed \ + ${PY_ARGS} diff --git a/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_l_ssv2_ft.sh b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_l_ssv2_ft.sh new file mode 100644 index 0000000..3a0d700 --- /dev/null +++ b/Pretrain/VideoMAE/scripts/finetune/slurm_train_vit_l_ssv2_ft.sh @@ -0,0 +1,53 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 + +OUTPUT_DIR='/mnt/petrelfs/huangbingkun/VideoMAE-clean/work_dir/vit_l_hybrid_pt_ssv2_ft_tsn' +DATA_PATH='/mnt/petrelfs/share_data/huangbingkun/data/sthv2' +MODEL_PATH='/mnt/petrelfs/huangbingkun/VideoMAE-clean/work_dir/vit_l_hybrid_1m_pretrain/checkpoint-800.pth' + +JOB_NAME=$1 +PARTITION=${PARTITION:-"video"} +# 8 for 1 node, 16 for 2 node, etc. +GPUS=${GPUS:-32} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-8} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} + +# batch_size can be adjusted according to the graphics card +srun -p $PARTITION \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --quotatype=auto \ + --exclude=SH-IDC1-10-140-0-165 \ + ${SRUN_ARGS} \ + python -u run_class_finetuning.py \ + --model vit_large_patch16_224 \ + --data_set SSV2 \ + --nb_classes 174 \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + --batch_size 12 \ + --num_sample 1 \ + --input_size 224 \ + --short_side_size 224 \ + --save_ckpt_freq 10 \ + --num_frames 16 \ + --opt adamw \ + --lr 5e-4 \ + --num_workers 8 \ + --opt_betas 0.9 0.999 \ + --weight_decay 0.05 \ + --drop_path 0.2 \ + --epochs 30 \ + --dist_eval \ + --test_num_segment 2 \ + --test_num_crop 3 \ + --enable_deepspeed \ + ${PY_ARGS} diff --git a/Pretrain/VideoMAE/scripts/pretrain/dist_train_vit_b_k400_pt.sh b/Pretrain/VideoMAE/scripts/pretrain/dist_train_vit_b_k400_pt.sh new file mode 100644 index 0000000..ea47470 --- /dev/null +++ b/Pretrain/VideoMAE/scripts/pretrain/dist_train_vit_b_k400_pt.sh @@ -0,0 +1,26 @@ +# Set the path to save checkpoints +OUTPUT_DIR='YOUR_PATH/k400_videomae_pretrain_base_patch16_224_frame_16x4_tube_mask_ratio_0.9_e800' +# Set the path to Kinetics train set. 
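+# Launch once per node, passing the node rank and master address, e.g. `bash dist_train_vit_b_k400_pt.sh 0 <MASTER_ADDR>` on the first node.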
+DATA_PATH='YOUR_PATH/list_kinetics-400/train.csv' + +# batch_size can be adjusted according to number of GPUs +# this script is for 64 GPUs (8 nodes x 8 GPUs) +OMP_NUM_THREADS=1 python -m torch.distributed.launch --nproc_per_node=8 \ + --master_port 12320 --nnodes=8 --node_rank=$1 --master_addr=$2 \ + run_mae_pretraining.py \ + --data_path ${DATA_PATH} \ + --mask_type t_consist \ + --mask_ratio 0.9 \ + --model pretrain_mae_base_patch16_224 \ + --decoder_depth 4 \ + --batch_size 64 \ + --num_frames 16 \ + --sampling_rate 4 \ + --num_workers 16 \ + --opt adamw \ + --opt_betas 0.9 0.95 \ + --warmup_epochs 40 \ + --save_ckpt_freq 200 \ + --epochs 801 \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} diff --git a/Pretrain/VideoMAE/scripts/pretrain/slurm_train_vit_b_hybrid_pt.sh b/Pretrain/VideoMAE/scripts/pretrain/slurm_train_vit_b_hybrid_pt.sh new file mode 100644 index 0000000..751168e --- /dev/null +++ b/Pretrain/VideoMAE/scripts/pretrain/slurm_train_vit_b_hybrid_pt.sh @@ -0,0 +1,43 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 + +OUTPUT_DIR='/mnt/petrelfs/huangbingkun/VideoMAE-clean/work_dir/hybrid_1m_pretrain' +DATA_PATH='/mnt/petrelfs/share_data/huangbingkun/data/hybrid_1m_sp_train.csv' + +JOB_NAME=$1 +PARTITION=${PARTITION:-"video"} +# 8 for 1 node, 16 for 2 node, etc. +GPUS=${GPUS:-32} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-8} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} + +# batch_size can be adjusted according to the graphics card +srun -p $PARTITION \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --async \ + ${SRUN_ARGS} \ + python -u run_mae_pretraining.py \ + --data_path ${DATA_PATH} \ + --mask_type t_consist \ + --mask_ratio 0.9 \ + --model pretrain_mae_base_patch16_224 \ + --decoder_depth 4 \ + --batch_size 64 \ + --num_frames 16 \ + --sampling_rate 4 \ + --num_workers 8 \ + --opt adamw \ + --opt_betas 0.9 0.95 \ + --warmup_epochs 40 \ + --save_ckpt_freq 20 \ + --epochs 801 \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + ${PY_ARGS} diff --git a/Pretrain/VideoMAE/scripts/pretrain/slurm_train_vit_b_k400_pt.sh b/Pretrain/VideoMAE/scripts/pretrain/slurm_train_vit_b_k400_pt.sh new file mode 100644 index 0000000..d614140 --- /dev/null +++ b/Pretrain/VideoMAE/scripts/pretrain/slurm_train_vit_b_k400_pt.sh @@ -0,0 +1,43 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 + +OUTPUT_DIR='/mnt/petrelfs/huangbingkun/VideoMAE-clean/work_dir/k400_pretrain' +DATA_PATH='/mnt/petrelfs/share_data/huangbingkun/data/train_shm.csv' + +JOB_NAME=$1 +PARTITION=${PARTITION:-"video"} +# 8 for 1 node, 16 for 2 node, etc. 
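+# Slurm counterpart of the dist_train K400 pre-training script above: tube masking (t_consist, ratio 0.9), ~800 epochs.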
+GPUS=${GPUS:-16} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-16} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} + +# batch_size can be adjusted according to the graphics card +srun -p $PARTITION \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --async \ + ${SRUN_ARGS} \ + python -u run_mae_pretraining.py \ + --data_path ${DATA_PATH} \ + --mask_type t_consist \ + --mask_ratio 0.9 \ + --model pretrain_mae_base_patch16_224 \ + --decoder_depth 4 \ + --batch_size 64 \ + --num_frames 16 \ + --sampling_rate 4 \ + --num_workers 16 \ + --opt adamw \ + --opt_betas 0.9 0.95 \ + --warmup_epochs 40 \ + --save_ckpt_freq 200 \ + --epochs 801 \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + ${PY_ARGS} diff --git a/Pretrain/VideoMAE/scripts/pretrain/slurm_train_vit_b_ssv2_pt.sh b/Pretrain/VideoMAE/scripts/pretrain/slurm_train_vit_b_ssv2_pt.sh new file mode 100644 index 0000000..1e7962f --- /dev/null +++ b/Pretrain/VideoMAE/scripts/pretrain/slurm_train_vit_b_ssv2_pt.sh @@ -0,0 +1,45 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 + +OUTPUT_DIR='/mnt/petrelfs/huangbingkun/VideoMAE-clean/pfs_work_dir/vit_b_ssv2_pt' +DATA_PATH='/mnt/petrelfs/share_data/huangbingkun/data/sthv2/ssv2_train.csv' + +JOB_NAME=$1 +PARTITION=${PARTITION:-"video"} +# 8 for 1 node, 16 for 2 node, etc. +GPUS=${GPUS:-32} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-14} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} + +# batch_size can be adjusted according to the graphics card +srun -p $PARTITION \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --quotatype=auto \ + --async \ + ${SRUN_ARGS} \ + python -u run_mae_pretraining.py \ + --data_root '/mnt/petrelfs/videointern' \ + --data_path ${DATA_PATH} \ + --mask_type t_consist \ + --mask_ratio 0.9 \ + --model pretrain_mae_base_patch16_224 \ + --decoder_depth 4 \ + --batch_size 64 \ + --num_frames 16 \ + --sampling_rate 2 \ + --num_workers 8 \ + --opt adamw \ + --opt_betas 0.9 0.95 \ + --warmup_epochs 40 \ + --save_ckpt_freq 20 \ + --epochs 801 \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + ${PY_ARGS} diff --git a/Pretrain/VideoMAE/scripts/pretrain/slurm_train_vit_h_hybrid_pt.sh b/Pretrain/VideoMAE/scripts/pretrain/slurm_train_vit_h_hybrid_pt.sh new file mode 100644 index 0000000..ef15161 --- /dev/null +++ b/Pretrain/VideoMAE/scripts/pretrain/slurm_train_vit_h_hybrid_pt.sh @@ -0,0 +1,47 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 + +OUTPUT_DIR='/mnt/petrelfs/huangbingkun/VideoMAE-clean/pfs_work_dir/vit_h_hybridv2_pretrain' +DATA_PATH='/mnt/petrelfs/share_data/huangbingkun/data/hybridv2_sp_train.csv' + +JOB_NAME=$1 +PARTITION=${PARTITION:-"video"} +# 8 for 1 node, 16 for 2 node, etc. 
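+# ViT-H pre-training on the hybrid (multi-source) video list; small per-GPU batch (6) with --num_sample 4 (multiple clips per video).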
+GPUS=${GPUS:-64} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-12} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} + +# batch_size can be adjusted according to the graphics card +srun -p $PARTITION \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --async \ + ${SRUN_ARGS} \ + python -u run_mae_pretraining.py \ + --data_path ${DATA_PATH} \ + --mask_type t_consist \ + --mask_ratio 0.9 \ + --model pretrain_mae_huge_patch16_224 \ + --decoder_depth 8 \ + --batch_size 6 \ + --num_frames 16 \ + --sampling_rate 4 \ + --num_sample 4 \ + --num_workers 8 \ + --opt adamw \ + --lr 1e-3 \ + --warmup_lr 5e-6 \ + --min_lr 5e-5 \ + --opt_betas 0.9 0.95 \ + --warmup_epochs 15 \ + --save_ckpt_freq 10 \ + --epochs 301 \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + ${PY_ARGS} diff --git a/Pretrain/VideoMAE/scripts/pretrain/slurm_train_vit_l_hybrid_pt.sh b/Pretrain/VideoMAE/scripts/pretrain/slurm_train_vit_l_hybrid_pt.sh new file mode 100644 index 0000000..add557a --- /dev/null +++ b/Pretrain/VideoMAE/scripts/pretrain/slurm_train_vit_l_hybrid_pt.sh @@ -0,0 +1,43 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 + +OUTPUT_DIR='/mnt/petrelfs/huangbingkun/VideoMAE-clean/work_dir/vit_l_hybrid_1m_pretrain' +DATA_PATH='/mnt/petrelfs/share_data/huangbingkun/data/hybrid_1m_sp_train.csv' + +JOB_NAME=$1 +PARTITION=${PARTITION:-"video"} +# 8 for 1 node, 16 for 2 node, etc. +GPUS=${GPUS:-48} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-8} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} + +# batch_size can be adjusted according to the graphics card +srun -p $PARTITION \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --async \ + ${SRUN_ARGS} \ + python -u run_mae_pretraining.py \ + --data_path ${DATA_PATH} \ + --mask_type t_consist \ + --mask_ratio 0.9 \ + --model pretrain_mae_large_patch16_224 \ + --decoder_depth 12 \ + --batch_size 24 \ + --num_frames 16 \ + --sampling_rate 4 \ + --num_workers 8 \ + --opt adamw \ + --opt_betas 0.9 0.95 \ + --warmup_epochs 40 \ + --save_ckpt_freq 20 \ + --epochs 801 \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + ${PY_ARGS} diff --git a/Pretrain/VideoMAE/scripts/pretrain/slurm_train_vit_l_k700_pt.sh b/Pretrain/VideoMAE/scripts/pretrain/slurm_train_vit_l_k700_pt.sh new file mode 100644 index 0000000..06e3433 --- /dev/null +++ b/Pretrain/VideoMAE/scripts/pretrain/slurm_train_vit_l_k700_pt.sh @@ -0,0 +1,47 @@ +export MASTER_PORT=$((12000 + $RANDOM % 20000)) +export OMP_NUM_THREADS=1 + +OUTPUT_DIR='/mnt/petrelfs/huangbingkun/VideoMAE-clean/work_dir/vit_l_k700_pretrain' +DATA_PATH='/mnt/petrelfs/share_data/huangbingkun/data/k700/k700_sp_train.csv' +MODEL_PATH='/mnt/petrelfs/huangbingkun/VideoMAE-clean/work_dir/vit_l_hybrid_1m_pretrain/checkpoint-800.pth' + +JOB_NAME=$1 +PARTITION=${PARTITION:-"video"} +# 8 for 1 node, 16 for 2 node, etc. 
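+# Continued ViT-L pre-training on K700; weights are initialized (--finetune) from the hybrid pre-trained checkpoint above.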
+GPUS=${GPUS:-32} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-14} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:2} + +# batch_size can be adjusted according to the graphics card +srun -p $PARTITION \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + --quotatype=auto \ + --async \ + ${SRUN_ARGS} \ + python -u run_mae_pretraining.py \ + --data_path ${DATA_PATH} \ + --finetune ${MODEL_PATH} \ + --mask_type t_consist \ + --mask_ratio 0.9 \ + --model pretrain_mae_large_patch16_224 \ + --decoder_depth 12 \ + --batch_size 6 \ + --num_frames 16 \ + --sampling_rate 4 \ + --num_sample 4 \ + --num_workers 14 \ + --opt adamw \ + --opt_betas 0.9 0.95 \ + --warmup_epochs 10 \ + --save_ckpt_freq 10 \ + --epochs 201 \ + --log_dir ${OUTPUT_DIR} \ + --output_dir ${OUTPUT_DIR} \ + ${PY_ARGS} diff --git a/Pretrain/VideoMAE/ssv2.py b/Pretrain/VideoMAE/ssv2.py new file mode 100644 index 0000000..977222d --- /dev/null +++ b/Pretrain/VideoMAE/ssv2.py @@ -0,0 +1,412 @@ +import os +import warnings + +import cv2 +import numpy as np +import torch +from decord import VideoReader, cpu +from petrel_client.client import Client +from torch.utils.data import Dataset +from torchvision import transforms + +import video_transforms as video_transforms +import volume_transforms as volume_transforms +from random_erasing import RandomErasing + + +class SSRawFrameClsDataset(Dataset): + """Load your own raw frame classification dataset.""" + + def __init__(self, + anno_path, + data_path, + mode='train', + clip_len=8, + crop_size=224, + short_side_size=256, + new_height=256, + new_width=340, + keep_aspect_ratio=True, + num_segment=1, + num_crop=1, + test_num_segment=10, + test_num_crop=3, + filename_tmpl='img_{:05}.jpg', + args=None): + self.anno_path = anno_path + self.data_path = data_path + self.mode = mode + self.clip_len = clip_len + self.crop_size = crop_size + self.short_side_size = short_side_size + self.new_height = new_height + self.new_width = new_width + self.keep_aspect_ratio = keep_aspect_ratio + self.num_segment = num_segment + self.test_num_segment = test_num_segment + self.num_crop = num_crop + self.test_num_crop = test_num_crop + self.filename_tmpl = filename_tmpl + self.args = args + self.aug = False + self.rand_erase = False + + if self.mode in ['train']: + self.aug = True + if self.args.reprob > 0: + self.rand_erase = True + if VideoReader is None: + raise ImportError( + "Unable to import `decord` which is required to read videos.") + + import pandas as pd + cleaned = pd.read_csv(self.anno_path, header=None, delimiter=' ') + self.dataset_samples = list(cleaned.values[:, 0]) + self.total_frames = list(cleaned.values[:, 1]) + self.label_array = list(cleaned.values[:, -1]) + + self.client = Client() + + if (mode == 'train'): + pass + + elif (mode == 'validation'): + self.data_transform = video_transforms.Compose([ + video_transforms.Resize(self.short_side_size, + interpolation='bilinear'), + video_transforms.CenterCrop(size=(self.crop_size, + self.crop_size)), + volume_transforms.ClipToTensor(), + video_transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + ]) + elif mode == 'test': + self.data_resize = video_transforms.Compose([ + video_transforms.Resize(size=(short_side_size), + interpolation='bilinear') + ]) + self.data_transform = video_transforms.Compose([ + volume_transforms.ClipToTensor(), + video_transforms.Normalize(mean=[0.485, 
0.456, 0.406], + std=[0.229, 0.224, 0.225]) + ]) + self.test_seg = [] + self.test_dataset = [] + self.test_total_frames = [] + self.test_label_array = [] + for ck in range(self.test_num_segment): + for cp in range(self.test_num_crop): + for idx in range(len(self.label_array)): + self.test_seg.append((ck, cp)) + self.test_dataset.append(self.dataset_samples[idx]) + self.test_total_frames.append(self.total_frames[idx]) + self.test_label_array.append(self.label_array[idx]) + + def __getitem__(self, index): + if self.mode == 'train': + args = self.args + scale_t = 1 + + sample = self.dataset_samples[index] + total_frame = self.total_frames[index] + buffer = self.load_frame(sample, + total_frame, + sample_rate_scale=scale_t) # T H W C + if len(buffer) == 0: + while len(buffer) == 0: + warnings.warn( + "video {} not correctly loaded during training".format( + sample)) + index = np.random.randint(self.__len__()) + sample = self.dataset_samples[index] + total_frame = self.total_frames[index] + buffer = self.load_frame(sample, + total_frame, + sample_rate_scale=scale_t) + + if args.num_sample > 1: + frame_list = [] + label_list = [] + index_list = [] + for _ in range(args.num_sample): + new_frames = self._aug_frame(buffer, args) + label = self.label_array[index] + frame_list.append(new_frames) + label_list.append(label) + index_list.append(index) + return frame_list, label_list, index_list, {} + else: + buffer = self._aug_frame(buffer, args) + + return buffer, self.label_array[index], index, {} + + elif self.mode == 'validation': + sample = self.dataset_samples[index] + total_frame = self.total_frames[index] + buffer = self.load_frame(sample, total_frame) + if len(buffer) == 0: + while len(buffer) == 0: + warnings.warn( + "video {} not correctly loaded during validation". 
+ format(sample)) + index = np.random.randint(self.__len__()) + sample = self.dataset_samples[index] + buffer = self.load_frame(sample, total_frame) + buffer = self.data_transform(buffer) + return buffer, self.label_array[index], sample.split( + "/")[-1].split(".")[0] + + elif self.mode == 'test': + sample = self.test_dataset[index] + total_frame = self.test_total_frames[index] + chunk_nb, split_nb = self.test_seg[index] + buffer = self.load_frame(sample, total_frame) + + while len(buffer) == 0: + warnings.warn("video {}, temporal {}, spatial {} not found during testing".format(\ + str(self.test_dataset[index]), chunk_nb, split_nb)) + index = np.random.randint(self.__len__()) + sample = self.test_dataset[index] + total_frame = self.test_total_frames[index] + chunk_nb, split_nb = self.test_seg[index] + buffer = self.load_frame(sample, total_frame) + + buffer = self.data_resize(buffer) + if isinstance(buffer, list): + buffer = np.stack(buffer, 0) + + spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \ + / (self.test_num_crop - 1) + temporal_start = chunk_nb + spatial_start = int(split_nb * spatial_step) + if buffer.shape[1] >= buffer.shape[2]: + buffer = buffer[temporal_start::self.test_num_segment, \ + spatial_start:spatial_start + self.short_side_size, :, :] + else: + buffer = buffer[temporal_start::self.test_num_segment, \ + :, spatial_start:spatial_start + self.short_side_size, :] + + buffer = self.data_transform(buffer) + return buffer, self.test_label_array[index], sample.split("/")[-1].split(".")[0], \ + chunk_nb, split_nb + else: + raise NameError('mode {} unkown'.format(self.mode)) + + def _aug_frame( + self, + buffer, + args, + ): + + aug_transform = video_transforms.create_random_augment( + input_size=(self.crop_size, self.crop_size), + auto_augment=args.aa, + interpolation=args.train_interpolation, + ) + + buffer = [transforms.ToPILImage()(frame) for frame in buffer] + + buffer = aug_transform(buffer) + + buffer = [transforms.ToTensor()(img) for img in buffer] + buffer = torch.stack(buffer) # T C H W + buffer = buffer.permute(0, 2, 3, 1) # T H W C + + # T H W C + buffer = tensor_normalize(buffer, [0.485, 0.456, 0.406], + [0.229, 0.224, 0.225]) + # T H W C -> C T H W. + buffer = buffer.permute(3, 0, 1, 2) + # Perform data augmentation. 
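+        # scl / asp: scale and aspect-ratio ranges for the random resized crop below (standard Inception-style values)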
+ scl, asp = ( + [0.08, 1.0], + [0.75, 1.3333], + ) + + buffer = spatial_sampling( + buffer, + spatial_idx=-1, + min_scale=256, + max_scale=320, + crop_size=self.crop_size, + random_horizontal_flip=False if args.data_set == 'SSV2' else True, + inverse_uniform_sampling=False, + aspect_ratio=asp, + scale=scl, + motion_shift=False) + + if self.rand_erase: + erase_transform = RandomErasing( + args.reprob, + mode=args.remode, + max_count=args.recount, + num_splits=args.recount, + device="cpu", + ) + buffer = buffer.permute(1, 0, 2, 3) + buffer = erase_transform(buffer) + buffer = buffer.permute(1, 0, 2, 3) + + return buffer + + def load_frame(self, sample, num_frames, sample_rate_scale=1): + """Load video content using Decord""" + fname = sample + + if self.mode == 'test': + tick = num_frames / float(self.num_segment) + all_index = [] + for t_seg in range(self.test_num_segment): + tmp_index = [ + int(t_seg * tick / self.test_num_segment + tick * x) + for x in range(self.num_segment) + ] + all_index.extend(tmp_index) + all_index = list(np.sort(np.array(all_index))) + imgs = [] + for idx in all_index: + frame_fname = os.path.join(fname, + self.filename_tmpl.format(idx + 1)) + img_bytes = self.client.get(frame_fname) + img_np = np.frombuffer(img_bytes, np.uint8) + img = cv2.imdecode(img_np, cv2.IMREAD_COLOR) + cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) + imgs.append(img) + buffer = np.array(imgs) + return buffer + + # handle temporal segments + average_duration = num_frames // self.num_segment + all_index = [] + if average_duration > 0: + if self.mode == 'validation': + all_index = list( + np.multiply(list(range(self.num_segment)), + average_duration) + + np.ones(self.num_segment, dtype=int) * + (average_duration // 2)) + else: + all_index = list( + np.multiply(list(range(self.num_segment)), + average_duration) + + np.random.randint(average_duration, size=self.num_segment)) + elif num_frames > self.num_segment: + if self.mode == 'validation': + all_index = list(range(self.num_segment)) + else: + all_index = list( + np.sort( + np.random.randint(num_frames, size=self.num_segment))) + else: + all_index = [0] * (self.num_segment - num_frames) + list( + range(num_frames)) + all_index = list(np.array(all_index)) + imgs = [] + for idx in all_index: + frame_fname = os.path.join(fname, + self.filename_tmpl.format(idx + 1)) + img_bytes = self.client.get(frame_fname) + img_np = np.frombuffer(img_bytes, np.uint8) + img = cv2.imdecode(img_np, cv2.IMREAD_COLOR) + cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) + imgs.append(img) + buffer = np.array(imgs) + return buffer + + def __len__(self): + if self.mode != 'test': + return len(self.dataset_samples) + else: + return len(self.test_dataset) + + +def spatial_sampling( + frames, + spatial_idx=-1, + min_scale=256, + max_scale=320, + crop_size=224, + random_horizontal_flip=True, + inverse_uniform_sampling=False, + aspect_ratio=None, + scale=None, + motion_shift=False, +): + """ + Perform spatial sampling on the given video frames. If spatial_idx is + -1, perform random scale, random crop, and random flip on the given + frames. If spatial_idx is 0, 1, or 2, perform spatial uniform sampling + with the given spatial_idx. + Args: + frames (tensor): frames of images sampled from the video. The + dimension is `num frames` x `height` x `width` x `channel`. + spatial_idx (int): if -1, perform random spatial sampling. If 0, 1, + or 2, perform left, center, right crop if width is larger than + height, and perform top, center, buttom crop if height is larger + than width. 
+ min_scale (int): the minimal size of scaling. + max_scale (int): the maximal size of scaling. + crop_size (int): the size of height and width used to crop the + frames. + inverse_uniform_sampling (bool): if True, sample uniformly in + [1 / max_scale, 1 / min_scale] and take a reciprocal to get the + scale. If False, take a uniform sample from [min_scale, + max_scale]. + aspect_ratio (list): Aspect ratio range for resizing. + scale (list): Scale range for resizing. + motion_shift (bool): Whether to apply motion shift for resizing. + Returns: + frames (tensor): spatially sampled frames. + """ + assert spatial_idx in [-1, 0, 1, 2] + if spatial_idx == -1: + if aspect_ratio is None and scale is None: + frames, _ = video_transforms.random_short_side_scale_jitter( + images=frames, + min_size=min_scale, + max_size=max_scale, + inverse_uniform_sampling=inverse_uniform_sampling, + ) + frames, _ = video_transforms.random_crop(frames, crop_size) + else: + transform_func = (video_transforms.random_resized_crop_with_shift + if motion_shift else + video_transforms.random_resized_crop) + frames = transform_func( + images=frames, + target_height=crop_size, + target_width=crop_size, + scale=scale, + ratio=aspect_ratio, + ) + if random_horizontal_flip: + frames, _ = video_transforms.horizontal_flip(0.5, frames) + else: + # The testing is deterministic and no jitter should be performed. + # min_scale, max_scale, and crop_size are expect to be the same. + assert len({min_scale, max_scale, crop_size}) == 1 + frames, _ = video_transforms.random_short_side_scale_jitter( + frames, min_scale, max_scale) + frames, _ = video_transforms.uniform_crop(frames, crop_size, + spatial_idx) + return frames + + +def tensor_normalize(tensor, mean, std): + """ + Normalize a given tensor by subtracting the mean and dividing the std. + Args: + tensor (tensor): tensor to normalize. + mean (tensor or list): mean value to subtract. + std (tensor or list): std to divide. 
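+    Returns:
+        tensor (tensor): the normalized float tensor.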
+ """ + if tensor.dtype == torch.uint8: + tensor = tensor.float() + tensor = tensor / 255.0 + if type(mean) == list: + mean = torch.tensor(mean) + if type(std) == list: + std = torch.tensor(std) + tensor = tensor - mean + tensor = tensor / std + return tensor diff --git a/Pretrain/VideoMAE/transforms.py b/Pretrain/VideoMAE/transforms.py new file mode 100644 index 0000000..0b9d111 --- /dev/null +++ b/Pretrain/VideoMAE/transforms.py @@ -0,0 +1,571 @@ +# -------------------------------------------------------- +# Based on BEiT, timm, DINO and DeiT code bases +# https://github.com/microsoft/unilm/tree/master/beit +# https://github.com/rwightman/pytorch-image-models/tree/master/timm +# https://github.com/facebookresearch/deit +# https://github.com/facebookresearch/dino +# --------------------------------------------------------' +import math +import numbers +import random +import warnings + +import numpy as np +import torch +import torchvision +import torchvision.transforms.functional as F +from PIL import Image, ImageOps + + +class ToNumpy: + + def __call__(self, pil_img): + np_img = np.array(pil_img, dtype=np.uint8) + if np_img.ndim < 3: + np_img = np.expand_dims(np_img, axis=-1) + np_img = np.rollaxis(np_img, 2) # HWC to CHW + return np_img + + +class ToTensor: + + def __init__(self, dtype=torch.float32): + self.dtype = dtype + + def __call__(self, pil_img): + np_img = np.array(pil_img, dtype=np.uint8) + if np_img.ndim < 3: + np_img = np.expand_dims(np_img, axis=-1) + np_img = np.rollaxis(np_img, 2) # HWC to CHW + return torch.from_numpy(np_img).to(dtype=self.dtype) + + +_pil_interpolation_to_str = { + Image.NEAREST: 'PIL.Image.NEAREST', + Image.BILINEAR: 'PIL.Image.BILINEAR', + Image.BICUBIC: 'PIL.Image.BICUBIC', + Image.LANCZOS: 'PIL.Image.LANCZOS', + Image.HAMMING: 'PIL.Image.HAMMING', + Image.BOX: 'PIL.Image.BOX', +} + + +def _pil_interp(method): + if method == 'bicubic': + return Image.BICUBIC + elif method == 'lanczos': + return Image.LANCZOS + elif method == 'hamming': + return Image.HAMMING + else: + # default bilinear, do we want to allow nearest? + return Image.BILINEAR + + +_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC) + + +class RandomResizedCropAndInterpolationWithTwoPic: + """Crop the given PIL Image to random size and aspect ratio with random interpolation. + + A crop of random size (default: of 0.08 to 1.0) of the original size and a random + aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop + is finally resized to given size. + This is popularly used to train the Inception networks. + + Args: + size: expected output size of each edge + scale: range of size of the origin size cropped + ratio: range of aspect ratio of the origin aspect ratio cropped + interpolation: Default: PIL.Image.BILINEAR + """ + + def __init__(self, size, second_size=None, scale=(0.08, 1.0), ratio=(3. / 4., 4. 
/ 3.), + interpolation='bilinear', second_interpolation='lanczos'): + if isinstance(size, tuple): + self.size = size + else: + self.size = (size, size) + if second_size is not None: + if isinstance(second_size, tuple): + self.second_size = second_size + else: + self.second_size = (second_size, second_size) + else: + self.second_size = None + if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): + warnings.warn("range should be of kind (min, max)") + + if interpolation == 'random': + self.interpolation = _RANDOM_INTERPOLATION + else: + self.interpolation = _pil_interp(interpolation) + self.second_interpolation = _pil_interp(second_interpolation) + self.scale = scale + self.ratio = ratio + + @staticmethod + def get_params(img, scale, ratio): + """Get parameters for ``crop`` for a random sized crop. + + Args: + img (PIL Image): Image to be cropped. + scale (tuple): range of size of the origin size cropped + ratio (tuple): range of aspect ratio of the origin aspect ratio cropped + + Returns: + tuple: params (i, j, h, w) to be passed to ``crop`` for a random + sized crop. + """ + area = img.size[0] * img.size[1] + + for attempt in range(10): + target_area = random.uniform(*scale) * area + log_ratio = (math.log(ratio[0]), math.log(ratio[1])) + aspect_ratio = math.exp(random.uniform(*log_ratio)) + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if w <= img.size[0] and h <= img.size[1]: + i = random.randint(0, img.size[1] - h) + j = random.randint(0, img.size[0] - w) + return i, j, h, w + + # Fallback to central crop + in_ratio = img.size[0] / img.size[1] + if in_ratio < min(ratio): + w = img.size[0] + h = int(round(w / min(ratio))) + elif in_ratio > max(ratio): + h = img.size[1] + w = int(round(h * max(ratio))) + else: # whole image + w = img.size[0] + h = img.size[1] + i = (img.size[1] - h) // 2 + j = (img.size[0] - w) // 2 + return i, j, h, w + + def __call__(self, img): + """ + Args: + img (PIL Image): Image to be cropped and resized. + + Returns: + PIL Image: Randomly cropped and resized image. 
+ """ + i, j, h, w = self.get_params(img, self.scale, self.ratio) + if isinstance(self.interpolation, (tuple, list)): + interpolation = random.choice(self.interpolation) + else: + interpolation = self.interpolation + if self.second_size is None: + return F.resized_crop(img, i, j, h, w, self.size, interpolation) + else: + return F.resized_crop(img, i, j, h, w, self.size, interpolation), \ + F.resized_crop(img, i, j, h, w, self.second_size, self.second_interpolation) + + def __repr__(self): + if isinstance(self.interpolation, (tuple, list)): + interpolate_str = ' '.join([_pil_interpolation_to_str[x] for x in self.interpolation]) + else: + interpolate_str = _pil_interpolation_to_str[self.interpolation] + format_string = self.__class__.__name__ + '(size={0}'.format(self.size) + format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale)) + format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio)) + format_string += ', interpolation={0}'.format(interpolate_str) + if self.second_size is not None: + format_string += ', second_size={0}'.format(self.second_size) + format_string += ', second_interpolation={0}'.format(_pil_interpolation_to_str[self.second_interpolation]) + format_string += ')' + return format_string + + + + + + + + + + +class GroupRandomCrop(object): + def __init__(self, size): + if isinstance(size, numbers.Number): + self.size = (int(size), int(size)) + else: + self.size = size + + def __call__(self, img_tuple): + img_group, label = img_tuple + + w, h = img_group[0].size + th, tw = self.size + + out_images = list() + + x1 = random.randint(0, w - tw) + y1 = random.randint(0, h - th) + + for img in img_group: + assert(img.size[0] == w and img.size[1] == h) + if w == tw and h == th: + out_images.append(img) + else: + out_images.append(img.crop((x1, y1, x1 + tw, y1 + th))) + + return (out_images, label) + + +class GroupCenterCrop(object): + def __init__(self, size): + self.worker = torchvision.transforms.CenterCrop(size) + + def __call__(self, img_tuple): + img_group, label = img_tuple + return ([self.worker(img) for img in img_group], label) + + +class GroupRandomHorizontalFlip(object): + """Randomly horizontally flips the given PIL.Image with a probability of 0.5 + """ + def __init__(self, selective_flip=True, is_flow=False): + self.is_flow = is_flow + self.class_LeftRight = [86,87,93,94,166,167] if selective_flip else [] + + def __call__(self, img_tuple, is_flow=False): + img_group, label = img_tuple + v = random.random() + if (label not in self.class_LeftRight) and v < 0.5: + ret = [img.transpose(Image.FLIP_LEFT_RIGHT) for img in img_group] + if self.is_flow: + for i in range(0, len(ret), 2): + ret[i] = ImageOps.invert(ret[i]) # invert flow pixel values when flipping + return (ret, label) + else: + return img_tuple + +class GroupNormalize(object): + def __init__(self, mean, std): + self.mean = mean + self.std = std + + def __call__(self, tensor_tuple): + tensor, label = tensor_tuple + rep_mean = self.mean * (tensor.size()[0]//len(self.mean)) + rep_std = self.std * (tensor.size()[0]//len(self.std)) + + # TODO: make efficient + for t, m, s in zip(tensor, rep_mean, rep_std): + t.sub_(m).div_(s) + + return (tensor,label) + + +class GroupGrayScale(object): + def __init__(self, size): + self.worker = torchvision.transforms.Grayscale(size) + + def __call__(self, img_tuple): + img_group, label = img_tuple + return ([self.worker(img) for img in img_group], label) + + +class GroupScale(object): + """ Rescales the input PIL.Image to the given 'size'. 
+ 'size' will be the size of the smaller edge. + For example, if height > width, then image will be + rescaled to (size * height / width, size) + size: size of the smaller edge + interpolation: Default: PIL.Image.BILINEAR + """ + + def __init__(self, size, interpolation=Image.BILINEAR): + self.worker = torchvision.transforms.Resize(size, interpolation) + + def __call__(self, img_tuple): + img_group, label = img_tuple + return ([self.worker(img) for img in img_group], label) + + +class GroupOverSample(object): + def __init__(self, crop_size, scale_size=None): + self.crop_size = crop_size if not isinstance(crop_size, int) else (crop_size, crop_size) + + if scale_size is not None: + self.scale_worker = GroupScale(scale_size) + else: + self.scale_worker = None + + def __call__(self, img_tuple): + if self.scale_worker is not None: + img_tuple = self.scale_worker(img_tuple) + + img_group, label = img_tuple + + image_w, image_h = img_group[0].size + crop_w, crop_h = self.crop_size + + offsets = GroupMultiScaleCrop.fill_fix_offset(False, image_w, image_h, crop_w, crop_h) + oversample_group = list() + for o_w, o_h in offsets: + normal_group = list() + flip_group = list() + for i, img in enumerate(img_group): + crop = img.crop((o_w, o_h, o_w + crop_w, o_h + crop_h)) + normal_group.append(crop) + flip_crop = crop.copy().transpose(Image.FLIP_LEFT_RIGHT) + + if img.mode == 'L' and i % 2 == 0: + flip_group.append(ImageOps.invert(flip_crop)) + else: + flip_group.append(flip_crop) + + oversample_group.extend(normal_group) + oversample_group.extend(flip_group) + return (oversample_group, label) + +class GroupFullResSample(object): + def __init__(self, crop_size, scale_size=None, flip=True): + self.crop_size = crop_size if not isinstance(crop_size, int) else (crop_size, crop_size) + + if scale_size is not None: + self.scale_worker = GroupScale(scale_size) + else: + self.scale_worker = None + self.flip = flip + + def __call__(self, img_tuple): + + if self.scale_worker is not None: + img_tuple = self.scale_worker(img_tuple) + + img_group, label = img_tuple + image_w, image_h = img_group[0].size + crop_w, crop_h = self.crop_size + + w_step = (image_w - crop_w) // 4 + h_step = (image_h - crop_h) // 4 + + offsets = list() + offsets.append((0 * w_step, 2 * h_step)) # left + offsets.append((4 * w_step, 2 * h_step)) # right + offsets.append((2 * w_step, 2 * h_step)) # center + + oversample_group = list() + for o_w, o_h in offsets: + normal_group = list() + flip_group = list() + for i, img in enumerate(img_group): + crop = img.crop((o_w, o_h, o_w + crop_w, o_h + crop_h)) + normal_group.append(crop) + if self.flip: + flip_crop = crop.copy().transpose(Image.FLIP_LEFT_RIGHT) + + if img.mode == 'L' and i % 2 == 0: + flip_group.append(ImageOps.invert(flip_crop)) + else: + flip_group.append(flip_crop) + + oversample_group.extend(normal_group) + oversample_group.extend(flip_group) + return (oversample_group, label) + +class GroupMultiScaleCrop(object): + + def __init__(self, input_size, scales=None, max_distort=1, fix_crop=True, more_fix_crop=True): + self.scales = scales if scales is not None else [1, 875, .75, .66] + self.max_distort = max_distort + self.fix_crop = fix_crop + self.more_fix_crop = more_fix_crop + self.input_size = input_size if not isinstance(input_size, int) else [input_size, input_size] + self.interpolation = Image.BILINEAR + + def __call__(self, img_tuple): + img_group, label = img_tuple + + im_size = img_group[0].size + + crop_w, crop_h, offset_w, offset_h = self._sample_crop_size(im_size) + 
crop_img_group = [img.crop((offset_w, offset_h, offset_w + crop_w, offset_h + crop_h)) for img in img_group] + ret_img_group = [img.resize((self.input_size[0], self.input_size[1]), self.interpolation) for img in crop_img_group] + return (ret_img_group, label) + + def _sample_crop_size(self, im_size): + image_w, image_h = im_size[0], im_size[1] + + # find a crop size + base_size = min(image_w, image_h) + crop_sizes = [int(base_size * x) for x in self.scales] + crop_h = [self.input_size[1] if abs(x - self.input_size[1]) < 3 else x for x in crop_sizes] + crop_w = [self.input_size[0] if abs(x - self.input_size[0]) < 3 else x for x in crop_sizes] + + pairs = [] + for i, h in enumerate(crop_h): + for j, w in enumerate(crop_w): + if abs(i - j) <= self.max_distort: + pairs.append((w, h)) + + crop_pair = random.choice(pairs) + if not self.fix_crop: + w_offset = random.randint(0, image_w - crop_pair[0]) + h_offset = random.randint(0, image_h - crop_pair[1]) + else: + w_offset, h_offset = self._sample_fix_offset(image_w, image_h, crop_pair[0], crop_pair[1]) + + return crop_pair[0], crop_pair[1], w_offset, h_offset + + def _sample_fix_offset(self, image_w, image_h, crop_w, crop_h): + offsets = self.fill_fix_offset(self.more_fix_crop, image_w, image_h, crop_w, crop_h) + return random.choice(offsets) + + @staticmethod + def fill_fix_offset(more_fix_crop, image_w, image_h, crop_w, crop_h): + w_step = (image_w - crop_w) // 4 + h_step = (image_h - crop_h) // 4 + + ret = list() + ret.append((0, 0)) # upper left + ret.append((4 * w_step, 0)) # upper right + ret.append((0, 4 * h_step)) # lower left + ret.append((4 * w_step, 4 * h_step)) # lower right + ret.append((2 * w_step, 2 * h_step)) # center + + if more_fix_crop: + ret.append((0, 2 * h_step)) # center left + ret.append((4 * w_step, 2 * h_step)) # center right + ret.append((2 * w_step, 4 * h_step)) # lower center + ret.append((2 * w_step, 0 * h_step)) # upper center + + ret.append((1 * w_step, 1 * h_step)) # upper left quarter + ret.append((3 * w_step, 1 * h_step)) # upper right quarter + ret.append((1 * w_step, 3 * h_step)) # lower left quarter + ret.append((3 * w_step, 3 * h_step)) # lower righ quarter + + return ret + + +class GroupRandomSizedCrop(object): + """Random crop the given PIL.Image to a random size of (0.08 to 1.0) of the original size + and and a random aspect ratio of 3/4 to 4/3 of the original aspect ratio + This is popularly used to train the Inception networks + size: size of the smaller edge + interpolation: Default: PIL.Image.BILINEAR + """ + def __init__(self, size, interpolation=Image.BILINEAR): + self.size = size + self.interpolation = interpolation + + def __call__(self, img_tuple): + img_group, label = img_tuple + + for attempt in range(10): + area = img_group[0].size[0] * img_group[0].size[1] + target_area = random.uniform(0.08, 1.0) * area + aspect_ratio = random.uniform(3. / 4, 4. 
/ 3) + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if random.random() < 0.5: + w, h = h, w + + if w <= img_group[0].size[0] and h <= img_group[0].size[1]: + x1 = random.randint(0, img_group[0].size[0] - w) + y1 = random.randint(0, img_group[0].size[1] - h) + found = True + break + else: + found = False + x1 = 0 + y1 = 0 + + if found: + out_group = list() + for img in img_group: + img = img.crop((x1, y1, x1 + w, y1 + h)) + assert(img.size == (w, h)) + out_group.append(img.resize((self.size, self.size), self.interpolation)) + return out_group + else: + # Fallback + scale = GroupScale(self.size, interpolation=self.interpolation) + crop = GroupRandomCrop(self.size) + return crop(scale(img_group)) + + +class Stack(object): + + def __init__(self, roll=False): + self.roll = roll + + def __call__(self, img_tuple): + img_group, label = img_tuple + + if img_group[0].mode == 'L': + return (np.concatenate([np.expand_dims(x, 2) for x in img_group], axis=2), label) + elif img_group[0].mode == 'RGB': + if self.roll: + return (np.concatenate([np.array(x)[:, :, ::-1] for x in img_group], axis=2), label) + else: + return (np.concatenate(img_group, axis=2), label) + + +class ToTorchFormatTensor(object): + """ Converts a PIL.Image (RGB) or numpy.ndarray (H x W x C) in the range [0, 255] + to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] """ + def __init__(self, div=True): + self.div = div + + def __call__(self, pic_tuple): + pic, label = pic_tuple + + if isinstance(pic, np.ndarray): + # handle numpy array + img = torch.from_numpy(pic).permute(2, 0, 1).contiguous() + else: + # handle PIL Image + img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes())) + img = img.view(pic.size[1], pic.size[0], len(pic.mode)) + # put it from HWC to CHW format + # yikes, this transpose takes 80% of the loading time/CPU + img = img.transpose(0, 1).transpose(0, 2).contiguous() + return (img.float().div(255.) 
if self.div else img.float(), label) + + +class IdentityTransform(object): + + def __call__(self, data): + return data + + +# if __name__ == "__main__": +# trans = torchvision.transforms.Compose([ +# GroupScale(256), +# GroupRandomCrop(224), +# Stack(), +# ToTorchFormatTensor(), +# GroupNormalize( +# mean=[.485, .456, .406], +# std=[.229, .224, .225] +# )] +# ) + +# im = Image.open('../tensorflow-model-zoo.torch/lena_299.png') + +# color_group = [im] * 3 +# rst = trans(color_group) + +# gray_group = [im.convert('L')] * 9 +# gray_rst = trans(gray_group) + +# trans2 = torchvision.transforms.Compose([ +# GroupRandomSizedCrop(256), +# Stack(), +# ToTorchFormatTensor(), +# GroupNormalize( +# mean=[.485, .456, .406], +# std=[.229, .224, .225]) +# ]) +# print(trans2(color_group)) \ No newline at end of file diff --git a/Pretrain/VideoMAE/utils.py b/Pretrain/VideoMAE/utils.py new file mode 100644 index 0000000..bcbd61b --- /dev/null +++ b/Pretrain/VideoMAE/utils.py @@ -0,0 +1,630 @@ +# -------------------------------------------------------- +# -------------------------------------------------------- +# Based on BEiT, timm, DINO and DeiT code bases +# https://github.com/microsoft/unilm/tree/master/beit +# https://github.com/rwightman/pytorch-image-models/tree/master/timm +# https://github.com/facebookresearch/deit +# https://github.com/facebookresearch/dino +# --------------------------------------------------------' +import datetime +import io +import json +import math +import os +import random +import subprocess +import time +from collections import defaultdict, deque +from pathlib import Path + +import numpy as np +import torch +import torch.distributed as dist +from tensorboardX import SummaryWriter +from timm.utils import get_state_dict +from torch._six import inf +from torch.utils.data._utils.collate import default_collate + + +class SmoothedValue(object): + """Track a series of values and provide access to smoothed values over a + window or the global series average. + """ + + def __init__(self, window_size=20, fmt=None): + if fmt is None: + fmt = "{median:.4f} ({global_avg:.4f})" + self.deque = deque(maxlen=window_size) + self.total = 0.0 + self.count = 0 + self.fmt = fmt + + def update(self, value, n=1): + self.deque.append(value) + self.count += n + self.total += value * n + + def synchronize_between_processes(self): + """ + Warning: does not synchronize the deque! 
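+        Only ``count`` and ``total`` are all-reduced, so ``global_avg`` becomes a
+        true cross-rank average while ``median``/``avg`` stay rank-local.
+        Illustrative example (assuming two ranks):
+            rank 0: count=10, total=5.0; rank 1: count=10, total=7.0
+            after all_reduce: count=20, total=12.0 -> global_avg = 0.6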
+ """ + if not is_dist_avail_and_initialized(): + return + t = torch.tensor([self.count, self.total], + dtype=torch.float64, + device='cuda') + dist.barrier() + dist.all_reduce(t) + t = t.tolist() + self.count = int(t[0]) + self.total = t[1] + + @property + def median(self): + d = torch.tensor(list(self.deque)) + return d.median().item() + + @property + def avg(self): + d = torch.tensor(list(self.deque), dtype=torch.float32) + return d.mean().item() + + @property + def global_avg(self): + return self.total / self.count + + @property + def max(self): + return max(self.deque) + + @property + def value(self): + return self.deque[-1] + + def __str__(self): + return self.fmt.format(median=self.median, + avg=self.avg, + global_avg=self.global_avg, + max=self.max, + value=self.value) + + +class MetricLogger(object): + + def __init__(self, delimiter="\t"): + self.meters = defaultdict(SmoothedValue) + self.delimiter = delimiter + + def update(self, **kwargs): + for k, v in kwargs.items(): + if v is None: + continue + if isinstance(v, torch.Tensor): + v = v.item() + assert isinstance(v, (float, int)) + self.meters[k].update(v) + + def __getattr__(self, attr): + if attr in self.meters: + return self.meters[attr] + if attr in self.__dict__: + return self.__dict__[attr] + raise AttributeError("'{}' object has no attribute '{}'".format( + type(self).__name__, attr)) + + def __str__(self): + loss_str = [] + for name, meter in self.meters.items(): + loss_str.append("{}: {}".format(name, str(meter))) + return self.delimiter.join(loss_str) + + def synchronize_between_processes(self): + for meter in self.meters.values(): + meter.synchronize_between_processes() + + def add_meter(self, name, meter): + self.meters[name] = meter + + def log_every(self, iterable, print_freq, header=None): + i = 0 + if not header: + header = '' + start_time = time.time() + end = time.time() + iter_time = SmoothedValue(fmt='{avg:.4f} (max: {max:.4f})') + data_time = SmoothedValue(fmt='{avg:.4f} (max: {max:.4f})') + space_fmt = ':' + str(len(str(len(iterable)))) + 'd' + log_msg = [ + header, '[{0' + space_fmt + '}/{1}]', 'eta: {eta}', '{meters}', + 'time: {time}', 'data: {data}' + ] + if torch.cuda.is_available(): + log_msg.append('max mem: {memory:.0f}') + log_msg = self.delimiter.join(log_msg) + MB = 1024.0 * 1024.0 + for obj in iterable: + data_time.update(time.time() - end) + yield obj + iter_time.update(time.time() - end) + if i % print_freq == 0 or i == len(iterable) - 1: + eta_seconds = iter_time.global_avg * (len(iterable) - i) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + if torch.cuda.is_available(): + print( + log_msg.format( + i, + len(iterable), + eta=eta_string, + meters=str(self), + time=str(iter_time), + data=str(data_time), + memory=torch.cuda.max_memory_allocated() / MB)) + else: + print( + log_msg.format(i, + len(iterable), + eta=eta_string, + meters=str(self), + time=str(iter_time), + data=str(data_time))) + i += 1 + end = time.time() + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('{} Total time: {} ({:.4f} s / it)'.format( + header, total_time_str, total_time / len(iterable))) + + +class TensorboardLogger(object): + + def __init__(self, log_dir): + self.writer = SummaryWriter(logdir=log_dir) + self.step = 0 + + def set_step(self, step=None): + if step is not None: + self.step = step + else: + self.step += 1 + + def update(self, head='scalar', step=None, **kwargs): + for k, v in kwargs.items(): + if v is None: + continue + 
if isinstance(v, torch.Tensor): + v = v.item() + assert isinstance(v, (float, int)) + self.writer.add_scalar(head + "/" + k, v, + self.step if step is None else step) + + def flush(self): + self.writer.flush() + + +def seed_worker(worker_id): + worker_seed = torch.initial_seed() % 2**32 + np.random.seed(worker_seed) + random.seed(worker_seed) + + +def _load_checkpoint_for_ema(model_ema, checkpoint): + """ + Workaround for ModelEma._load_checkpoint to accept an already-loaded object + """ + mem_file = io.BytesIO() + torch.save(checkpoint, mem_file) + mem_file.seek(0) + model_ema._load_checkpoint(mem_file) + + +def setup_for_distributed(is_master): + """ + This function disables printing when not in master process + """ + import builtins as __builtin__ + builtin_print = __builtin__.print + + def print(*args, **kwargs): + force = kwargs.pop('force', False) + if is_master or force: + builtin_print(*args, **kwargs) + + __builtin__.print = print + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True + + +def get_world_size(): + if not is_dist_avail_and_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not is_dist_avail_and_initialized(): + return 0 + return dist.get_rank() + + +def is_main_process(): + return get_rank() == 0 + + +def save_on_master(*args, **kwargs): + if is_main_process(): + torch.save(*args, **kwargs) + + +def init_distributed_mode(args): + if args.dist_on_itp: + args.rank = int(os.environ['OMPI_COMM_WORLD_RANK']) + args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE']) + args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK']) + args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], + os.environ['MASTER_PORT']) + os.environ['LOCAL_RANK'] = str(args.gpu) + os.environ['RANK'] = str(args.rank) + os.environ['WORLD_SIZE'] = str(args.world_size) + # ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"] + elif 'SLURM_PROCID' in os.environ: + args.rank = int(os.environ['SLURM_PROCID']) + args.gpu = int(os.environ['SLURM_LOCALID']) + args.world_size = int(os.environ['SLURM_NTASKS']) + os.environ['RANK'] = str(args.rank) + os.environ['LOCAL_RANK'] = str(args.gpu) + os.environ['WORLD_SIZE'] = str(args.world_size) + + node_list = os.environ['SLURM_NODELIST'] + addr = subprocess.getoutput( + f'scontrol show hostname {node_list} | head -n1') + if 'MASTER_ADDR' not in os.environ: + os.environ['MASTER_ADDR'] = addr + elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: + args.rank = int(os.environ["RANK"]) + args.world_size = int(os.environ['WORLD_SIZE']) + args.gpu = int(os.environ['LOCAL_RANK']) + else: + print('Not using distributed mode') + args.distributed = False + return + + args.distributed = True + + torch.cuda.set_device(args.gpu) + args.dist_backend = 'nccl' + print('| distributed init (rank {}): {}, gpu {}'.format( + args.rank, args.dist_url, args.gpu), + flush=True) + torch.distributed.init_process_group(backend=args.dist_backend, + init_method=args.dist_url, + world_size=args.world_size, + rank=args.rank) + torch.cuda.empty_cache() + torch.distributed.barrier() + assert torch.distributed.is_initialized() + setup_for_distributed(args.rank == 0) + + +def load_state_dict(model, + state_dict, + prefix='', + ignore_missing="relative_position_index"): + missing_keys = [] + unexpected_keys = [] + error_msgs = [] + # copy state_dict so _load_from_state_dict can modify it + metadata = getattr(state_dict, '_metadata', None) + state_dict = 
state_dict.copy() + if metadata is not None: + state_dict._metadata = metadata + + def load(module, prefix=''): + local_metadata = {} if metadata is None else metadata.get( + prefix[:-1], {}) + module._load_from_state_dict(state_dict, prefix, local_metadata, True, + missing_keys, unexpected_keys, error_msgs) + for name, child in module._modules.items(): + if child is not None: + load(child, prefix + name + '.') + + load(model, prefix=prefix) + + warn_missing_keys = [] + ignore_missing_keys = [] + for key in missing_keys: + keep_flag = True + for ignore_key in ignore_missing.split('|'): + if ignore_key in key: + keep_flag = False + break + if keep_flag: + warn_missing_keys.append(key) + else: + ignore_missing_keys.append(key) + + missing_keys = warn_missing_keys + + if len(missing_keys) > 0: + print("Weights of {} not initialized from pretrained model: {}".format( + model.__class__.__name__, missing_keys)) + if len(unexpected_keys) > 0: + print("Weights from pretrained model not used in {}: {}".format( + model.__class__.__name__, unexpected_keys)) + if len(ignore_missing_keys) > 0: + print( + "Ignored weights of {} not initialized from pretrained model: {}". + format(model.__class__.__name__, ignore_missing_keys)) + if len(error_msgs) > 0: + print('\n'.join(error_msgs)) + + +class NativeScalerWithGradNormCount: + state_dict_key = "amp_scaler" + + def __init__(self): + self._scaler = torch.cuda.amp.GradScaler() + + def __call__(self, + loss, + optimizer, + clip_grad=None, + parameters=None, + create_graph=False, + update_grad=True): + self._scaler.scale(loss).backward(create_graph=create_graph) + if update_grad: + if clip_grad is not None: + assert parameters is not None + self._scaler.unscale_( + optimizer + ) # unscale the gradients of optimizer's assigned params in-place + norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad) + else: + self._scaler.unscale_(optimizer) + norm = get_grad_norm_(parameters) + self._scaler.step(optimizer) + self._scaler.update() + else: + norm = None + return norm + + def state_dict(self): + return self._scaler.state_dict() + + def load_state_dict(self, state_dict): + self._scaler.load_state_dict(state_dict) + + +def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor: + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + parameters = [p for p in parameters if p.grad is not None] + norm_type = float(norm_type) + if len(parameters) == 0: + return torch.tensor(0.) 
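+    # What follows computes the total gradient norm over all parameters:
+    # for a finite ``norm_type`` p it is (sum_i ||g_i||_p ** p) ** (1 / p),
+    # i.e. the p-norm of the stacked per-parameter p-norms (the same convention
+    # as torch.nn.utils.clip_grad_norm_); for ``norm_type == inf`` it is the
+    # largest absolute gradient entry across all parameters.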
+ device = parameters[0].grad.device + if norm_type == inf: + total_norm = max(p.grad.detach().abs().max().to(device) + for p in parameters) + else: + total_norm = torch.norm( + torch.stack([ + torch.norm(p.grad.detach(), norm_type).to(device) + for p in parameters + ]), norm_type) + return total_norm + + +def cosine_scheduler(base_value, + final_value, + epochs, + niter_per_ep, + warmup_epochs=0, + start_warmup_value=0, + warmup_steps=-1): + warmup_schedule = np.array([]) + warmup_iters = warmup_epochs * niter_per_ep + if warmup_steps > 0: + warmup_iters = warmup_steps + print("Set warmup steps = %d" % warmup_iters) + if warmup_epochs > 0: + warmup_schedule = np.linspace(start_warmup_value, base_value, + warmup_iters) + + iters = np.arange(epochs * niter_per_ep - warmup_iters) + schedule = np.array([ + final_value + 0.5 * (base_value - final_value) * + (1 + math.cos(math.pi * i / (len(iters)))) for i in iters + ]) + + schedule = np.concatenate((warmup_schedule, schedule)) + + assert len(schedule) == epochs * niter_per_ep + return schedule + + +def save_model(args, + epoch, + model, + model_without_ddp, + optimizer, + loss_scaler, + model_ema=None): + output_dir = Path(args.output_dir) + epoch_name = str(epoch) + if loss_scaler is not None: + checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch_name)] + for checkpoint_path in checkpoint_paths: + to_save = { + 'model': model_without_ddp.state_dict(), + 'optimizer': optimizer.state_dict(), + 'epoch': epoch, + 'scaler': loss_scaler.state_dict(), + 'args': args, + } + + if model_ema is not None: + to_save['model_ema'] = get_state_dict(model_ema) + + save_on_master(to_save, checkpoint_path) + else: + client_state = {'epoch': epoch} + if model_ema is not None: + client_state['model_ema'] = get_state_dict(model_ema) + model.save_checkpoint(save_dir=args.output_dir, + tag="checkpoint-%s" % epoch_name, + client_state=client_state) + + +def auto_load_model(args, + model, + model_without_ddp, + optimizer, + loss_scaler, + model_ema=None): + output_dir = Path(args.output_dir) + if loss_scaler is not None: + # torch.amp + if args.auto_resume and len(args.resume) == 0: + import glob + all_checkpoints = glob.glob( + os.path.join(output_dir, 'checkpoint-*.pth')) + latest_ckpt = -1 + for ckpt in all_checkpoints: + t = ckpt.split('-')[-1].split('.')[0] + if t.isdigit(): + latest_ckpt = max(int(t), latest_ckpt) + if latest_ckpt >= 0: + args.resume = os.path.join(output_dir, + 'checkpoint-%d.pth' % latest_ckpt) + print("Auto resume checkpoint: %s" % args.resume) + + if args.resume: + if args.resume.startswith('https'): + checkpoint = torch.hub.load_state_dict_from_url( + args.resume, map_location='cpu', check_hash=True) + else: + checkpoint = torch.load(args.resume, map_location='cpu') + model_without_ddp.load_state_dict(checkpoint['model']) + print("Resume checkpoint %s" % args.resume) + if 'optimizer' in checkpoint and 'epoch' in checkpoint: + optimizer.load_state_dict(checkpoint['optimizer']) + args.start_epoch = checkpoint['epoch'] + 1 + if hasattr(args, 'model_ema') and args.model_ema: + _load_checkpoint_for_ema(model_ema, + checkpoint['model_ema']) + if 'scaler' in checkpoint: + loss_scaler.load_state_dict(checkpoint['scaler']) + print("With optim & sched!") + else: + # deepspeed, only support '--auto_resume'. 
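+        # Illustrative checkpoint layout this branch expects (names are examples):
+        #     <output_dir>/checkpoint-10/   <output_dir>/checkpoint-20/
+        # The glob below picks the largest numeric tag (20 here) and resumes via
+        # model.load_checkpoint(output_dir, tag='checkpoint-20').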
+ if args.auto_resume: + import glob + all_checkpoints = glob.glob( + os.path.join(output_dir, 'checkpoint-*')) + latest_ckpt = -1 + for ckpt in all_checkpoints: + t = ckpt.split('-')[-1].split('.')[0] + if t.isdigit(): + latest_ckpt = max(int(t), latest_ckpt) + if latest_ckpt >= 0: + args.resume = os.path.join(output_dir, + 'checkpoint-%d' % latest_ckpt) + print("Auto resume checkpoint: %d" % latest_ckpt) + _, client_states = model.load_checkpoint(args.output_dir, + tag='checkpoint-%d' % + latest_ckpt) + if 'epoch' in client_states: + args.start_epoch = client_states['epoch'] + 1 + if model_ema is not None: + if args.model_ema: + _load_checkpoint_for_ema(model_ema, + client_states['model_ema']) + + +def create_ds_config(args): + args.deepspeed_config = os.path.join(args.output_dir, + "deepspeed_config.json") + with open(args.deepspeed_config, mode="w") as writer: + ds_config = { + "train_batch_size": + args.batch_size * args.update_freq * get_world_size(), + "train_micro_batch_size_per_gpu": + args.batch_size, + "steps_per_print": + 1000, + "gradient_clipping": + 0.0 if args.clip_grad is None else args.clip_grad, + "optimizer": { + "type": "Adam", + "adam_w_mode": True, + "params": { + "lr": args.lr, + "weight_decay": args.weight_decay, + "bias_correction": True, + "betas": [0.9, 0.999], + "eps": 1e-8 + } + }, + "fp16": { + "enabled": True, + "loss_scale": 0, + "initial_scale_power": 7, + "loss_scale_window": 128 + } + } + + writer.write(json.dumps(ds_config, indent=2)) + + +def multiple_samples_collate(batch, fold=False): + """ + Collate function for repeated augmentation. Each instance in the batch has + more than one sample. + Args: + batch (tuple or list): data batch to collate. + Returns: + (tuple): collated data batch. + """ + inputs, labels, video_idx, extra_data = zip(*batch) + inputs = [item for sublist in inputs for item in sublist] + labels = [item for sublist in labels for item in sublist] + video_idx = [item for sublist in video_idx for item in sublist] + inputs, labels, video_idx, extra_data = ( + default_collate(inputs), + default_collate(labels), + default_collate(video_idx), + default_collate(extra_data), + ) + if fold: + return [inputs], labels, video_idx, extra_data + else: + return inputs, labels, video_idx, extra_data + + +def multiple_pretrain_samples_collate(batch, fold=False): + """ + Collate function for repeated augmentation. Each instance in the batch has + more than one sample. + Args: + batch (tuple or list): data batch to collate. + Returns: + (tuple): collated data batch. + """ + process_data, mask = zip(*batch) + process_data = [item for sublist in process_data for item in sublist] + mask = [item for sublist in mask for item in sublist] + process_data, mask = ( + default_collate(process_data), + default_collate(mask), + ) + if fold: + return [process_data], mask + else: + return process_data, mask diff --git a/Pretrain/VideoMAE/video_transforms.py b/Pretrain/VideoMAE/video_transforms.py new file mode 100644 index 0000000..5d8134e --- /dev/null +++ b/Pretrain/VideoMAE/video_transforms.py @@ -0,0 +1,1283 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
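+#
+# Illustrative usage of the clip-level transforms defined at the end of this
+# file (a sketch, not taken from the original training pipeline). They operate
+# on a list of PIL Images (or numpy arrays) representing one clip:
+#
+#     clip_aug = Compose([
+#         RandomHorizontalFlip(),
+#         Resize((256, 256), interpolation='bilinear'),
+#         RandomCrop(224),
+#     ])
+#     augmented_frames = clip_aug(list_of_pil_frames)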
+ +import math +import numbers + +# import cv2 +import random + +import numpy as np +import PIL +import torch +import torchvision +import torchvision.transforms.functional as F +from PIL import Image +from torchvision import transforms + +import functional as FF +from rand_augment import rand_augment_transform +from random_erasing import RandomErasing + +_pil_interpolation_to_str = { + Image.NEAREST: "PIL.Image.NEAREST", + Image.BILINEAR: "PIL.Image.BILINEAR", + Image.BICUBIC: "PIL.Image.BICUBIC", + Image.LANCZOS: "PIL.Image.LANCZOS", + Image.HAMMING: "PIL.Image.HAMMING", + Image.BOX: "PIL.Image.BOX", +} + + +_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC) + + +def _pil_interp(method): + if method == "bicubic": + return Image.BICUBIC + elif method == "lanczos": + return Image.LANCZOS + elif method == "hamming": + return Image.HAMMING + else: + return Image.BILINEAR + + +def random_short_side_scale_jitter( + images, min_size, max_size, boxes=None, inverse_uniform_sampling=False +): + """ + Perform a spatial short scale jittering on the given images and + corresponding boxes. + Args: + images (tensor): images to perform scale jitter. Dimension is + `num frames` x `channel` x `height` x `width`. + min_size (int): the minimal size to scale the frames. + max_size (int): the maximal size to scale the frames. + boxes (ndarray): optional. Corresponding boxes to images. + Dimension is `num boxes` x 4. + inverse_uniform_sampling (bool): if True, sample uniformly in + [1 / max_scale, 1 / min_scale] and take a reciprocal to get the + scale. If False, take a uniform sample from [min_scale, max_scale]. + Returns: + (tensor): the scaled images with dimension of + `num frames` x `channel` x `new height` x `new width`. + (ndarray or None): the scaled boxes with dimension of + `num boxes` x 4. + """ + if inverse_uniform_sampling: + size = int( + round(1.0 / np.random.uniform(1.0 / max_size, 1.0 / min_size)) + ) + else: + size = int(round(np.random.uniform(min_size, max_size))) + + height = images.shape[2] + width = images.shape[3] + if (width <= height and width == size) or ( + height <= width and height == size + ): + return images, boxes + new_width = size + new_height = size + if width < height: + new_height = int(math.floor((float(height) / width) * size)) + if boxes is not None: + boxes = boxes * float(new_height) / height + else: + new_width = int(math.floor((float(width) / height) * size)) + if boxes is not None: + boxes = boxes * float(new_width) / width + + return ( + torch.nn.functional.interpolate( + images, + size=(new_height, new_width), + mode="bilinear", + align_corners=False, + ), + boxes, + ) + + +def crop_boxes(boxes, x_offset, y_offset): + """ + Peform crop on the bounding boxes given the offsets. + Args: + boxes (ndarray or None): bounding boxes to peform crop. The dimension + is `num boxes` x 4. + x_offset (int): cropping offset in the x axis. + y_offset (int): cropping offset in the y axis. + Returns: + cropped_boxes (ndarray or None): the cropped boxes with dimension of + `num boxes` x 4. + """ + cropped_boxes = boxes.copy() + cropped_boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset + cropped_boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset + + return cropped_boxes + + +def random_crop(images, size, boxes=None): + """ + Perform random spatial crop on the given images and corresponding boxes. + Args: + images (tensor): images to perform random crop. The dimension is + `num frames` x `channel` x `height` x `width`. + size (int): the size of height and width to crop on the image. 
+        boxes (ndarray or None): optional. Corresponding boxes to images.
+            Dimension is `num boxes` x 4.
+    Returns:
+        cropped (tensor): cropped images with dimension of
+            `num frames` x `channel` x `size` x `size`.
+        cropped_boxes (ndarray or None): the cropped boxes with dimension of
+            `num boxes` x 4.
+    """
+    if images.shape[2] == size and images.shape[3] == size:
+        return images, boxes  # keep the (images, boxes) return convention
+    height = images.shape[2]
+    width = images.shape[3]
+    y_offset = 0
+    if height > size:
+        y_offset = int(np.random.randint(0, height - size))
+    x_offset = 0
+    if width > size:
+        x_offset = int(np.random.randint(0, width - size))
+    cropped = images[
+        :, :, y_offset : y_offset + size, x_offset : x_offset + size
+    ]
+
+    cropped_boxes = (
+        crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None
+    )
+
+    return cropped, cropped_boxes
+
+
+def horizontal_flip(prob, images, boxes=None):
+    """
+    Perform horizontal flip on the given images and corresponding boxes.
+    Args:
+        prob (float): probability of flipping the images.
+        images (tensor): images to perform horizontal flip, the dimension is
+            `num frames` x `channel` x `height` x `width`.
+        boxes (ndarray or None): optional. Corresponding boxes to images.
+            Dimension is `num boxes` x 4.
+    Returns:
+        images (tensor): images with dimension of
+            `num frames` x `channel` x `height` x `width`.
+        flipped_boxes (ndarray or None): the flipped boxes with dimension of
+            `num boxes` x 4.
+    """
+    if boxes is None:
+        flipped_boxes = None
+    else:
+        flipped_boxes = boxes.copy()
+
+    if np.random.uniform() < prob:
+        images = images.flip((-1))
+
+        if len(images.shape) == 3:
+            width = images.shape[2]
+        elif len(images.shape) == 4:
+            width = images.shape[3]
+        else:
+            raise NotImplementedError("Dimension not supported")
+        if boxes is not None:
+            flipped_boxes[:, [0, 2]] = width - boxes[:, [2, 0]] - 1
+
+    return images, flipped_boxes
+
+
+def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None):
+    """
+    Perform uniform spatial sampling on the images and corresponding boxes.
+    Args:
+        images (tensor): images to perform uniform crop. The dimension is
+            `num frames` x `channel` x `height` x `width`.
+        size (int): size of height and width to crop the images.
+        spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width
+            is larger than height. Or 0, 1, or 2 for top, center, and bottom
+            crop if height is larger than width.
+        boxes (ndarray or None): optional. Corresponding boxes to images.
+            Dimension is `num boxes` x 4.
+        scale_size (int): optional. If not None, resize the images to scale_size
+            before performing any crop.
+    Returns:
+        cropped (tensor): images with dimension of
+            `num frames` x `channel` x `size` x `size`.
+        cropped_boxes (ndarray or None): the cropped boxes with dimension of
+            `num boxes` x 4.
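+    Example (illustrative): for a 16 x 3 x 256 x 320 clip and size=224,
+        spatial_idx 0 / 1 / 2 return the left, center and right 224 x 224 crops
+        (top / center / bottom crops if the clip were taller than wide).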
+ """ + assert spatial_idx in [0, 1, 2] + ndim = len(images.shape) + if ndim == 3: + images = images.unsqueeze(0) + height = images.shape[2] + width = images.shape[3] + + if scale_size is not None: + if width <= height: + width, height = scale_size, int(height / width * scale_size) + else: + width, height = int(width / height * scale_size), scale_size + images = torch.nn.functional.interpolate( + images, + size=(height, width), + mode="bilinear", + align_corners=False, + ) + + y_offset = int(math.ceil((height - size) / 2)) + x_offset = int(math.ceil((width - size) / 2)) + + if height > width: + if spatial_idx == 0: + y_offset = 0 + elif spatial_idx == 2: + y_offset = height - size + else: + if spatial_idx == 0: + x_offset = 0 + elif spatial_idx == 2: + x_offset = width - size + cropped = images[ + :, :, y_offset : y_offset + size, x_offset : x_offset + size + ] + cropped_boxes = ( + crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None + ) + if ndim == 3: + cropped = cropped.squeeze(0) + return cropped, cropped_boxes + + +def clip_boxes_to_image(boxes, height, width): + """ + Clip an array of boxes to an image with the given height and width. + Args: + boxes (ndarray): bounding boxes to perform clipping. + Dimension is `num boxes` x 4. + height (int): given image height. + width (int): given image width. + Returns: + clipped_boxes (ndarray): the clipped boxes with dimension of + `num boxes` x 4. + """ + clipped_boxes = boxes.copy() + clipped_boxes[:, [0, 2]] = np.minimum( + width - 1.0, np.maximum(0.0, boxes[:, [0, 2]]) + ) + clipped_boxes[:, [1, 3]] = np.minimum( + height - 1.0, np.maximum(0.0, boxes[:, [1, 3]]) + ) + return clipped_boxes + + +def blend(images1, images2, alpha): + """ + Blend two images with a given weight alpha. + Args: + images1 (tensor): the first images to be blended, the dimension is + `num frames` x `channel` x `height` x `width`. + images2 (tensor): the second images to be blended, the dimension is + `num frames` x `channel` x `height` x `width`. + alpha (float): the blending weight. + Returns: + (tensor): blended images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + return images1 * alpha + images2 * (1 - alpha) + + +def grayscale(images): + """ + Get the grayscale for the input images. The channels of images should be + in order BGR. + Args: + images (tensor): the input images for getting grayscale. Dimension is + `num frames` x `channel` x `height` x `width`. + Returns: + img_gray (tensor): blended images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + # R -> 0.299, G -> 0.587, B -> 0.114. + img_gray = torch.tensor(images) + gray_channel = ( + 0.299 * images[:, 2] + 0.587 * images[:, 1] + 0.114 * images[:, 0] + ) + img_gray[:, 0] = gray_channel + img_gray[:, 1] = gray_channel + img_gray[:, 2] = gray_channel + return img_gray + + +def color_jitter(images, img_brightness=0, img_contrast=0, img_saturation=0): + """ + Perfrom a color jittering on the input images. The channels of images + should be in order BGR. + Args: + images (tensor): images to perform color jitter. Dimension is + `num frames` x `channel` x `height` x `width`. + img_brightness (float): jitter ratio for brightness. + img_contrast (float): jitter ratio for contrast. + img_saturation (float): jitter ratio for saturation. + Returns: + images (tensor): the jittered images, the dimension is + `num frames` x `channel` x `height` x `width`. 
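+    Example (illustrative):
+        images = color_jitter(images, img_brightness=0.4, img_contrast=0.4,
+                              img_saturation=0.4)
+        applies the three jitters once each, in a random order, each with
+        strength 0.4.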
+ """ + + jitter = [] + if img_brightness != 0: + jitter.append("brightness") + if img_contrast != 0: + jitter.append("contrast") + if img_saturation != 0: + jitter.append("saturation") + + if len(jitter) > 0: + order = np.random.permutation(np.arange(len(jitter))) + for idx in range(0, len(jitter)): + if jitter[order[idx]] == "brightness": + images = brightness_jitter(img_brightness, images) + elif jitter[order[idx]] == "contrast": + images = contrast_jitter(img_contrast, images) + elif jitter[order[idx]] == "saturation": + images = saturation_jitter(img_saturation, images) + return images + + +def brightness_jitter(var, images): + """ + Perfrom brightness jittering on the input images. The channels of images + should be in order BGR. + Args: + var (float): jitter ratio for brightness. + images (tensor): images to perform color jitter. Dimension is + `num frames` x `channel` x `height` x `width`. + Returns: + images (tensor): the jittered images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + alpha = 1.0 + np.random.uniform(-var, var) + + img_bright = torch.zeros(images.shape) + images = blend(images, img_bright, alpha) + return images + + +def contrast_jitter(var, images): + """ + Perfrom contrast jittering on the input images. The channels of images + should be in order BGR. + Args: + var (float): jitter ratio for contrast. + images (tensor): images to perform color jitter. Dimension is + `num frames` x `channel` x `height` x `width`. + Returns: + images (tensor): the jittered images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + alpha = 1.0 + np.random.uniform(-var, var) + + img_gray = grayscale(images) + img_gray[:] = torch.mean(img_gray, dim=(1, 2, 3), keepdim=True) + images = blend(images, img_gray, alpha) + return images + + +def saturation_jitter(var, images): + """ + Perfrom saturation jittering on the input images. The channels of images + should be in order BGR. + Args: + var (float): jitter ratio for saturation. + images (tensor): images to perform color jitter. Dimension is + `num frames` x `channel` x `height` x `width`. + Returns: + images (tensor): the jittered images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + alpha = 1.0 + np.random.uniform(-var, var) + img_gray = grayscale(images) + images = blend(images, img_gray, alpha) + + return images + + +def lighting_jitter(images, alphastd, eigval, eigvec): + """ + Perform AlexNet-style PCA jitter on the given images. + Args: + images (tensor): images to perform lighting jitter. Dimension is + `num frames` x `channel` x `height` x `width`. + alphastd (float): jitter ratio for PCA jitter. + eigval (list): eigenvalues for PCA jitter. + eigvec (list[list]): eigenvectors for PCA jitter. + Returns: + out_images (tensor): the jittered images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + if alphastd == 0: + return images + # generate alpha1, alpha2, alpha3. 
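+    # AlexNet-style PCA ("lighting") noise: draw alpha ~ N(0, alphastd) per
+    # eigenvalue and add a constant per-channel offset
+    #     rgb = sum_k alpha_k * eigval_k * eigvec[:, k]
+    # to every pixel; the loop below indexes the offset as rgb[2 - idx], i.e.
+    # it assumes the channels are stored in BGR order.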
+ alpha = np.random.normal(0, alphastd, size=(1, 3)) + eig_vec = np.array(eigvec) + eig_val = np.reshape(eigval, (1, 3)) + rgb = np.sum( + eig_vec * np.repeat(alpha, 3, axis=0) * np.repeat(eig_val, 3, axis=0), + axis=1, + ) + out_images = torch.zeros_like(images) + if len(images.shape) == 3: + # C H W + channel_dim = 0 + elif len(images.shape) == 4: + # T C H W + channel_dim = 1 + else: + raise NotImplementedError(f"Unsupported dimension {len(images.shape)}") + + for idx in range(images.shape[channel_dim]): + # C H W + if len(images.shape) == 3: + out_images[idx] = images[idx] + rgb[2 - idx] + # T C H W + elif len(images.shape) == 4: + out_images[:, idx] = images[:, idx] + rgb[2 - idx] + else: + raise NotImplementedError( + f"Unsupported dimension {len(images.shape)}" + ) + + return out_images + + +def color_normalization(images, mean, stddev): + """ + Perform color nomration on the given images. + Args: + images (tensor): images to perform color normalization. Dimension is + `num frames` x `channel` x `height` x `width`. + mean (list): mean values for normalization. + stddev (list): standard deviations for normalization. + + Returns: + out_images (tensor): the noramlized images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + if len(images.shape) == 3: + assert ( + len(mean) == images.shape[0] + ), "channel mean not computed properly" + assert ( + len(stddev) == images.shape[0] + ), "channel stddev not computed properly" + elif len(images.shape) == 4: + assert ( + len(mean) == images.shape[1] + ), "channel mean not computed properly" + assert ( + len(stddev) == images.shape[1] + ), "channel stddev not computed properly" + else: + raise NotImplementedError(f"Unsupported dimension {len(images.shape)}") + + out_images = torch.zeros_like(images) + for idx in range(len(mean)): + # C H W + if len(images.shape) == 3: + out_images[idx] = (images[idx] - mean[idx]) / stddev[idx] + elif len(images.shape) == 4: + out_images[:, idx] = (images[:, idx] - mean[idx]) / stddev[idx] + else: + raise NotImplementedError( + f"Unsupported dimension {len(images.shape)}" + ) + return out_images + + +def _get_param_spatial_crop( + scale, ratio, height, width, num_repeat=10, log_scale=True, switch_hw=False +): + """ + Given scale, ratio, height and width, return sampled coordinates of the videos. + """ + for _ in range(num_repeat): + area = height * width + target_area = random.uniform(*scale) * area + if log_scale: + log_ratio = (math.log(ratio[0]), math.log(ratio[1])) + aspect_ratio = math.exp(random.uniform(*log_ratio)) + else: + aspect_ratio = random.uniform(*ratio) + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if np.random.uniform() < 0.5 and switch_hw: + w, h = h, w + + if 0 < w <= width and 0 < h <= height: + i = random.randint(0, height - h) + j = random.randint(0, width - w) + return i, j, h, w + + # Fallback to central crop + in_ratio = float(width) / float(height) + if in_ratio < min(ratio): + w = width + h = int(round(w / min(ratio))) + elif in_ratio > max(ratio): + h = height + w = int(round(h * max(ratio))) + else: # whole image + w = width + h = height + i = (height - h) // 2 + j = (width - w) // 2 + return i, j, h, w + + +def random_resized_crop( + images, + target_height, + target_width, + scale=(0.8, 1.0), + ratio=(3.0 / 4.0, 4.0 / 3.0), +): + """ + Crop the given images to random size and aspect ratio. 
A crop of random + size (default: of 0.08 to 1.0) of the original size and a random aspect + ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This + crop is finally resized to given size. This is popularly used to train the + Inception networks. + + Args: + images: Images to perform resizing and cropping. + target_height: Desired height after cropping. + target_width: Desired width after cropping. + scale: Scale range of Inception-style area based random resizing. + ratio: Aspect ratio range of Inception-style area based random resizing. + """ + + height = images.shape[2] + width = images.shape[3] + + i, j, h, w = _get_param_spatial_crop(scale, ratio, height, width) + cropped = images[:, :, i : i + h, j : j + w] + return torch.nn.functional.interpolate( + cropped, + size=(target_height, target_width), + mode="bilinear", + align_corners=False, + ) + + +def random_resized_crop_with_shift( + images, + target_height, + target_width, + scale=(0.8, 1.0), + ratio=(3.0 / 4.0, 4.0 / 3.0), +): + """ + This is similar to random_resized_crop. However, it samples two different + boxes (for cropping) for the first and last frame. It then linearly + interpolates the two boxes for other frames. + + Args: + images: Images to perform resizing and cropping. + target_height: Desired height after cropping. + target_width: Desired width after cropping. + scale: Scale range of Inception-style area based random resizing. + ratio: Aspect ratio range of Inception-style area based random resizing. + """ + t = images.shape[1] + height = images.shape[2] + width = images.shape[3] + + i, j, h, w = _get_param_spatial_crop(scale, ratio, height, width) + i_, j_, h_, w_ = _get_param_spatial_crop(scale, ratio, height, width) + i_s = [int(i) for i in torch.linspace(i, i_, steps=t).tolist()] + j_s = [int(i) for i in torch.linspace(j, j_, steps=t).tolist()] + h_s = [int(i) for i in torch.linspace(h, h_, steps=t).tolist()] + w_s = [int(i) for i in torch.linspace(w, w_, steps=t).tolist()] + out = torch.zeros((3, t, target_height, target_width)) + for ind in range(t): + out[:, ind : ind + 1, :, :] = torch.nn.functional.interpolate( + images[ + :, + ind : ind + 1, + i_s[ind] : i_s[ind] + h_s[ind], + j_s[ind] : j_s[ind] + w_s[ind], + ], + size=(target_height, target_width), + mode="bilinear", + align_corners=False, + ) + return out + + +def create_random_augment( + input_size, + auto_augment=None, + interpolation="bilinear", +): + """ + Get video randaug transform. + + Args: + input_size: The size of the input video in tuple. + auto_augment: Parameters for randaug. An example: + "rand-m7-n4-mstd0.5-inc1" (m is the magnitude and n is the number + of operations to apply). + interpolation: Interpolation method. + """ + if isinstance(input_size, tuple): + img_size = input_size[-2:] + else: + img_size = input_size + + if auto_augment: + assert isinstance(auto_augment, str) + if isinstance(img_size, tuple): + img_size_min = min(img_size) + else: + img_size_min = img_size + aa_params = {"translate_const": int(img_size_min * 0.45)} + if interpolation and interpolation != "random": + aa_params["interpolation"] = _pil_interp(interpolation) + if auto_augment.startswith("rand"): + return transforms.Compose( + [rand_augment_transform(auto_augment, aa_params)] + ) + raise NotImplementedError + + +def random_sized_crop_img( + im, + size, + jitter_scale=(0.08, 1.0), + jitter_aspect=(3.0 / 4.0, 4.0 / 3.0), + max_iter=10, +): + """ + Performs Inception-style cropping (used for training). 
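+    Illustrative example: for a 3 x 240 x 320 image tensor ``im``,
+    ``random_sized_crop_img(im, 224)`` samples a jittered-area, jittered-aspect
+    window and resizes it bilinearly to 3 x 224 x 224.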
+ """ + assert ( + len(im.shape) == 3 + ), "Currently only support image for random_sized_crop" + h, w = im.shape[1:3] + i, j, h, w = _get_param_spatial_crop( + scale=jitter_scale, + ratio=jitter_aspect, + height=h, + width=w, + num_repeat=max_iter, + log_scale=False, + switch_hw=True, + ) + cropped = im[:, i : i + h, j : j + w] + return torch.nn.functional.interpolate( + cropped.unsqueeze(0), + size=(size, size), + mode="bilinear", + align_corners=False, + ).squeeze(0) + + +# The following code are modified based on timm lib, we will replace the following +# contents with dependency from PyTorchVideo. +# https://github.com/facebookresearch/pytorchvideo +class RandomResizedCropAndInterpolation: + """Crop the given PIL Image to random size and aspect ratio with random interpolation. + A crop of random size (default: of 0.08 to 1.0) of the original size and a random + aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop + is finally resized to given size. + This is popularly used to train the Inception networks. + Args: + size: expected output size of each edge + scale: range of size of the origin size cropped + ratio: range of aspect ratio of the origin aspect ratio cropped + interpolation: Default: PIL.Image.BILINEAR + """ + + def __init__( + self, + size, + scale=(0.08, 1.0), + ratio=(3.0 / 4.0, 4.0 / 3.0), + interpolation="bilinear", + ): + if isinstance(size, tuple): + self.size = size + else: + self.size = (size, size) + if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): + print("range should be of kind (min, max)") + + if interpolation == "random": + self.interpolation = _RANDOM_INTERPOLATION + else: + self.interpolation = _pil_interp(interpolation) + self.scale = scale + self.ratio = ratio + + @staticmethod + def get_params(img, scale, ratio): + """Get parameters for ``crop`` for a random sized crop. + Args: + img (PIL Image): Image to be cropped. + scale (tuple): range of size of the origin size cropped + ratio (tuple): range of aspect ratio of the origin aspect ratio cropped + Returns: + tuple: params (i, j, h, w) to be passed to ``crop`` for a random + sized crop. + """ + area = img.size[0] * img.size[1] + + for _ in range(10): + target_area = random.uniform(*scale) * area + log_ratio = (math.log(ratio[0]), math.log(ratio[1])) + aspect_ratio = math.exp(random.uniform(*log_ratio)) + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if w <= img.size[0] and h <= img.size[1]: + i = random.randint(0, img.size[1] - h) + j = random.randint(0, img.size[0] - w) + return i, j, h, w + + # Fallback to central crop + in_ratio = img.size[0] / img.size[1] + if in_ratio < min(ratio): + w = img.size[0] + h = int(round(w / min(ratio))) + elif in_ratio > max(ratio): + h = img.size[1] + w = int(round(h * max(ratio))) + else: # whole image + w = img.size[0] + h = img.size[1] + i = (img.size[1] - h) // 2 + j = (img.size[0] - w) // 2 + return i, j, h, w + + def __call__(self, img): + """ + Args: + img (PIL Image): Image to be cropped and resized. + Returns: + PIL Image: Randomly cropped and resized image. 
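+        Example (illustrative):
+            t = RandomResizedCropAndInterpolation(224, interpolation="random")
+            out = t(pil_img)  # a 224x224 PIL Image; scale, ratio and the
+                              # resize filter are re-sampled on every call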
+ """ + i, j, h, w = self.get_params(img, self.scale, self.ratio) + if isinstance(self.interpolation, (tuple, list)): + interpolation = random.choice(self.interpolation) + else: + interpolation = self.interpolation + return F.resized_crop(img, i, j, h, w, self.size, interpolation) + + def __repr__(self): + if isinstance(self.interpolation, (tuple, list)): + interpolate_str = " ".join( + [_pil_interpolation_to_str[x] for x in self.interpolation] + ) + else: + interpolate_str = _pil_interpolation_to_str[self.interpolation] + format_string = self.__class__.__name__ + "(size={0}".format(self.size) + format_string += ", scale={0}".format( + tuple(round(s, 4) for s in self.scale) + ) + format_string += ", ratio={0}".format( + tuple(round(r, 4) for r in self.ratio) + ) + format_string += ", interpolation={0})".format(interpolate_str) + return format_string + + +def transforms_imagenet_train( + img_size=224, + scale=None, + ratio=None, + hflip=0.5, + vflip=0.0, + color_jitter=0.4, + auto_augment=None, + interpolation="random", + use_prefetcher=False, + mean=(0.485, 0.456, 0.406), + std=(0.229, 0.224, 0.225), + re_prob=0.0, + re_mode="const", + re_count=1, + re_num_splits=0, + separate=False, +): + """ + If separate==True, the transforms are returned as a tuple of 3 separate transforms + for use in a mixing dataset that passes + * all data through the first (primary) transform, called the 'clean' data + * a portion of the data through the secondary transform + * normalizes and converts the branches above with the third, final transform + """ + if isinstance(img_size, tuple): + img_size = img_size[-2:] + else: + img_size = img_size + + scale = tuple(scale or (0.08, 1.0)) # default imagenet scale range + ratio = tuple( + ratio or (3.0 / 4.0, 4.0 / 3.0) + ) # default imagenet ratio range + primary_tfl = [ + RandomResizedCropAndInterpolation( + img_size, scale=scale, ratio=ratio, interpolation=interpolation + ) + ] + if hflip > 0.0: + primary_tfl += [transforms.RandomHorizontalFlip(p=hflip)] + if vflip > 0.0: + primary_tfl += [transforms.RandomVerticalFlip(p=vflip)] + + secondary_tfl = [] + if auto_augment: + assert isinstance(auto_augment, str) + if isinstance(img_size, tuple): + img_size_min = min(img_size) + else: + img_size_min = img_size + aa_params = dict( + translate_const=int(img_size_min * 0.45), + img_mean=tuple([min(255, round(255 * x)) for x in mean]), + ) + if interpolation and interpolation != "random": + aa_params["interpolation"] = _pil_interp(interpolation) + if auto_augment.startswith("rand"): + secondary_tfl += [rand_augment_transform(auto_augment, aa_params)] + elif auto_augment.startswith("augmix"): + raise NotImplementedError("Augmix not implemented") + else: + raise NotImplementedError("Auto aug not implemented") + elif color_jitter is not None: + # color jitter is enabled when not using AA + if isinstance(color_jitter, (list, tuple)): + # color jitter should be a 3-tuple/list if spec brightness/contrast/saturation + # or 4 if also augmenting hue + assert len(color_jitter) in (3, 4) + else: + # if it's a scalar, duplicate for brightness, contrast, and saturation, no hue + color_jitter = (float(color_jitter),) * 3 + secondary_tfl += [transforms.ColorJitter(*color_jitter)] + + final_tfl = [] + final_tfl += [ + transforms.ToTensor(), + transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std)), + ] + if re_prob > 0.0: + final_tfl.append( + RandomErasing( + re_prob, + mode=re_mode, + max_count=re_count, + num_splits=re_num_splits, + device="cpu", + cube=False, + ) + ) + + 
if separate: + return ( + transforms.Compose(primary_tfl), + transforms.Compose(secondary_tfl), + transforms.Compose(final_tfl), + ) + else: + return transforms.Compose(primary_tfl + secondary_tfl + final_tfl) + +############################################################################################################ +############################################################################################################ + +class Compose(object): + """Composes several transforms + Args: + transforms (list of ``Transform`` objects): list of transforms + to compose + """ + + def __init__(self, transforms): + self.transforms = transforms + + def __call__(self, clip): + for t in self.transforms: + clip = t(clip) + return clip + + +class RandomHorizontalFlip(object): + """Horizontally flip the list of given images randomly + with a probability 0.5 + """ + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Randomly flipped clip + """ + if random.random() < 0.5: + if isinstance(clip[0], np.ndarray): + return [np.fliplr(img) for img in clip] + elif isinstance(clip[0], PIL.Image.Image): + return [ + img.transpose(PIL.Image.FLIP_LEFT_RIGHT) for img in clip + ] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + ' but got list of {0}'.format(type(clip[0]))) + return clip + + +class RandomResize(object): + """Resizes a list of (H x W x C) numpy.ndarray to the final size + The larger the original image is, the more times it takes to + interpolate + Args: + interpolation (str): Can be one of 'nearest', 'bilinear' + defaults to nearest + size (tuple): (widht, height) + """ + + def __init__(self, ratio=(3. / 4., 4. 
/ 3.), interpolation='nearest'): + self.ratio = ratio + self.interpolation = interpolation + + def __call__(self, clip): + scaling_factor = random.uniform(self.ratio[0], self.ratio[1]) + + if isinstance(clip[0], np.ndarray): + im_h, im_w, im_c = clip[0].shape + elif isinstance(clip[0], PIL.Image.Image): + im_w, im_h = clip[0].size + + new_w = int(im_w * scaling_factor) + new_h = int(im_h * scaling_factor) + new_size = (new_w, new_h) + resized = FF.resize_clip( + clip, new_size, interpolation=self.interpolation) + return resized + + +class Resize(object): + """Resizes a list of (H x W x C) numpy.ndarray to the final size + The larger the original image is, the more times it takes to + interpolate + Args: + interpolation (str): Can be one of 'nearest', 'bilinear' + defaults to nearest + size (tuple): (widht, height) + """ + + def __init__(self, size, interpolation='nearest'): + self.size = size + self.interpolation = interpolation + + def __call__(self, clip): + resized = FF.resize_clip( + clip, self.size, interpolation=self.interpolation) + return resized + + +class RandomCrop(object): + """Extract random crop at the same location for a list of images + Args: + size (sequence or int): Desired output size for the + crop in format (h, w) + """ + + def __init__(self, size): + if isinstance(size, numbers.Number): + size = (size, size) + + self.size = size + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + h, w = self.size + if isinstance(clip[0], np.ndarray): + im_h, im_w, im_c = clip[0].shape + elif isinstance(clip[0], PIL.Image.Image): + im_w, im_h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + if w > im_w or h > im_h: + error_msg = ( + 'Initial image size should be larger then ' + 'cropped size but got cropped sizes : ({w}, {h}) while ' + 'initial image is ({im_w}, {im_h})'.format( + im_w=im_w, im_h=im_h, w=w, h=h)) + raise ValueError(error_msg) + + x1 = random.randint(0, im_w - w) + y1 = random.randint(0, im_h - h) + cropped = FF.crop_clip(clip, y1, x1, h, w) + + return cropped + + +class ThreeCrop(object): + """Extract random crop at the same location for a list of images + Args: + size (sequence or int): Desired output size for the + crop in format (h, w) + """ + + def __init__(self, size): + if isinstance(size, numbers.Number): + size = (size, size) + + self.size = size + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + h, w = self.size + if isinstance(clip[0], np.ndarray): + im_h, im_w, im_c = clip[0].shape + elif isinstance(clip[0], PIL.Image.Image): + im_w, im_h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + if w != im_w and h != im_h: + clip = FF.resize_clip(clip, self.size, interpolation="bilinear") + im_h, im_w, im_c = clip[0].shape + + step = np.max((np.max((im_w, im_h)) - self.size[0]) // 2, 0) + cropped = [] + for i in range(3): + if (im_h > self.size[0]): + x1 = 0 + y1 = i * step + cropped.extend(FF.crop_clip(clip, y1, x1, h, w)) + else: + x1 = i * step + y1 = 0 + cropped.extend(FF.crop_clip(clip, y1, x1, h, w)) + return cropped + + +class RandomRotation(object): + """Rotate entire 
clip randomly by a random angle within + given bounds + Args: + degrees (sequence or int): Range of degrees to select from + If degrees is a number instead of sequence like (min, max), + the range of degrees, will be (-degrees, +degrees). + """ + + def __init__(self, degrees): + if isinstance(degrees, numbers.Number): + if degrees < 0: + raise ValueError('If degrees is a single number,' + 'must be positive') + degrees = (-degrees, degrees) + else: + if len(degrees) != 2: + raise ValueError('If degrees is a sequence,' + 'it must be of len 2.') + + self.degrees = degrees + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + import skimage + angle = random.uniform(self.degrees[0], self.degrees[1]) + if isinstance(clip[0], np.ndarray): + rotated = [skimage.transform.rotate(img, angle) for img in clip] + elif isinstance(clip[0], PIL.Image.Image): + rotated = [img.rotate(angle) for img in clip] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + + return rotated + + +class CenterCrop(object): + """Extract center crop at the same location for a list of images + Args: + size (sequence or int): Desired output size for the + crop in format (h, w) + """ + + def __init__(self, size): + if isinstance(size, numbers.Number): + size = (size, size) + + self.size = size + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + h, w = self.size + if isinstance(clip[0], np.ndarray): + im_h, im_w, im_c = clip[0].shape + elif isinstance(clip[0], PIL.Image.Image): + im_w, im_h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + if w > im_w or h > im_h: + error_msg = ( + 'Initial image size should be larger then ' + 'cropped size but got cropped sizes : ({w}, {h}) while ' + 'initial image is ({im_w}, {im_h})'.format( + im_w=im_w, im_h=im_h, w=w, h=h)) + raise ValueError(error_msg) + + x1 = int(round((im_w - w) / 2.)) + y1 = int(round((im_h - h) / 2.)) + cropped = FF.crop_clip(clip, y1, x1, h, w) + + return cropped + + +class ColorJitter(object): + """Randomly change the brightness, contrast and saturation and hue of the clip + Args: + brightness (float): How much to jitter brightness. brightness_factor + is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]. + contrast (float): How much to jitter contrast. contrast_factor + is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]. + saturation (float): How much to jitter saturation. saturation_factor + is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]. + hue(float): How much to jitter hue. hue_factor is chosen uniformly from + [-hue, hue]. Should be >=0 and <= 0.5. 
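+    Example (illustrative):
+        jitter = ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
+        jittered = jitter(list_of_pil_frames)  # the same randomly sampled
+                                               # factors are applied to every frame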
+ """ + + def __init__(self, brightness=0, contrast=0, saturation=0, hue=0): + self.brightness = brightness + self.contrast = contrast + self.saturation = saturation + self.hue = hue + + def get_params(self, brightness, contrast, saturation, hue): + if brightness > 0: + brightness_factor = random.uniform( + max(0, 1 - brightness), 1 + brightness) + else: + brightness_factor = None + + if contrast > 0: + contrast_factor = random.uniform( + max(0, 1 - contrast), 1 + contrast) + else: + contrast_factor = None + + if saturation > 0: + saturation_factor = random.uniform( + max(0, 1 - saturation), 1 + saturation) + else: + saturation_factor = None + + if hue > 0: + hue_factor = random.uniform(-hue, hue) + else: + hue_factor = None + return brightness_factor, contrast_factor, saturation_factor, hue_factor + + def __call__(self, clip): + """ + Args: + clip (list): list of PIL.Image + Returns: + list PIL.Image : list of transformed PIL.Image + """ + if isinstance(clip[0], np.ndarray): + raise TypeError( + 'Color jitter not yet implemented for numpy arrays') + elif isinstance(clip[0], PIL.Image.Image): + brightness, contrast, saturation, hue = self.get_params( + self.brightness, self.contrast, self.saturation, self.hue) + + # Create img transform function sequence + img_transforms = [] + if brightness is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness)) + if saturation is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation)) + if hue is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue)) + if contrast is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast)) + random.shuffle(img_transforms) + + # Apply to all images + jittered_clip = [] + for img in clip: + for func in img_transforms: + jittered_img = func(img) + jittered_clip.append(jittered_img) + + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + return jittered_clip + + +class Normalize(object): + """Normalize a clip with mean and standard deviation. + Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform + will normalize each channel of the input ``torch.*Tensor`` i.e. + ``input[channel] = (input[channel] - mean[channel]) / std[channel]`` + .. note:: + This transform acts out of place, i.e., it does not mutates the input tensor. + Args: + mean (sequence): Sequence of means for each channel. + std (sequence): Sequence of standard deviations for each channel. + """ + + def __init__(self, mean, std): + self.mean = mean + self.std = std + + def __call__(self, clip): + """ + Args: + clip (Tensor): Tensor clip of size (T, C, H, W) to be normalized. + Returns: + Tensor: Normalized Tensor clip. 
+ """ + return FF.normalize(clip, self.mean, self.std) + + def __repr__(self): + return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std) diff --git a/Pretrain/VideoMAE/vis.sh b/Pretrain/VideoMAE/vis.sh new file mode 100644 index 0000000..14b0bf3 --- /dev/null +++ b/Pretrain/VideoMAE/vis.sh @@ -0,0 +1,22 @@ +# Set the path to save images +OUTPUT_DIR='/apdcephfs/share_1290939/elliottong/work_dir/output_dir/MAE_video/demo/vis_k400_1_0.9' +# path to image for visualization +# IMAGE_PATH='/apdcephfs/share_1290939/elliottong/dataset/Diving48/Y7QZcr24ye0_00538.mp4' +# IMAGE_PATH='/apdcephfs/share_1290939/elliottong/dataset/20bn-something-something-v2/16977.mp4' +IMAGE_PATH='/apdcephfs/share_1290939/elliottong/kinetics_400_val_10s_320p/-B2oGkg1qSI.mp4' + + +# path to pretrain model +# MODEL_PATH='/apdcephfs/share_1290939/elliottong/work_dir/output_dir/MAE_video/diving_pretrain_mae_base_patch16_224_frame_16x2_tc_mask_0.9_lr_3e-4_new_e3200/checkpoint-3199.pth' +# MODEL_PATH='/apdcephfs/share_1290939/elliottong/work_dir/output_dir/MAE_video/ssv2_pretrain_mae_base_patch16_224_frame_16x2_tc_mask_0.9_new_e2400/checkpoint-2399.pth' +MODEL_PATH='/apdcephfs/share_1290939/elliottong/work_dir/output_dir/MAE_video/ssv2_pretrain_mae_base_patch16_224_frame_16x2_tc_mask_0.9_new_e2400/checkpoint-2399.pth' + + +# Now, it only supports pretrained models with normalized pixel targets +python3 run_mae_vis.py \ + --mask_ratio 0.9 \ + --mask_type t_consist \ + --decoder_depth 4 \ + --model pretrain_mae_base_patch16_224 \ + --mask_type t_consist \ + ${IMAGE_PATH} ${OUTPUT_DIR} ${MODEL_PATH} \ No newline at end of file diff --git a/Pretrain/VideoMAE/vits.py b/Pretrain/VideoMAE/vits.py new file mode 100644 index 0000000..2e581b7 --- /dev/null +++ b/Pretrain/VideoMAE/vits.py @@ -0,0 +1,146 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import math +from functools import partial, reduce +from operator import mul + +import torch +import torch.nn as nn +from timm.models.layers.helpers import to_2tuple + +from modeling_finetune import PatchEmbed, VisionTransformer, _cfg + +__all__ = [ + 'vit_small', + 'vit_base', + 'vit_conv_small', + 'vit_conv_base', +] + + +class VisionTransformerMoCo(VisionTransformer): + def __init__(self, stop_grad_conv1=False, **kwargs): + super().__init__(**kwargs) + # Use fixed 2D sin-cos position embedding + # self.build_2d_sincos_position_embedding() + + # weight initialization + for name, m in self.named_modules(): + if isinstance(m, nn.Linear): + if 'qkv' in name: + # treat the weights of Q, K, V separately + val = math.sqrt(6. / float(m.weight.shape[0] // 3 + m.weight.shape[1])) + nn.init.uniform_(m.weight, -val, val) + else: + nn.init.xavier_uniform_(m.weight) + if m.bias is not None: + nn.init.zeros_(m.bias) + ####################################### + # nn.init.normal_(self.cls_token, std=1e-6) + ####################################### + + if isinstance(self.patch_embed, PatchEmbed): + # xavier_uniform initialization + val = math.sqrt(6. 
/ float(3 * reduce(mul, self.patch_embed.patch_size, 1) + self.embed_dim)) + nn.init.uniform_(self.patch_embed.proj.weight, -val, val) + nn.init.zeros_(self.patch_embed.proj.bias) + + if stop_grad_conv1: + self.patch_embed.proj.weight.requires_grad = False + self.patch_embed.proj.bias.requires_grad = False + + # def build_2d_sincos_position_embedding(self, temperature=10000.): + # h, w = self.patch_embed.grid_size + # grid_w = torch.arange(w, dtype=torch.float32) + # grid_h = torch.arange(h, dtype=torch.float32) + # grid_w, grid_h = torch.meshgrid(grid_w, grid_h) + # assert self.embed_dim % 4 == 0, 'Embed dimension must be divisible by 4 for 2D sin-cos position embedding' + # pos_dim = self.embed_dim // 4 + # omega = torch.arange(pos_dim, dtype=torch.float32) / pos_dim + # omega = 1. / (temperature**omega) + # out_w = torch.einsum('m,d->md', [grid_w.flatten(), omega]) + # out_h = torch.einsum('m,d->md', [grid_h.flatten(), omega]) + # pos_emb = torch.cat([torch.sin(out_w), torch.cos(out_w), torch.sin(out_h), torch.cos(out_h)], dim=1)[None, :, :] + + # assert self.num_tokens == 1, 'Assuming one and only one token, [cls]' + # pe_token = torch.zeros([1, 1, self.embed_dim], dtype=torch.float32) + # self.pos_embed = nn.Parameter(torch.cat([pe_token, pos_emb], dim=1)) + # self.pos_embed.requires_grad = False + + +class ConvStem(nn.Module): + """ + ConvStem, from Early Convolutions Help Transformers See Better, Tete et al. https://arxiv.org/abs/2106.14881 + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True): + super().__init__() + + assert patch_size == 16, 'ConvStem only supports patch size of 16' + assert embed_dim % 8 == 0, 'Embed dimension must be divisible by 8 for ConvStem' + + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + self.img_size = img_size + self.patch_size = patch_size + self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) + self.num_patches = self.grid_size[0] * self.grid_size[1] + self.flatten = flatten + + # build stem, similar to the design in https://arxiv.org/abs/2106.14881 + stem = [] + input_dim, output_dim = 3, embed_dim // 8 + for l in range(4): + stem.append(nn.Conv2d(input_dim, output_dim, kernel_size=3, stride=2, padding=1, bias=False)) + stem.append(nn.BatchNorm2d(output_dim)) + stem.append(nn.ReLU(inplace=True)) + input_dim = output_dim + output_dim *= 2 + stem.append(nn.Conv2d(input_dim, embed_dim, kernel_size=1)) + self.proj = nn.Sequential(*stem) + + self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() + + def forward(self, x): + B, C, H, W = x.shape + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." 
+ x = self.proj(x) + if self.flatten: + x = x.flatten(2).transpose(1, 2) # BCHW -> BNC + x = self.norm(x) + return x + + +def vit_small(**kwargs): + model = VisionTransformerMoCo( + patch_size=16, embed_dim=384, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model.default_cfg = _cfg() + return model + +def vit_base(**kwargs): + model = VisionTransformerMoCo( + patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model.default_cfg = _cfg() + return model + +def vit_conv_small(**kwargs): + # minus one ViT block + model = VisionTransformerMoCo( + patch_size=16, embed_dim=384, depth=11, num_heads=12, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), embed_layer=ConvStem, **kwargs) + model.default_cfg = _cfg() + return model + +def vit_conv_base(**kwargs): + # minus one ViT block + model = VisionTransformerMoCo( + patch_size=16, embed_dim=768, depth=11, num_heads=12, mlp_ratio=4, qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), embed_layer=ConvStem, **kwargs) + model.default_cfg = _cfg() + return model \ No newline at end of file diff --git a/Pretrain/VideoMAE/volume_transforms.py b/Pretrain/VideoMAE/volume_transforms.py new file mode 100644 index 0000000..80e002a --- /dev/null +++ b/Pretrain/VideoMAE/volume_transforms.py @@ -0,0 +1,131 @@ +import numpy as np +import torch +from PIL import Image + + +def convert_img(img): + """Converts (H, W, C) numpy.ndarray to (C, W, H) format + """ + if len(img.shape) == 3: + img = img.transpose(2, 0, 1) + if len(img.shape) == 2: + img = np.expand_dims(img, 0) + return img + + +class ClipToTensor(object): + """Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255] + to a torch.FloatTensor of shape (C x m x H x W) in the range [0, 1.0] + """ + + def __init__(self, channel_nb=3, div_255=True, numpy=False): + self.channel_nb = channel_nb + self.div_255 = div_255 + self.numpy = numpy + + def __call__(self, clip): + """ + Args: clip (list of numpy.ndarray): clip (list of images) + to be converted to tensor. 
+ """ + # Retrieve shape + if isinstance(clip[0], np.ndarray): + h, w, ch = clip[0].shape + assert ch == self.channel_nb, 'Got {0} instead of 3 channels'.format( + ch) + elif isinstance(clip[0], Image.Image): + w, h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image\ + but got list of {0}'.format(type(clip[0]))) + + np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)]) + + # Convert + for img_idx, img in enumerate(clip): + if isinstance(img, np.ndarray): + pass + elif isinstance(img, Image.Image): + img = np.array(img, copy=False) + else: + raise TypeError('Expected numpy.ndarray or PIL.Image\ + but got list of {0}'.format(type(clip[0]))) + img = convert_img(img) + np_clip[:, img_idx, :, :] = img + if self.numpy: + if self.div_255: + np_clip = np_clip / 255.0 + return np_clip + + else: + tensor_clip = torch.from_numpy(np_clip) + + if not isinstance(tensor_clip, torch.FloatTensor): + tensor_clip = tensor_clip.float() + if self.div_255: + tensor_clip = torch.div(tensor_clip, 255) + return tensor_clip + + +# Note this norms data to -1/1 +class ClipToTensor_K(object): + """Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255] + to a torch.FloatTensor of shape (C x m x H x W) in the range [0, 1.0] + """ + + def __init__(self, channel_nb=3, div_255=True, numpy=False): + self.channel_nb = channel_nb + self.div_255 = div_255 + self.numpy = numpy + + def __call__(self, clip): + """ + Args: clip (list of numpy.ndarray): clip (list of images) + to be converted to tensor. + """ + # Retrieve shape + if isinstance(clip[0], np.ndarray): + h, w, ch = clip[0].shape + assert ch == self.channel_nb, 'Got {0} instead of 3 channels'.format( + ch) + elif isinstance(clip[0], Image.Image): + w, h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image\ + but got list of {0}'.format(type(clip[0]))) + + np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)]) + + # Convert + for img_idx, img in enumerate(clip): + if isinstance(img, np.ndarray): + pass + elif isinstance(img, Image.Image): + img = np.array(img, copy=False) + else: + raise TypeError('Expected numpy.ndarray or PIL.Image\ + but got list of {0}'.format(type(clip[0]))) + img = convert_img(img) + np_clip[:, img_idx, :, :] = img + if self.numpy: + if self.div_255: + np_clip = (np_clip - 127.5) / 127.5 + return np_clip + + else: + tensor_clip = torch.from_numpy(np_clip) + + if not isinstance(tensor_clip, torch.FloatTensor): + tensor_clip = tensor_clip.float() + if self.div_255: + tensor_clip = torch.div(torch.sub(tensor_clip, 127.5), 127.5) + return tensor_clip + + +class ToTensor(object): + """Converts numpy array to tensor + """ + + def __call__(self, array): + tensor = torch.from_numpy(array) + return tensor diff --git a/README.md b/README.md index a3ddd3d..c70bfdd 100644 --- a/README.md +++ b/README.md @@ -1,19 +1,18 @@ -# InternVideo +# InternVideo \[[Paper\]](https://arxiv.org/pdf/2212.03191.pdf) +[中文 README](README_cn.md) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/internvideo-general-video-foundation-models/action-classification-on-kinetics-400)](https://paperswithcode.com/sota/action-classification-on-kinetics-400?p=internvideo-general-video-foundation-models) 
[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/internvideo-general-video-foundation-models/action-classification-on-kinetics-600)](https://paperswithcode.com/sota/action-classification-on-kinetics-600?p=internvideo-general-video-foundation-models) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/internvideo-general-video-foundation-models/action-classification-on-kinetics-700)](https://paperswithcode.com/sota/action-classification-on-kinetics-700?p=internvideo-general-video-foundation-models) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/internvideo-general-video-foundation-models/action-recognition-in-videos-on-something-1)](https://paperswithcode.com/sota/action-recognition-in-videos-on-something-1?p=internvideo-general-video-foundation-models) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/internvideo-general-video-foundation-models/action-recognition-in-videos-on-something)](https://paperswithcode.com/sota/action-recognition-in-videos-on-something?p=internvideo-general-video-foundation-models) - [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/internvideo-general-video-foundation-models/spatio-temporal-action-localization-on-ava)](https://paperswithcode.com/sota/spatio-temporal-action-localization-on-ava?p=internvideo-general-video-foundation-models) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/internvideo-general-video-foundation-models/action-recognition-on-ava-v2-2)](https://paperswithcode.com/sota/action-recognition-on-ava-v2-2?p=internvideo-general-video-foundation-models) - [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/internvideo-general-video-foundation-models/video-retrieval-on-activitynet)](https://paperswithcode.com/sota/video-retrieval-on-activitynet?p=internvideo-general-video-foundation-models) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/internvideo-general-video-foundation-models/video-retrieval-on-didemo)](https://paperswithcode.com/sota/video-retrieval-on-didemo?p=internvideo-general-video-foundation-models) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/internvideo-general-video-foundation-models/video-retrieval-on-msr-vtt)](https://paperswithcode.com/sota/video-retrieval-on-msr-vtt?p=internvideo-general-video-foundation-models) -[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/internvideo-general-video-foundation-models/video-retrieval-on-lsmdc)](https://paperswithcode.com/sota/video-retrieval-on-lsmdc?p=internvideo-general-video-foundation-models) +[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/internvideo-general-video-foundation-models/video-retrieval-on-lsmdc)](https://paperswithcode.com/sota/video-retrieval-on-lsmdc?p=internvideo-general-video-foundation-models) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/internvideo-general-video-foundation-models/video-retrieval-on-msvd)](https://paperswithcode.com/sota/video-retrieval-on-msvd?p=internvideo-general-video-foundation-models) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/internvideo-general-video-foundation-models/video-retrieval-on-vatex)](https://paperswithcode.com/sota/video-retrieval-on-vatex?p=internvideo-general-video-foundation-models) 
[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/internvideo-general-video-foundation-models/zero-shot-video-retrieval-on-activitynet)](https://paperswithcode.com/sota/zero-shot-video-retrieval-on-activitynet?p=internvideo-general-video-foundation-models)
@@ -22,16 +21,136 @@
[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/internvideo-general-video-foundation-models/zero-shot-video-retrieval-on-lsmdc)](https://paperswithcode.com/sota/zero-shot-video-retrieval-on-lsmdc?p=internvideo-general-video-foundation-models)
[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/internvideo-general-video-foundation-models/zero-shot-video-retrieval-on-msvd)](https://paperswithcode.com/sota/zero-shot-video-retrieval-on-msvd?p=internvideo-general-video-foundation-models)
[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/internvideo-general-video-foundation-models/zero-shot-video-retrieval-on-vatex)](https://paperswithcode.com/sota/zero-shot-video-retrieval-on-vatex?p=internvideo-general-video-foundation-models)
-
-
[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/internvideo-general-video-foundation-models/visual-question-answering-on-msrvtt-qa-1)](https://paperswithcode.com/sota/visual-question-answering-on-msrvtt-qa-1?p=internvideo-general-video-foundation-models)
[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/internvideo-general-video-foundation-models/visual-question-answering-on-msvd-qa-1)](https://paperswithcode.com/sota/visual-question-answering-on-msvd-qa-1?p=internvideo-general-video-foundation-models)
-[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/internvideo-general-video-foundation-models/visual-question-answering-on-tgif-qa)](https://paperswithcode.com/sota/visual-question-answering-on-tgif-qa?p=internvideo-general-video-foundation-models)
+[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/internvideo-general-video-foundation-models/visual-question-answering-on-tgif-qa)](https://paperswithcode.com/sota/visual-question-answering-on-tgif-qa?p=internvideo-general-video-foundation-models)
+[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/internvideo-general-video-foundation-models/open-set-action-recognition-on-ucf101-mitv2)](https://paperswithcode.com/sota/open-set-action-recognition-on-ucf101-mitv2?p=internvideo-general-video-foundation-models)
+[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/internvideo-general-video-foundation-models/open-set-action-recognition-on-ucf-hmdb)](https://paperswithcode.com/sota/open-set-action-recognition-on-ucf-hmdb?p=internvideo-general-video-foundation-models)
+
+This repo gives the official implementation of '[InternVideo: General Video Foundation Models via Generative and Discriminative Learning](https://arxiv.org/abs/2212.03191)'.
+
+- **Achieved `91.1%` top-1 accuracy on Kinetics-400, surpassing the `90%` milestone for `the first time`.**
+- **Achieved `77.2%` top-1 accuracy on Something-Something V2.**
+- **Achieved `SOTA` performance on `39` video datasets (including action recognition, temporal localization, retrieval, etc.) when released in 2022.**
+
+## Updates
+- `Sep 7, 2023`: [**ViCLIP**](https://huggingface.co/OpenGVLab/ViCLIP), a simple video CLIP for transferrable video-text representation, is available at [Hugging Face](https://huggingface.co/OpenGVLab/ViCLIP).
+- `July 16, 2023`: A **video-text dataset InternVid** is partially released [here](Data/InternVid) for facilitating multimodal understanding and generation. A subset of this dataset, consisting of 10 million video clips, is available at [Hugging Face](https://huggingface.co/datasets/OpenGVLab/InternVid).
+- `May 11, 2023`: **Video instruction data** are released [here](Data/instruction_data) for tuning end-to-end video-centric multimodal dialogue systems like [VideoChat](https://github.com/OpenGVLab/Ask-Anything).
+- `Mar 8, 2023`: All pretrained foundation model weights are released. See them [here](#model-zoo).
+- `Feb 19, 2023`: Some pretrained foundation model weights (-L) are released.
+- `Feb 5, 2023`: The code & model of multimodal learning are released.
+- `Jan 18, 2023`: The code of vision-language navigation is released.
+- `Jan 16, 2023`: The code of video question answering, zero-shot action recognition, and zero-shot multiple choice is released.
+- `Jan 1, 2023`: The code & model of spatio-temporal action localization are released.
+- `Dec 27, 2022`: The code & model of partial pretraining (VideoMAE) and downstream applications (video-text retrieval, temporal action localization, open-set action recognition, and Ego4D related tasks) are released.
+- `Dec 6, 2022`: The technical report of InternVideo is released.
+- `Sep 2, 2022`: Press releases ([official](https://www.shlab.org.cn/news/5443279) | [163 news](https://www.163.com/dy/article/HG939TNR0530QRMB.html) | [qq news](https://new.qq.com/rain/a/20220902A053JP00)).
-This repo gives the official implmentation of '[InternVideo: General Video Foundation Models via Generative and Discriminative Learning](https://arxiv.org/abs/2212.03191)',
-by Yi Wang, Kunchang Li, Yizhuo Li, Yinan He, Bingkun Huang, Zhiyu Zhao, Jilan Xu, Yi Liu, Zun Wang, Sen Xing, Guo Chen, Junting Pan, Jashuo Yu, Hongjie Zhang, Yali Wang, Limin Wang, and Yu Qiao.
+## Introduction
+*We present the first video foundation model to achieve high performance on both video and video-text tasks*.
+
+Foundation models have recently shown excellent performance on a variety of downstream tasks in computer vision. However, most existing vision foundation models focus on image-level pretraining and adaptation, which fall short for dynamic and complex video-level understanding tasks. To fill this gap, we present general video foundation models, *InternVideo*, by taking advantage of both generative and discriminative self-supervised video learning. Specifically, InternVideo efficiently explores masked video modeling and video-language contrastive learning as the pretraining objectives, and selectively coordinates video representations of these two complementary frameworks in a learnable manner to boost various video applications. Without bells and whistles, InternVideo achieves state-of-the-art performance on 39 video datasets across extensive tasks including video action recognition/detection, video-language alignment, and open-world video applications. In particular, our methods obtain 91.1% and 77.2% top-1 accuracy on the challenging Kinetics-400 and Something-Something V2 benchmarks, respectively.
+
+## Code & model
+- [ ] Video foundation model pretraining.
+  - [x] [video masked modeling](Pretrain/VideoMAE).
+  - [x] [video-language contrastive learning modeling](Pretrain/Multi-Modalities-Pretraining).
+  - [x] Supervised training of [ViT (from video masked modeling)](Pretrain/VideoMAE#finetune) and [UniFormerV2 (from multimodal learning)](https://github.com/OpenGVLab/UniFormerV2/blob/main/INSTRUCTIONS.md#training).
+  - [ ] Model interaction.
+- [ ] Downstream tasks.
+  - [ ] Action recognition.
+  - [x] [Temporal action localization](Downstream/Temporal-Action-Localization).
+  - [x] [Spatio-temporal action localization](Downstream/Spatial-Temporal-Action-Localization).
+  - [x] [Video-text retrieval](Downstream/Video-Text-Retrieval).
+  - [x] [Video question answering](Downstream/multi-modalities-downstream#video-question-answering).
+  - [x] [Visual-language navigation](Downstream/Visual-Language-Navigation).
+  - [x] [Open-set action recognition](Downstream/Open-Set-Action-Recognition).
+  - [x] [Zero-shot action recognition](Downstream/multi-modalities-downstream#zero-shot-action-recognition).
+  - [x] [Zero-shot multiple choice](Downstream/multi-modalities-downstream#zero-shot-multiple-choice).
+  - [x] [Ego4D related tasks](https://github.com/OpenGVLab/ego4d-eccv2022-solutions).
+- [x] [Pretrained foundation model weights](https://github.com/OpenGVLab/InternVideo#model-zoo).
+- [ ] Demos of training usage and evaluation.
+
+## Performance
+- [Video Retrieval](Downstream/Video-Text-Retrieval#our-results)
+
+## Model Zoo
+
+
+### Pretrained Models
+
+ +| Model | Training Data | download | +| :-----------------: | :----------------------: | :---------------------------------------------------------------------------------------------------: | +| InternVideo-MM-L-14 | WebVid10M+Self-collected (14M) | [ckpt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/pretrain/InternVideo-MM-L-14.ckpt) | +| VideoMAE-B | UnlabeledHybrid (1M) | [ckpt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/pretrain/videomae/vit_b_hybrid_pt_800e.pth) | +| VideoMAE-L | UnlabeledHybrid (1M)| [ckpt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/pretrain/videomae/vit_l_hybrid_pt_800e.pth) | +| VideoMAE-H | UnlabeledHybrid (1M)| [ckpt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/pretrain/videomae/vit_h_hybrid_pt_1200e.pth) | +
+
+ +
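+The snippet below is a minimal sketch, not part of the release, showing how one might inspect a downloaded pretraining checkpoint with plain PyTorch; the local filename and the `model`/`module` wrapper keys are assumptions about common `torch.save` layouts rather than a documented interface of this repo.
+
+```python
+import torch
+
+# Hypothetical local path: substitute the checkpoint file downloaded from the table above.
+ckpt = torch.load("vit_b_hybrid_pt_800e.pth", map_location="cpu")
+
+# Released checkpoints are assumed to be dicts, possibly wrapping the weights under a
+# key such as 'model' or 'module'; fall back to the dict itself if neither key exists.
+state_dict = ckpt.get("model", ckpt.get("module", ckpt))
+
+print(f"{len(state_dict)} tensors loaded")
+for name, tensor in list(state_dict.items())[:5]:
+    print(name, tuple(tensor.shape))
+```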
+
+### Downstream Tasks
+
+ +**Classification** +| Model | Finetuning Data | download | +| :-----------------: | :----------------: | :---------------------------------------------------------------------------------------------------: | +| VideoMAE-B | K400 | [ckpt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/pretrain/videomae/vit_b_hybrid_pt_800e_k400_ft.pth) | +| VideoMAE-B | K710 | [ckpt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/pretrain/videomae/vit_b_hybrid_pt_800e_k710_ft.pth) | +| VideoMAE-B | SSv2 | [ckpt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/pretrain/videomae/vit_b_hybrid_pt_800e_ssv2_ft.pth) | +| VideoMAE-L | K400 | [ckpt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/pretrain/videomae/vit_l_hybrid_pt_800e_k400_ft.pth) | +| VideoMAE-L | K700 | [ckpt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/pretrain/videomae/vit_l_hybrid_pt_800e_k700_ft.pth) | +| VideoMAE-L | SSv2 | [ckpt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/pretrain/videomae/vit_l_hybrid_pt_800e_ssv2_ft.pth) | +| VideoMAE-H | K400 | [ckpt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/action_recognition/K400/VideoMAE/ViT-H_f32_res384_89.54.pth) [log](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/action_recognition/K400/VideoMAE/ViT-H_f32_res384_89.54.log)| +| VideoMAE-H | SSv1 | [ckpt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/action_recognition/SSV1/VideoMAE/ViT-H.pth) [log](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/action_recognition/SSV1/VideoMAE/log.txt)| +| VideoMAE-H | HMDB51 | [ckpt_split1](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/action_recognition/HMDB51/VideoMAE/split1_89.64/ViT-H.pth) | [ckpt_split2](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/action_recognition/HMDB51/VideoMAE/split2_89.92/ViT-H.pth) | [ckpt_split3](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/action_recognition/HMDB51/VideoMAE/split3_88.35/ViT-H.pth)| + +**Retrieval** +| Model | Training Data | download | +| :-----------------: | :----------------: | :---------------------------------------------------------------------------------------------------: | +| InternVideo-MM-L-14 | ActivityNet | [ckpt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/retrieval/activitynet/kc4_1e-3_2e-3_bs64_77words_64frame_dsl/pytorch_model.bin) [opt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/retrieval/activitynet/kc4_1e-3_2e-3_bs64_77words_64frame_dsl/pytorch_opt.bin) [log](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/retrieval/activitynet/kc4_1e-3_2e-3_bs64_77words_64frame_dsl/log.txt)| +| InternVideo-MM-L-14 | DiDeMo | [ckpt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/retrieval/didemo/pytorch_model.bin) [opt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/retrieval/didemo/pytorch_opt.bin) [log](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/retrieval/didemo/log.txt)| +| InternVideo-MM-L-14 | LSMDC | [ckpt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/retrieval/lsmdc/pytorch_model.bin) [opt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/retrieval/lsmdc/pytorch_opt.bin) [log](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/retrieval/lsmdc/log.txt)| +| InternVideo-MM-L-14 | MSR-VTT | 
[ckpt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/retrieval/msrvtt/kc4_finetune_1e-32e-3_77words_12frames_128_16_bothdsl/pytorch_model.bin) [opt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/retrieval/msrvtt/kc4_finetune_1e-32e-3_77words_12frames_128_16_bothdsl/pytorch_opt.bin) [log](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/retrieval/msrvtt/kc4_finetune_1e-32e-3_77words_12frames_128_16_bothdsl/log.txt)| +| InternVideo-MM-L-14 | MSVD | [ckpt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/retrieval/msvd/pytorch_model.bin) [opt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/retrieval/msvd/pytorch_opt.bin) [log](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/retrieval/msvd/log.txt)| +| InternVideo-MM-L-14 | VATEX | [ckpt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/retrieval/vatex/kc4_1e-35e-3_128_8frame/pytorch_model.bin) [opt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/retrieval/vatex/kc4_1e-35e-3_128_8frame/pytorch_opt.bin) [log](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/retrieval/vatex/kc4_1e-35e-3_128_8frame/log.txt)| + +**VideoQA** +| Model | Finetuning Data | download | +| :-----------------: | :----------------: | :---------------------------------------------------------------------------------------------------: | +| InternVideo-MM-L-14 | MSR-VTT | [ckpt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/vqa/msrvtt.ckpt) | +| InternVideo-MM-L-14 | MSVD | [ckpt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/vqa/msvd.ckpt) | +| InternVideo-MM-L-14 | TGIFQA | [ckpt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/vqa/tqifqa.ckpt) | + +**Spatio-Temporal Action Localization** +| Model | Finetuning Data | download | +| :-----------------: | :----------------: | :---------------------------------------------------------------------------------------------------: | +| VideoMAE-H | AVA-Kinetics | [ckpt](https://pjlab-gvm-data.oss-cn-shanghai.aliyuncs.com/internvideo/stal/vit_h_hybrid_pt_k710_ft_ak_ft.sh) | +
+
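+As a rough illustration of what the retrieval checkpoints above are used for, the sketch below scores CLIP-style text-to-video retrieval from placeholder embeddings; the encoders are omitted, and the batch size and 768-d embedding width are assumptions made only for the example, not properties of the released models.
+
+```python
+import torch
+import torch.nn.functional as F
+
+# Placeholder features standing in for the outputs of the video and text encoders
+# (e.g. an InternVideo-MM-L-14 style model); shapes are illustrative assumptions.
+video_emb = torch.randn(4, 768)  # 4 candidate videos
+text_emb = torch.randn(4, 768)   # 4 query captions, caption i paired with video i
+
+# CLIP-style scoring: cosine similarity between L2-normalized embeddings.
+video_emb = F.normalize(video_emb, dim=-1)
+text_emb = F.normalize(text_emb, dim=-1)
+sim = text_emb @ video_emb.t()   # (num_texts, num_videos)
+
+# Text-to-video retrieval: rank videos for each caption and compute a toy Recall@1.
+ranks = sim.argsort(dim=-1, descending=True)
+recall_at_1 = (ranks[:, 0] == torch.arange(text_emb.size(0))).float().mean().item()
+print(f"toy R@1 = {recall_at_1:.2f}")
+```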
-Code will be released.
+To further improve our work, please fill out the [form](https://wenjuan.feishu.cn/m?t=syQjww7QWNJi-jk5u) (or scan the QR code below) if you have time.
+
+survey_icon
+
+## Citation
+
+If this work is helpful for your research, please consider citing InternVideo and the related work.
+
+```
+@article{wang2022internvideo,
+  title={InternVideo: General Video Foundation Models via Generative and Discriminative Learning},
+  author={Wang, Yi and Li, Kunchang and Li, Yizhuo and He, Yinan and Huang, Bingkun and Zhao, Zhiyu and Zhang, Hongjie and Xu, Jilan and Liu, Yi and Wang, Zun and Xing, Sen and Chen, Guo and Pan, Junting and Yu, Jiashuo and Wang, Yali and Wang, Limin and Qiao, Yu},
+  journal={arXiv preprint arXiv:2212.03191},
+  year={2022}
+}
+
+@article{wang2023videomae,
+  title={VideoMAE V2: Scaling Video Masked Autoencoders with Dual Masking},
+  author={Wang, Limin and Huang, Bingkun and Zhao, Zhiyu and Tong, Zhan and He, Yinan and Wang, Yi and Wang, Yali and Qiao, Yu},
+  journal={arXiv preprint arXiv:2303.16727},
+  year={2023}
+}
+
+@article{li2022uniformerv2,
+  title={UniFormerV2: Spatiotemporal Learning by Arming Image ViTs with Video UniFormer},
+  author={Li, Kunchang and Wang, Yali and He, Yinan and Li, Yizhuo and Wang, Yi and Wang, Limin and Qiao, Yu},
+  journal={arXiv preprint arXiv:2211.09552},
+  year={2022}
+}
+
+@article{li2023unmasked,
+  title={Unmasked Teacher: Towards Training-Efficient Video Foundation Models},
+  author={Li, Kunchang and Wang, Yali and Li, Yizhuo and Wang, Yi and He, Yinan and Wang, Limin and Qiao, Yu},
+  journal={arXiv preprint arXiv:2303.16058},
+  year={2023}
+}
+```